diff --git a/.dockerignore b/.dockerignore index c2e9e8e84..9d9f68fbe 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,6 @@ .tox .venv +.venv39 .mypy_cache .pytest_cache .git diff --git a/.github/dependabot.yml b/.github/dependabot.yml index fb915a9e8..3b6ada218 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -9,10 +9,6 @@ updates: # see https://github.com/dependabot/dependabot-core/pull/10194 versioning-strategy: auto ignore: - - dependency-name: sphinx - versions: - - 3.4.3 - - 3.5.2 # We only use setuptools for a couple of things in the test suite # There is no need to keep it bleeding-edge. There are too frequent # updates to setuptools, requires too much maintenance to keep it up to date. diff --git a/.github/workflows/validate.yaml b/.github/workflows/validate.yaml index b4318a959..f797c3247 100644 --- a/.github/workflows/validate.yaml +++ b/.github/workflows/validate.yaml @@ -25,26 +25,26 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11", "3.12"] os: [ubuntu-latest, macos-latest, windows-latest] # This is used for injecting additional tests for a specific python # version and OS. suffix: [""] include: - - python-version: "3.8" + - python-version: "3.9" os: ubuntu-latest extensive-tests: true TOXENV_SUFFIX: "-docs" - - python-version: "3.8" + - python-version: "3.9" os: ubuntu-latest extensive-tests: true PREPARATION: "sudo apt-get install -y libxml2-dev libxslt-dev" suffix: "-min" TOXENV_SUFFIX: "-min" - - python-version: "3.9" + - python-version: "3.10" os: ubuntu-latest TOXENV_SUFFIX: "-docs" - - python-version: "3.10" + - python-version: "3.11" os: ubuntu-latest TOX_EXTRA_COMMAND: "- black --check --diff ./rdflib" TOXENV_SUFFIX: "-lxml" @@ -55,6 +55,10 @@ jobs: extensive-tests: true TOX_TEST_HARNESS: "firejail --net=none --" TOX_PYTEST_EXTRA_ARGS: "-m 'not webtest'" + - python-version: "3.12" + os: ubuntu-latest + TOX_EXTRA_COMMAND: "- black --check --diff ./rdflib" + TOXENV_SUFFIX: "-lxml" steps: - uses: actions/checkout@v4 - name: Cache XDG_CACHE_HOME @@ -121,7 +125,7 @@ jobs: matrix: include: - task: "gha:lint" - python-version: 3.8 + python-version: 3.9 steps: - uses: actions/checkout@v4 - name: Cache XDG_CACHE_HOME diff --git a/.gitignore b/.gitignore index d42dc26fd..83a2c4ada 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,5 @@ .flakeheaven_cache/ RDFLib.sublime-project -/docs/_build/ RDFLib.sublime-workspace coverage/ cov.xml @@ -8,6 +7,8 @@ cov.xml /.hgignore build/ /docs/draft/ +/docs/apidocs/ +/docs/_build/ *~ test_reports/*latest.ttl # PyCharm @@ -139,6 +140,7 @@ celerybeat.pid # Environments .env .venv +.venv39 env/ venv/ ENV/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8a74122cc..1f3965bab 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,29 +1,29 @@ --- ci: # https://pre-commit.ci/#configuration - autoupdate_schedule: weekly + autoupdate_schedule: quarterly autofix_prs: false # https://pre-commit.com/#adding-pre-commit-plugins-to-your-project repos: - repo: https://github.com/astral-sh/ruff-pre-commit # WARNING: Ruff version should be the same as in `pyproject.toml` - rev: v0.5.4 + rev: v0.8.6 hooks: - id: ruff args: ["--fix"] - repo: https://github.com/psf/black-pre-commit-mirror # WARNING: Black version should be the same as in `pyproject.toml` - rev: "24.4.2" + rev: "24.10.0" hooks: - id: black pass_filenames: false require_serial: true args: ["."] - repo: https://github.com/python-poetry/poetry - 
rev: 1.8.3 + rev: 2.0.0 hooks: - id: poetry-check - id: poetry-lock # sadly `--no-update` does not work on pre-commit.ci - args: ["--check"] + diff --git a/.readthedocs.yaml b/.readthedocs.yaml index f5becb937..96dcb371c 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,7 +1,7 @@ --- # https://docs.readthedocs.io/en/stable/config-file/v2.html version: 2 -# NOTE: not builing epub because epub does not know how to handle .ico files +# NOTE: not building epub because epub does not know how to handle .ico files # which results in a warning which causes the build to fail due to # `sphinx.fail_on_warning` # https://github.com/sphinx-doc/sphinx/issues/10350 @@ -9,12 +9,11 @@ formats: - htmlzip - pdf +# https://docs.readthedocs.com/platform/stable/intro/mkdocs.html build: - os: ubuntu-20.04 + os: "ubuntu-24.04" tools: - # Using 3.9 as earlier versions have trouble generating documentation for - # `@typing.overload`` with type aliases. - python: "3.9" + python: "3" jobs: post_create_environment: # Using requirements-poetry.in as requirements-poetry.txt has conflicts with @@ -24,7 +23,6 @@ build: - poetry export --only=main --only=docs --without-hashes -o requirements.txt - pip install --no-cache-dir -r requirements.txt - pip install . - - python -c "from rdflib import Graph; print(Graph)" -sphinx: - fail_on_warning: true +mkdocs: + configuration: mkdocs.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index bb6d15999..301e6a3e8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +## 2025-01-10 RELEASE 7.1.2 + +A minor release that bumped up a few dev dependencies and achieved a few small but notable improvements, particularly with longturtle sorting: + +Feature PRs: + +* [PR #2963](https://github.com/RDFLib/rdflib/pull/2963) Big typing updates +* [PR #2964](https://github.com/RDFLib/rdflib/pull/2964) Defined Namespace warnings fix +* [PR #2971](https://github.com/RDFLib/rdflib/pull/2971) convert uses of Optional and some Union usage to union operator | +* [PR #2989](https://github.com/RDFLib/rdflib/pull/2989) Fixed incorrect ASK behaviour for dataset with one element +* [PR #2997](https://github.com/RDFLib/rdflib/pull/2997) sort longturtle blank nodes +* [PR #3008](https://github.com/RDFLib/rdflib/pull/3008) deterministic longturtle serialisation using RDF canonicalization + n-triples sort +* [PR #3012](https://github.com/RDFLib/rdflib/pull/3012) Dataset documentation improvements + +Dependency bumps: + +* ruff from 0.7.1 -> 0.8.6 +* orjson 3.10.10 -> +* pytest-cov to 6.0.0 +* coverage to 7.6.10 +* pytest to 8.3.4 +* poetry to 2.0.0 + ## 2024-10-17 RELEASE 7.1.1 This minor release removes the dependency on some only Python packages, in particular diff --git a/CITATION.cff b/CITATION.cff index c403aa383..88ae8274d 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -4,9 +4,12 @@ authors: - family-names: "Krech" given-names: "Daniel" - family-names: "Grimnes" - given-names: "Gunnar AAstrand" + given-names: "Gunnar Aastrand" - family-names: "Higgins" given-names: "Graham" +- family-names: "Car" + given-names: "Nicholas" + orcid: "https://orcid.org/0000-0002-8742-7730" - family-names: "Hees" given-names: "Jörn" orcid: "https://orcid.org/0000-0002-0084-8998" @@ -69,7 +72,7 @@ authors: - family-names: "Stuart" given-names: "Veyndan" title: "RDFLib" -version: 7.1.1 -date-released: 2024-10-28 +version: 7.1.2 +date-released: 2025-01-10 url: "https://github.com/RDFLib/rdflib" doi: 10.5281/zenodo.6845245 diff --git a/CONTRIBUTORS b/CONTRIBUTORS index 506bda0e6..9e78c618f 100644 --- a/CONTRIBUTORS 
+++ b/CONTRIBUTORS @@ -46,6 +46,7 @@ Daniel Krech Dann Martens Darren Garvey Dave Challis +David Habgood David H Jones David Steinberg Debabrata Deka diff --git a/LICENSE b/LICENSE index 6f2449678..75e852b7b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2002-2024, RDFLib Team +Copyright (c) 2002-2025, RDFLib Team All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/MANIFEST.in b/MANIFEST.in index 1eeed9fe9..276b18a56 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -9,4 +9,5 @@ recursive-include examples *.py graft test graft docs prune docs/_build +prune site/ global-exclude *.pyc *$py.class diff --git a/README.md b/README.md index a5b9c9ff2..608eed102 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ -![](docs/_static/RDFlib.png) +![](docs/_static/RDFlib.png) + +# RDFLib -RDFLib -====== [![Build Status](https://github.com/RDFLib/rdflib/actions/workflows/validate.yaml/badge.svg?branch=main)](https://github.com/RDFLib/rdflib/actions?query=branch%3Amain) [![Documentation Status](https://readthedocs.org/projects/rdflib/badge/?version=latest)](https://rdflib.readthedocs.io/en/latest/?badge=latest) [![Coveralls branch](https://img.shields.io/coveralls/RDFLib/rdflib/main.svg)](https://coveralls.io/r/RDFLib/rdflib?branch=main) @@ -31,7 +31,7 @@ The RDFlib community maintains many RDF-related Python code repositories with di * [sparqlwrapper](https://github.com/RDFLib/sparqlwrapper) - a simple Python wrapper around a SPARQL service to remotely execute your queries * [pyLODE](https://github.com/RDFLib/pyLODE) - An OWL ontology documentation tool using Python and templating, based on LODE. * [pyrdfa3](https://github.com/RDFLib/pyrdfa3) - RDFa 1.1 distiller/parser library: can extract RDFa 1.1/1.0 from (X)HTML, SVG, or XML in general. -* [pymicrodata](https://github.com/RDFLib/pymicrodata) - A module to extract RDF from an HTML5 page annotated with microdata. +* [pymicrodata](https://github.com/RDFLib/pymicrodata) - A module to extract RDF from an HTML5 page annotated with microdata. * [pySHACL](https://github.com/RDFLib/pySHACL) - A pure Python module which allows for the validation of RDF graphs against SHACL graphs. * [OWL-RL](https://github.com/RDFLib/OWL-RL) - A simple implementation of the OWL2 RL Profile which expands the graph with all possible triples that OWL RL defines. @@ -43,13 +43,15 @@ Help with maintenance of all of the RDFLib family of packages is always welcome ## Versions & Releases -* `main` branch in this repository is the unstable release -* `7.1.1` current stable release, bugfixes to 7.1.0 -* `7.0.0` previous stable release, supports Python 3.8.1+ only. - * see [Releases](https://github.com/RDFLib/rdflib/releases) +* `main` branch in this repository is the current unstable release - version 8 alpha +* `7.1.4` tidy-up release, possibly the last 7.x release +* `7.1.2` current stable release, small improvements to 7.1.1 +* `7.1.1` previous stable release + * see <https://github.com/RDFLib/rdflib/releases> * `6.x.y` supports Python 3.7+ only. Many improvements over 5.0.0 - * see [Releases](https://github.com/RDFLib/rdflib/releases) + * see <https://github.com/RDFLib/rdflib/releases> * `5.x.y` supports Python 2.7 and 3.4+ and is [mostly backwards compatible with 4.2.2](https://rdflib.readthedocs.io/en/stable/upgrade4to5.html). + * see <https://github.com/RDFLib/rdflib/releases> See <https://github.com/RDFLib/rdflib/releases/> for the release details. 
@@ -68,7 +70,7 @@ Some features of RDFLib require optional dependencies which may be installed usi Alternatively manually download the package from the Python Package Index (PyPI) at https://pypi.python.org/pypi/rdflib -The current version of RDFLib is 7.1.1, see the ``CHANGELOG.md`` file for what's new in this release. +The current version of RDFLib is 7.1.2, see the ``CHANGELOG.md`` file for what's new in this release. ### Installation of the current main branch (for developers) @@ -132,18 +134,21 @@ g.add(( Literal("Nick", datatype=XSD.string) )) ``` + The triple (in n-triples notation) `<http://example.com/person/nick> <http://xmlns.com/foaf/0.1/givenName> "Nick"^^<http://www.w3.org/2001/XMLSchema#string> .` is created where the property `FOAF.givenName` is the URI `<http://xmlns.com/foaf/0.1/givenName>` and `XSD.string` is the URI `<http://www.w3.org/2001/XMLSchema#string>`. You can bind namespaces to prefixes to shorten the URIs for RDF/XML, Turtle, N3, TriG, TriX & JSON-LD serializations: - ```python +```python g.bind("foaf", FOAF) g.bind("xsd", XSD) ``` + This will allow the n-triples triple above to be serialised like this: - ```python + +```python print(g.serialize(format="turtle")) ``` diff --git a/Taskfile.yml b/Taskfile.yml index 2b9582f5f..20643ca34 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -170,19 +170,19 @@ tasks: desc: Clean generated documentation cmds: - task: _rimraf - vars: { RIMRAF_TARGET: "docs/_build/" } + vars: { RIMRAF_TARGET: "site/" } docs: desc: Build documentation cmds: - echo "PYTHONPATH=${PYTHONPATH}" - - "{{.VENV_PYTHON}} -m sphinx.cmd.build -T -W -b html -d docs/_build/doctree docs docs/_build/html {{.CLI_ARGS}}" + - "{{.VENV_PYTHON}} -m mkdocs build {{.CLI_ARGS}}" docs:live-server: desc: Run a live server on generated docs cmds: - 'echo "NOTE: Docs must be built for this to work"' - - npx -p live-server live-server docs/_build/html/ {{.CLI_ARGS}} + - npx -p live-server live-server site/ {{.CLI_ARGS}} default: desc: Run validate @@ -272,7 +272,7 @@ tasks: pip-compile: cmds: - - cmd: "{{.PIP_COMPILE}} --quiet --annotate --emit-options --resolver=backtracking {{.CLI_ARGS}}" + - cmd: "{{.PIP_COMPILE}} --quiet --annotate --emit-options --no-strip-extras --resolver=backtracking {{.CLI_ARGS}}" docker:prepare: cmds: @@ -356,7 +356,7 @@ tasks: cd var/test-sdist/rdflib-* poetry install poetry run mypy --show-error-context --show-error-codes -p rdflib - poetry run sphinx-build -T -W -b html -d docs/_build/doctree docs docs/_build/html + poetry run mkdocs build poetry run pytest test:no_internet: diff --git a/admin/get_merged_prs.py b/admin/get_merged_prs.py index 7e96d1d47..53294bda1 100644 --- a/admin/get_merged_prs.py +++ b/admin/get_merged_prs.py @@ -5,7 +5,7 @@ import urllib.request # https://api.github.com/search/issues?q=repo:rdflib/rdflib+is:pr+merged:%3E=2023-08-02&per_page=300&page=1 -LAST_RELEASE_DATE = "2024-10-17" +LAST_RELEASE_DATE = "2024-10-29" ISSUES_URL = "https://api.github.com/search/issues" ITEMS = [] PAGE = 1 @@ -23,7 +23,11 @@ print(f"Getting {url}") with urllib.request.urlopen(url) as response: response_text = response.read() - link_headers = response.info()["link"].split(",") if response.info()["link"] is not None else None + link_headers = ( + response.info()["link"].split(",") + if response.info()["link"] is not None + else None + ) json_data = json.loads(response_text) ITEMS.extend(json_data["items"]) diff --git a/devtools/constraints.min b/devtools/constraints.min index 2a3f256b5..ccb2ec5b3 100644 --- a/devtools/constraints.min +++ b/devtools/constraints.min @@ -2,10 +2,11 @@ # these versions. The file's extension (`.min`) is chosen to evade Dependabot # which operates on `*.{txt,in}` files. 
isodate==0.7.2; python_version < "3.11" -pyparsing==2.1.0 +pyparsing==3.2.0 importlib-metadata==4.0.0 berkeleydb==18.1.2 networkx==2.0 -html5rdf==1.2.0 -lxml==4.3.0 +html5rdf==1.2.1 +lxml==4.8.0; python_version < "3.11" +lxml==4.9.3; python_version >= "3.11" orjson==3.9.14 diff --git a/devtools/diffrtpy.py b/devtools/diffrtpy.py index 1d4b09722..ad20c9e1f 100755 --- a/devtools/diffrtpy.py +++ b/devtools/diffrtpy.py @@ -3,18 +3,19 @@ This is a tool that can be used with git difftool to generate a diff that ignores type hints and comments. -The name of this script, ``diffrtpy`` is short for "diff runtime python", as +The name of this script, `diffrtpy`, is short for "diff runtime python", as this will only compare the parts of the python code that have a runtime impact. This is to make it easier to review PRs that contain type hints. To use this script -.. code-block:: bash - task run -- python -m pip install --upgrade strip-hints black python-minifier - PYLOGGING_LEVEL=INFO task run -- git difftool -y -x $(readlink -f devtools/diffrtpy.py) upstream/main | tee /var/tmp/compact.diff +```bash +task run -- python -m pip install --upgrade strip-hints black python-minifier +PYLOGGING_LEVEL=INFO task run -- git difftool -y -x $(readlink -f devtools/diffrtpy.py) upstream/main | tee /var/tmp/compact.diff +``` -Then attach ``/var/tmp/compact.diff`` to the PR. +Then attach `/var/tmp/compact.diff` to the PR. """ from __future__ import annotations @@ -26,7 +27,6 @@ from dataclasses import dataclass, field from difflib import unified_diff from pathlib import Path -from typing import List import black import python_minifier @@ -79,7 +79,7 @@ def __post_init__(self) -> None: parser.add_argument("rhs_file", nargs=1, type=str) parser.set_defaults(handler=self.handle) - def run(self, args: List[str]) -> None: + def run(self, args: list[str]) -> None: parse_result = self.parser.parse_args(args) verbosity = parse_result.verbosity diff --git a/devtools/requirements-poetry.in b/devtools/requirements-poetry.in index 1bf7b707a..e88caca3b 100644 --- a/devtools/requirements-poetry.in +++ b/devtools/requirements-poetry.in @@ -1,3 +1,4 @@ # Fixing this here as readthedocs can't use the compiled requirements-poetry.txt # due to conflicts. -poetry==1.8.4 +poetry==2.0.0 +poetry-plugin-export==1.9.0 diff --git a/docker/latest/Dockerfile b/docker/latest/Dockerfile index fbaa97480..d7417b63c 100644 --- a/docker/latest/Dockerfile +++ b/docker/latest/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/library/python:3.12.7-slim@sha256:af4e85f1cac90dd3771e47292ea7c8a9830abfabbe4faa5c53f158854c2e819d +FROM docker.io/library/python:3.13.3-slim@sha256:21e39cf1815802d4c6f89a0d3a166cc67ce58f95b6d1639e68a394c99310d2e5 COPY docker/latest/requirements.txt /var/tmp/build/ diff --git a/docker/latest/requirements.in b/docker/latest/requirements.in index 42fb39ae7..1a26d19a0 100644 --- a/docker/latest/requirements.in +++ b/docker/latest/requirements.in @@ -1,6 +1,4 @@ # This file is used for building a docker image of the latest rdflib release. It # will be updated by dependabot when new releases are made. -rdflib==7.1.0 -html5rdf==1.2.0 -# html5lib-modern is required to allow the Dockerfile to build on with pre-RDFLib-7.1.1 releases. 
-html5lib-modern==1.2.0 +rdflib==7.1.4 +html5rdf==1.2.1 diff --git a/docker/latest/requirements.txt b/docker/latest/requirements.txt index 570502462..51170050d 100644 --- a/docker/latest/requirements.txt +++ b/docker/latest/requirements.txt @@ -1,16 +1,12 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile docker/latest/requirements.in # -html5rdf==1.2 - # via - # -r docker/latest/requirements.in - # rdflib -html5lib-modern==1.2 - # via -r docker/latest/requirements.in -pyparsing==3.0.9 +html5rdf==1.2.1 + # via -r requirements.in +pyparsing==3.2.0 # via rdflib -rdflib==7.1.0 - # via -r docker/latest/requirements.in +rdflib==7.1.4 + # via -r requirements.in diff --git a/docker/unstable/Dockerfile b/docker/unstable/Dockerfile index 85564a955..7c58c5b56 100644 --- a/docker/unstable/Dockerfile +++ b/docker/unstable/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/library/python:3.12.7-slim@sha256:af4e85f1cac90dd3771e47292ea7c8a9830abfabbe4faa5c53f158854c2e819d +FROM docker.io/library/python:3.13.3-slim@sha256:21e39cf1815802d4c6f89a0d3a166cc67ce58f95b6d1639e68a394c99310d2e5 # This file is generated from docker:unstable in Taskfile.yml COPY var/requirements.txt /var/tmp/build/ diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 03f06e183..259d80c8e 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -46,7 +46,7 @@ Some ways in which you can contribute to RDFLib are: ## Pull Requests Contributions that involve changes to the RDFLib repository have to be made with -pull requests and should follow the [RDFLib developers guide](./developers.rst). +pull requests and should follow the [RDFLib developers guide](./developers.md). For changes that add features or affect the public API of RDFLib, it is recommended to first open an issue to discuss the change before starting to work @@ -55,5 +55,5 @@ spending time on it. ## Code of Conduct -All contributions to the project should be consistent with the [code of -conduct](./CODE_OF_CONDUCT.md) adopted by RDFLib. +All contributions to the project should be consistent with the +[code of conduct](./CODE_OF_CONDUCT.md) adopted by RDFLib. diff --git a/docs/_static/pyramid.css b/docs/_static/pyramid.css deleted file mode 100644 index e238803a4..000000000 --- a/docs/_static/pyramid.css +++ /dev/null @@ -1,323 +0,0 @@ -/* - * pylons.css_t - * ~~~~~~~~~~~~ - * - * Sphinx stylesheet -- pylons theme. - * - * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -body { - font-family: "Nobile", sans-serif; - font-size: 100%; - background-color: #393939; - color: #ffffff; - margin: 0; - padding: 0; -} - -div.documentwrapper { - float: left; - width: 100%; -} - -div.bodywrapper { - margin: 0 0 0 230px; -} - -hr { - border: 1px solid #B1B4B6; -} - -div.document { - background-color: #eee; -} - -div.header { - width:100%; - background: #f4ad32 url(headerbg.png) repeat-x 0 top; - border-bottom: 2px solid #ffffff; -} - -div.logo { - text-align: center; - padding-top: 10px; -} - -div.body { - background-color: #ffffff; - color: #3E4349; - padding: 0 30px 30px 30px; - font-size: 1em; - border: 2px solid #ddd; - border-right-style: none; - overflow: auto; -} - -div.footer { - color: #ffffff; - width: 100%; - padding: 13px 0; - text-align: center; - font-size: 75%; - background: transparent; - clear:both; -} - -div.footer a { - color: #ffffff; - text-decoration: none; -} - -div.footer a:hover { - color: #e88f00; - text-decoration: underline; -} - -div.related { - line-height: 30px; - color: #373839; - font-size: 0.8em; - background-color: #eee; -} - -div.related a { - color: #1b61d6; -} - -div.related ul { - padding-left: 240px; -} - -div.sphinxsidebar { - font-size: 0.75em; - line-height: 1.5em; -} - -div.sphinxsidebarwrapper{ - padding: 10px 0; -} - -div.sphinxsidebar h3, -div.sphinxsidebar h4 { - font-family: "Neuton", sans-serif; - color: #373839; - font-size: 1.4em; - font-weight: normal; - margin: 0; - padding: 5px 10px; - border-bottom: 2px solid #ddd; -} - -div.sphinxsidebar h4{ - font-size: 1.3em; -} - -div.sphinxsidebar h3 a { - color: #000000; -} - - -div.sphinxsidebar p { - color: #888; - padding: 5px 20px; -} - -div.sphinxsidebar p.topless { -} - -div.sphinxsidebar ul { - margin: 10px 20px; - padding: 0; - color: #373839; -} - -div.sphinxsidebar a { - color: #444; -} - -div.sphinxsidebar input { - border: 1px solid #ccc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar input[type=text]{ - margin-left: 20px; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 2px solid #c6d880; - background-color: #e6efc2; - width: 40%; - float: right; - border-right-style: none; - border-left-style: none; - padding: 10px 20px; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- body styles ----------------------------------------------------------- */ - -a, a .pre { - color: #1b61d6; - text-decoration: none; -} - -a:hover, a:hover .pre { - text-decoration: underline; -} - -div.body h1, -div.body h2, -div.body h3, -div.body h4, -div.body h5, -div.body h6 { - font-family: "Neuton", sans-serif; - background-color: #ffffff; - font-weight: normal; - color: #373839; - margin: 30px 0px 10px 0px; - padding: 5px 0; -} - -div.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; } -div.body h2 { font-size: 150%; background-color: #ffffff; } -div.body h3 { font-size: 120%; background-color: #ffffff; } -div.body h4 { font-size: 110%; background-color: #ffffff; } -div.body h5 { font-size: 100%; background-color: #ffffff; } -div.body h6 { font-size: 100%; background-color: #ffffff; } - -a.headerlink { - color: #1b61d6; - font-size: 0.8em; - padding: 0 4px 0 4px; - text-decoration: none; -} - -a.headerlink:hover { - text-decoration: underline; -} - -div.body p, div.body dd, div.body li { - line-height: 1.5em; -} - 
-div.admonition p.admonition-title + p { - display: inline; -} - -div.highlight{ - background-color: white; -} - -div.note { - border: 2px solid #7a9eec; - border-right-style: none; - border-left-style: none; - padding: 10px 20px 10px 60px; - background: #e1ecfe url(dialog-note.png) no-repeat 10px 8px; -} - -div.seealso { - background: #fff6bf url(dialog-seealso.png) no-repeat 10px 8px; - border: 2px solid #ffd324; - border-left-style: none; - border-right-style: none; - padding: 10px 20px 10px 60px; -} - -div.topic { - background: #eeeeee; - border: 2px solid #C6C9CB; - padding: 10px 20px; - border-right-style: none; - border-left-style: none; -} - -div.warning { - background: #fbe3e4 url(dialog-warning.png) no-repeat 10px 8px; - border: 2px solid #fbc2c4; - border-right-style: none; - border-left-style: none; - padding: 10px 20px 10px 60px; -} - -p.admonition-title { - display: none; -} - -p.admonition-title:after { - content: ":"; -} - -pre { - padding: 10px; - background-color: #fafafa; - color: #222; - line-height: 1.2em; - border: 2px solid #C6C9CB; - font-size: 1.1em; - margin: 1.5em 0 1.5em 0; - border-right-style: none; - border-left-style: none; -} - -tt { - background-color: transparent; - color: #222; - font-size: 1.1em; - font-family: monospace; -} - -.viewcode-back { - font-family: "Nobile", sans-serif; -} - -div.viewcode-block:target { - background-color: #fff6bf; - border: 2px solid #ffd324; - border-left-style: none; - border-right-style: none; - padding: 10px 20px; -} - -table.highlighttable { - width: 100%; -} - -table.highlighttable td { - padding: 0; -} - -a em.std-term { - color: #007f00; -} - -a:hover em.std-term { - text-decoration: underline; -} - -.download { - font-family: "Nobile", sans-serif; - font-weight: normal; - font-style: normal; -} - -tt.xref { - font-weight: normal; - font-style: normal; -} \ No newline at end of file diff --git a/docs/_themes/armstrong/LICENSE b/docs/_themes/armstrong/LICENSE deleted file mode 100644 index 894aa018a..000000000 --- a/docs/_themes/armstrong/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2011 Bay Citizen & Texas Tribune - -Original ReadTheDocs.org code -Copyright (c) 2010 Charles Leifer, Eric Holscher, Bobby Grace - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. 
- diff --git a/docs/_themes/armstrong/README b/docs/_themes/armstrong/README deleted file mode 100644 index 56ce661cd..000000000 --- a/docs/_themes/armstrong/README +++ /dev/null @@ -1,3 +0,0 @@ -This is the Armstrong Sphinx theme from https://github.com/armstrong/armstrong_sphinx - -Used under BSD license. diff --git a/docs/_themes/armstrong/layout.html b/docs/_themes/armstrong/layout.html deleted file mode 100644 index d7b8fbb14..000000000 --- a/docs/_themes/armstrong/layout.html +++ /dev/null @@ -1,48 +0,0 @@ -{% extends "basic/layout.html" %} - -{% set script_files = script_files + [pathto("_static/searchtools.js", 1)] %} - -{% block htmltitle %} -{{ super() }} - - - -{% endblock %} - -{% block footer %} - - - -{% if theme_analytics_code %} - - -{% endif %} - -{% endblock %} diff --git a/docs/_themes/armstrong/static/rtd.css_t b/docs/_themes/armstrong/static/rtd.css_t deleted file mode 100644 index 489911a2f..000000000 --- a/docs/_themes/armstrong/static/rtd.css_t +++ /dev/null @@ -1,784 +0,0 @@ -/* - * rtd.css - * ~~~~~~~~~~~~~~~ - * - * Sphinx stylesheet -- sphinxdoc theme. Originally created by - * Armin Ronacher for Werkzeug. - * - * Customized for ReadTheDocs by Eric Pierce & Eric Holscher - * - * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* RTD colors - * light blue: {{ theme_light_color }} - * medium blue: {{ theme_medium_color }} - * dark blue: {{ theme_dark_color }} - * dark grey: {{ theme_grey_color }} - * - * medium blue hover: {{ theme_medium_color_hover }}; - * green highlight: {{ theme_green_highlight }} - * light blue (project bar): {{ theme_light_color }} - */ - -@import url("basic.css"); - -/* PAGE LAYOUT -------------------------------------------------------------- */ - -body { - font: 100%/1.5 "ff-meta-web-pro-1","ff-meta-web-pro-2",Arial,"Helvetica Neue",sans-serif; - text-align: center; - color: black; - background-color: {{ theme_background }}; - padding: 0; - margin: 0; -} - -div.document { - text-align: left; - background-color: {{ theme_light_color }}; -} - -div.bodywrapper { - background-color: {{ theme_white }}; - border-left: 1px solid {{ theme_lighter_gray }}; - border-bottom: 1px solid {{ theme_lighter_gray }}; - margin: 0 0 0 16em; -} - -div.body { - margin: 0; - padding: 0.5em 1.3em; - max-width: 55em; - min-width: 20em; -} - -div.related { - font-size: 1em; - background-color: {{ theme_background }}; -} - -div.documentwrapper { - float: left; - width: 100%; - background-color: {{ theme_light_color }}; -} - -p.logo { - padding-top: 30px; -} - -/* HEADINGS --------------------------------------------------------------- */ - -h1 { - margin: 0; - padding: 0.7em 0 0.3em 0; - font-size: 1.5em; - line-height: 1.15; - color: {{ theme_h1 }}; - clear: both; -} - -h2 { - margin: 2em 0 0.2em 0; - font-size: 1.35em; - padding: 0; - color: {{ theme_h2 }}; -} - -h3 { - margin: 1em 0 -0.3em 0; - font-size: 1.2em; - color: {{ theme_h3 }}; -} - -div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a { - color: black; -} - -h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { - display: none; - margin: 0 0 0 0.3em; - padding: 0 0.2em 0 0.2em; - color: {{ theme_gray_a }} !important; -} - -h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, -h5:hover a.anchor, h6:hover a.anchor { - display: inline; -} - -h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, -h5 a.anchor:hover, h6 
a.anchor:hover { - color: {{ theme_gray_7 }}; - background-color: {{ theme_dirty_white }}; -} - - -/* LINKS ------------------------------------------------------------------ */ - -/* Normal links get a pseudo-underline */ -a { - color: {{ theme_link_color }}; - text-decoration: none; - border-bottom: 1px solid {{ theme_link_color_decoration }}; -} - -/* Links in sidebar, TOC, index trees and tables have no underline */ -.sphinxsidebar a, -.toctree-wrapper a, -.indextable a, -#indices-and-tables a { - color: {{ theme_dark_gray }}; - text-decoration: none; - border-bottom: none; -} - -/* Most links get an underline-effect when hovered */ -a:hover, -div.toctree-wrapper a:hover, -.indextable a:hover, -#indices-and-tables a:hover { - color: {{ theme_black }}; - text-decoration: none; - border-bottom: 1px solid {{ theme_black }}; -} - -/* Footer links */ -div.footer a { - color: {{ theme_background_text_link }}; - text-decoration: none; - border: none; -} -div.footer a:hover { - color: {{ theme_medium_color_link_hover }}; - text-decoration: underline; - border: none; -} - -/* Permalink anchor (subtle grey with a red hover) */ -div.body a.headerlink { - color: {{ theme_lighter_gray }}; - font-size: 1em; - margin-left: 6px; - padding: 0 4px 0 4px; - text-decoration: none; - border: none; -} -div.body a.headerlink:hover { - color: {{ theme_negative_text }}; - border: none; -} - - -/* NAVIGATION BAR --------------------------------------------------------- */ - -div.related ul { - height: 2.5em; -} - -div.related ul li { - margin: 0; - padding: 0.65em 0; - float: left; - display: block; - color: {{ theme_background_link_half }}; /* For the >> separators */ - font-size: 0.8em; -} - -div.related ul li.right { - float: right; - margin-right: 5px; - color: transparent; /* Hide the | separators */ -} - -/* "Breadcrumb" links in nav bar */ -div.related ul li a { - order: none; - background-color: inherit; - font-weight: bold; - margin: 6px 0 6px 4px; - line-height: 1.75em; - color: {{ theme_background_link }}; - text-shadow: 0 1px rgba(0, 0, 0, 0.5); - padding: 0.4em 0.8em; - border: none; - border-radius: 3px; -} -/* previous / next / modules / index links look more like buttons */ -div.related ul li.right a { - margin: 0.375em 0; - background-color: {{ theme_medium_color_hover }}; - text-shadow: 0 1px rgba(0, 0, 0, 0.5); - border-radius: 3px; - -webkit-border-radius: 3px; - -moz-border-radius: 3px; -} -/* All navbar links light up as buttons when hovered */ -div.related ul li a:hover { - background-color: {{ theme_medium_color }}; - color: {{ theme_white }}; - text-decoration: none; - border-radius: 3px; - -webkit-border-radius: 3px; - -moz-border-radius: 3px; -} -/* Take extra precautions for tt within links */ -a tt, -div.related ul li a tt { - background: inherit !important; - color: inherit !important; -} - - -/* SIDEBAR ---------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 0; -} - -div.sphinxsidebar { - margin: 0; - margin-left: -100%; - float: left; - top: 3em; - left: 0; - padding: 0 1em; - width: 14em; - font-size: 1em; - text-align: left; - background-color: {{ theme_light_color }}; -} - -div.sphinxsidebar img { - max-width: 12em; -} - -div.sphinxsidebar h3, div.sphinxsidebar h4 { - margin: 1.2em 0 0.3em 0; - font-size: 1em; - padding: 0; - color: {{ theme_gray_2 }}; - font-family: "ff-meta-web-pro-1", "ff-meta-web-pro-2", "Arial", "Helvetica Neue", sans-serif; -} - -div.sphinxsidebar h3 a { - color: {{ theme_grey_color }}; -} - 
-div.sphinxsidebar ul, -div.sphinxsidebar p { - margin-top: 0; - padding-left: 0; - line-height: 130%; - background-color: {{ theme_light_color }}; -} - -/* No bullets for nested lists, but a little extra indentation */ -div.sphinxsidebar ul ul { - list-style-type: none; - margin-left: 1.5em; - padding: 0; -} - -/* A little top/bottom padding to prevent adjacent links' borders - * from overlapping each other */ -div.sphinxsidebar ul li { - padding: 1px 0; -} - -/* A little left-padding to make these align with the ULs */ -div.sphinxsidebar p.topless { - padding-left: 0 0 0 1em; -} - -/* Make these into hidden one-liners */ -div.sphinxsidebar ul li, -div.sphinxsidebar p.topless { - white-space: nowrap; - overflow: hidden; -} -/* ...which become visible when hovered */ -div.sphinxsidebar ul li:hover, -div.sphinxsidebar p.topless:hover { - overflow: visible; -} - -/* Search text box and "Go" button */ -#searchbox { - margin-top: 2em; - margin-bottom: 1em; - background: {{ theme_dirtier_white }}; - padding: 0.5em; - border-radius: 6px; - -moz-border-radius: 6px; - -webkit-border-radius: 6px; -} -#searchbox h3 { - margin-top: 0; -} - -/* Make search box and button abut and have a border */ -input, -div.sphinxsidebar input { - border: 1px solid {{ theme_gray_9 }}; - float: left; -} - -/* Search textbox */ -input[type="text"] { - margin: 0; - padding: 0 3px; - height: 20px; - width: 144px; - border-top-left-radius: 3px; - border-bottom-left-radius: 3px; - -moz-border-radius-topleft: 3px; - -moz-border-radius-bottomleft: 3px; - -webkit-border-top-left-radius: 3px; - -webkit-border-bottom-left-radius: 3px; -} -/* Search button */ -input[type="submit"] { - margin: 0 0 0 -1px; /* -1px prevents a double-border with textbox */ - height: 22px; - color: {{ theme_dark_gray }}; - background-color: {{ theme_light_color }}; - padding: 1px 4px; - font-weight: bold; - border-top-right-radius: 3px; - border-bottom-right-radius: 3px; - -moz-border-radius-topright: 3px; - -moz-border-radius-bottomright: 3px; - -webkit-border-top-right-radius: 3px; - -webkit-border-bottom-right-radius: 3px; -} -input[type="submit"]:hover { - color: {{ theme_white }}; - background-color: {{ theme_green_highlight }}; -} - -div.sphinxsidebar p.searchtip { - clear: both; - padding: 0.5em 0 0 0; - background: {{ theme_dirtier_white }}; - color: {{ theme_gray }}; - font-size: 0.9em; -} - -/* Sidebar links are unusual */ -div.sphinxsidebar li a, -div.sphinxsidebar p a { - background: {{ theme_light_color }}; /* In case links overlap main content */ - border-radius: 3px; - -moz-border-radius: 3px; - -webkit-border-radius: 3px; - border: 1px solid transparent; /* To prevent things jumping around on hover */ - padding: 0 5px 0 5px; -} -div.sphinxsidebar li a:hover, -div.sphinxsidebar p a:hover { - color: {{ theme_black }}; - text-decoration: none; - border: 1px solid {{ theme_light_gray }}; -} - -/* Tweak any link appearing in a heading */ -div.sphinxsidebar h3 a { -} - - - - -/* OTHER STUFF ------------------------------------------------------------ */ - -cite, code, tt { - font-family: 'Consolas', 'Deja Vu Sans Mono', - 'Bitstream Vera Sans Mono', monospace; - font-size: 0.95em; - letter-spacing: 0.01em; -} - -tt { - background-color: {{ theme_code_background }}; - color: {{ theme_dark_gray }}; -} - -tt.descname, tt.descclassname, tt.xref { - border: 0; -} - -hr { - border: 1px solid {{ theme_ruler }}; - margin: 2em; -} - -pre, #_fontwidthtest { - font-family: 'Consolas', 'Deja Vu Sans Mono', - 'Bitstream Vera Sans Mono', monospace; - 
margin: 1em 2em; - font-size: 0.95em; - letter-spacing: 0.015em; - line-height: 120%; - padding: 0.5em; - border: 1px solid {{ theme_lighter_gray }}; - background-color: {{ theme_code_background }}; - border-radius: 6px; - -moz-border-radius: 6px; - -webkit-border-radius: 6px; -} - -pre a { - color: inherit; - text-decoration: underline; -} - -td.linenos pre { - padding: 0.5em 0; -} - -div.quotebar { - background-color: {{ theme_almost_white }}; - max-width: 250px; - float: right; - padding: 2px 7px; - border: 1px solid {{ theme_lighter_gray }}; -} - -div.topic { - background-color: {{ theme_almost_white }}; -} - -table { - border-collapse: collapse; - margin: 0 -0.5em 0 0; -} - -table td, table th { - padding: 0.2em 0.5em 0.2em 0.5em; -} - - -/* ADMONITIONS AND WARNINGS ------------------------------------------------- */ - -/* Shared by admonitions, warnings and sidebars */ -div.admonition, -div.warning, -div.sidebar { - font-size: 0.9em; - margin: 2em; - padding: 0; - /* - border-radius: 6px; - -moz-border-radius: 6px; - -webkit-border-radius: 6px; - */ -} -div.admonition p, -div.warning p, -div.sidebar p { - margin: 0.5em 1em 0.5em 1em; - padding: 0; -} -div.admonition pre, -div.warning pre, -div.sidebar pre { - margin: 0.4em 1em 0.4em 1em; -} -div.admonition p.admonition-title, -div.warning p.admonition-title, -div.sidebar p.sidebar-title { - margin: 0; - padding: 0.1em 0 0.1em 0.5em; - color: white; - font-weight: bold; - font-size: 1.1em; - text-shadow: 0 1px rgba(0, 0, 0, 0.5); -} -div.admonition ul, div.admonition ol, -div.warning ul, div.warning ol, -div.sidebar ul, div.sidebar ol { - margin: 0.1em 0.5em 0.5em 3em; - padding: 0; -} - - -/* Admonitions and sidebars only */ -div.admonition, div.sidebar { - border: 1px solid {{ theme_positive_dark }}; - background-color: {{ theme_positive_light }}; -} -div.admonition p.admonition-title, -div.sidebar p.sidebar-title { - background-color: {{ theme_positive_medium }}; - border-bottom: 1px solid {{ theme_positive_dark }}; -} - - -/* Warnings only */ -div.warning { - border: 1px solid {{ theme_negative_dark }}; - background-color: {{ theme_negative_light }}; -} -div.warning p.admonition-title { - background-color: {{ theme_negative_medium }}; - border-bottom: 1px solid {{ theme_negative_dark }}; -} - - -/* Sidebars only */ -div.sidebar { - max-width: 200px; -} - - - -div.versioninfo { - margin: 1em 0 0 0; - border: 1px solid {{ theme_lighter_gray }}; - background-color: {{ theme_light_medium_color }}; - padding: 8px; - line-height: 1.3em; - font-size: 0.9em; -} - -.viewcode-back { - font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', - 'Verdana', sans-serif; -} - -div.viewcode-block:target { - background-color: {{ theme_viewcode_bg }}; - border-top: 1px solid {{ theme_viewcode_border }}; - border-bottom: 1px solid {{ theme_viewcode_border }}; -} - -dl { - margin: 1em 0 2.5em 0; -} - -/* Highlight target when you click an internal link */ -dt:target { - background: {{ theme_highlight }}; -} -/* Don't highlight whole divs */ -div.highlight { - background: transparent; -} -/* But do highlight spans (so search results can be highlighted) */ -span.highlight { - background: {{ theme_highlight }}; -} - -div.footer { - background-color: {{ theme_background }}; - color: {{ theme_background_text }}; - padding: 0 2em 2em 2em; - clear: both; - font-size: 0.8em; - text-align: center; -} - -p { - margin: 0.8em 0 0.5em 0; -} - -.section p img { - margin: 1em 2em; -} - - -/* MOBILE LAYOUT 
-------------------------------------------------------------- */ - -@media screen and (max-width: 600px) { - - h1, h2, h3, h4, h5 { - position: relative; - } - - ul { - padding-left: 1.75em; - } - - div.bodywrapper a.headerlink, #indices-and-tables h1 a { - color: {{ theme_almost_dirty_white }}; - font-size: 80%; - float: right; - line-height: 1.8; - position: absolute; - right: -0.7em; - visibility: inherit; - } - - div.bodywrapper h1 a.headerlink, #indices-and-tables h1 a { - line-height: 1.5; - } - - pre { - font-size: 0.7em; - overflow: auto; - word-wrap: break-word; - white-space: pre-wrap; - } - - div.related ul { - height: 2.5em; - padding: 0; - text-align: left; - } - - div.related ul li { - clear: both; - color: {{ theme_dark_color }}; - padding: 0.2em 0; - } - - div.related ul li:last-child { - border-bottom: 1px dotted {{ theme_medium_color }}; - padding-bottom: 0.4em; - margin-bottom: 1em; - width: 100%; - } - - div.related ul li a { - color: {{ theme_dark_color }}; - padding-right: 0; - } - - div.related ul li a:hover { - background: inherit; - color: inherit; - } - - div.related ul li.right { - clear: none; - padding: 0.65em 0; - margin-bottom: 0.5em; - } - - div.related ul li.right a { - color: {{ theme_white }}; - padding-right: 0.8em; - } - - div.related ul li.right a:hover { - background-color: {{ theme_medium_color }}; - } - - div.body { - clear: both; - min-width: 0; - word-wrap: break-word; - } - - div.bodywrapper { - margin: 0 0 0 0; - } - - div.sphinxsidebar { - float: none; - margin: 0; - width: auto; - } - - div.sphinxsidebar input[type="text"] { - height: 2em; - line-height: 2em; - width: 70%; - } - - div.sphinxsidebar input[type="submit"] { - height: 2em; - margin-left: 0.5em; - width: 20%; - } - - div.sphinxsidebar p.searchtip { - background: inherit; - margin-bottom: 1em; - } - - div.sphinxsidebar ul li, div.sphinxsidebar p.topless { - white-space: normal; - } - - .bodywrapper img { - display: block; - margin-left: auto; - margin-right: auto; - max-width: 100%; - } - - div.documentwrapper { - float: none; - } - - div.admonition, div.warning, pre, blockquote { - margin-left: 0em; - margin-right: 0em; - } - - .body p img { - margin: 0; - } - - #searchbox { - background: transparent; - } - - .related:not(:first-child) li { - display: none; - } - - .related:not(:first-child) li.right { - display: block; - } - - div.footer { - padding: 1em; - } - - .rtd_doc_footer .badge { - float: none; - margin: 1em auto; - position: static; - } - - .rtd_doc_footer .badge.revsys-inline { - margin-right: auto; - margin-bottom: 2em; - } - - table.indextable { - display: block; - width: auto; - } - - .indextable tr { - display: block; - } - - .indextable td { - display: block; - padding: 0; - width: auto !important; - } - - .indextable td dt { - margin: 1em 0; - } - - ul.search { - margin-left: 0.25em; - } - - ul.search li div.context { - font-size: 90%; - line-height: 1.1; - margin-bottom: 1; - margin-left: 0; - } - -} diff --git a/docs/_themes/armstrong/theme-old.conf b/docs/_themes/armstrong/theme-old.conf deleted file mode 100644 index c77da3a19..000000000 --- a/docs/_themes/armstrong/theme-old.conf +++ /dev/null @@ -1,65 +0,0 @@ -[theme] -inherit = default -stylesheet = rtd.css -pygment_style = default -show_sphinx = False - -[options] -show_rtd = True - -white = #ffffff -almost_white = #f8f8f8 -barely_white = #f2f2f2 -dirty_white = #eeeeee -almost_dirty_white = #e6e6e6 -dirtier_white = #DAC6AF -lighter_gray = #cccccc -gray_a = #aaaaaa -gray_9 = #999999 -light_gray = #888888 
-gray_7 = #777777 -gray = #666666 -dark_gray = #444444 -gray_2 = #222222 -black = #111111 -light_color = #EDE4D8 -light_medium_color = #DDEAF0 -medium_color_link = #634320 -medium_color_link_hover = #261a0c -dark_color = rgba(160, 109, 52, 1.0) - -h1 = #1f3744 -h2 = #335C72 -h3 = #638fa6 - -link_color = #335C72 -link_color_decoration = #99AEB9 - -medium_color_hover = rgba(255, 255, 255, 0.25) -medium_color = rgba(255, 255, 255, 0.5) -green_highlight = #8ecc4c - - -positive_dark = rgba(51, 77, 0, 1.0) -positive_medium = rgba(102, 153, 0, 1.0) -positive_light = rgba(102, 153, 0, 0.1) - -negative_dark = rgba(51, 13, 0, 1.0) -negative_medium = rgba(204, 51, 0, 1.0) -negative_light = rgba(204, 51, 0, 0.1) -negative_text = #c60f0f - -ruler = #abc - -viewcode_bg = #f4debf -viewcode_border = #ac9 - -highlight = #ffe080 - -code_background = rgba(0, 0, 0, 0.075) - -background = rgba(135, 57, 34, 1.0) -background_link = rgba(212, 195, 172, 1.0) -background_link_half = rgba(212, 195, 172, 0.5) -background_text = rgba(212, 195, 172, 1.0) -background_text_link = rgba(171, 138, 93, 1.0) diff --git a/docs/_themes/armstrong/theme.conf b/docs/_themes/armstrong/theme.conf deleted file mode 100644 index 5930488d7..000000000 --- a/docs/_themes/armstrong/theme.conf +++ /dev/null @@ -1,65 +0,0 @@ -[theme] -inherit = default -stylesheet = rtd.css -pygment_style = default -show_sphinx = False - -[options] -show_rtd = True - -white = #ffffff -almost_white = #f8f8f8 -barely_white = #f2f2f2 -dirty_white = #eeeeee -almost_dirty_white = #e6e6e6 -dirtier_white = #dddddd -lighter_gray = #cccccc -gray_a = #aaaaaa -gray_9 = #999999 -light_gray = #888888 -gray_7 = #777777 -gray = #666666 -dark_gray = #444444 -gray_2 = #222222 -black = #111111 -light_color = #e8ecef -light_medium_color = #DDEAF0 -medium_color = #8ca1af -medium_color_link = #86989b -medium_color_link_hover = #a6b8bb -dark_color = #465158 - -h1 = #000000 -h2 = #465158 -h3 = #6c818f - -link_color = #444444 -link_color_decoration = #CCCCCC - -medium_color_hover = #697983 -green_highlight = #8ecc4c - - -positive_dark = #609060 -positive_medium = #70a070 -positive_light = #e9ffe9 - -negative_dark = #900000 -negative_medium = #b04040 -negative_light = #ffe9e9 -negative_text = #c60f0f - -ruler = #abc - -viewcode_bg = #f4debf -viewcode_border = #ac9 - -highlight = #ffe080 - -code_background = #eeeeee - -background = #465158 -background_link = #ffffff -background_link_half = #ffffff -background_text = #eeeeee -background_text_link = #86989b diff --git a/docs/apidocs/.gitignore b/docs/apidocs/.gitignore deleted file mode 100644 index 89867378b..000000000 --- a/docs/apidocs/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -modules.rst -rdflib*.rst diff --git a/docs/apidocs/examples.rst b/docs/apidocs/examples.rst deleted file mode 100644 index 43b92c137..000000000 --- a/docs/apidocs/examples.rst +++ /dev/null @@ -1,133 +0,0 @@ -examples Package -================ - -These examples all live in ``./examples`` in the source-distribution of RDFLib. - -:mod:`~examples.conjunctive_graphs` Module ------------------------------------------- - -.. automodule:: examples.conjunctive_graphs - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.custom_datatype` Module ---------------------------------------- - -.. automodule:: examples.custom_datatype - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.custom_eval` Module ------------------------------------ - -.. 
automodule:: examples.custom_eval - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.foafpaths` Module ---------------------------------- - -.. automodule:: examples.foafpaths - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.prepared_query` Module --------------------------------------- - -.. automodule:: examples.prepared_query - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.resource_example` Module ----------------------------------------- - -.. automodule:: examples.resource_example - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.berkeleydb_example` Module ------------------------------------------- - -.. automodule:: examples.berkeleydb_example - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.slice` Module ------------------------------ - -.. automodule:: examples.slice - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.smushing` Module --------------------------------- - -.. automodule:: examples.smushing - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.sparql_query_example` Module --------------------------------------------- - -.. automodule:: examples.sparql_query_example - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.sparql_update_example` Module ---------------------------------------------- - -.. automodule:: examples.sparql_update_example - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.sparqlstore_example` Module -------------------------------------------- - -.. automodule:: examples.sparqlstore_example - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.swap_primer` Module ------------------------------------ - -.. automodule:: examples.swap_primer - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.transitive` Module ----------------------------------- - -.. automodule:: examples.transitive - :members: - :undoc-members: - :show-inheritance: - -:mod:`~examples.secure_with_audit` Module ------------------------------------------ - -.. automodule:: examples.secure_with_audit - :members: - :undoc-members: - :show-inheritance: - - -:mod:`~examples.secure_with_urlopen` Module -------------------------------------------- - -.. automodule:: examples.secure_with_urlopen - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/changelog.md b/docs/changelog.md index 63ae71beb..e40ac58a2 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,4 +1,3 @@ # Changelog -```{include} ../CHANGELOG.md -``` +{% include "../CHANGELOG.md" %} diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 44b21a91b..000000000 --- a/docs/conf.py +++ /dev/null @@ -1,333 +0,0 @@ -# rdflib documentation build configuration file, created by -# sphinx-quickstart on Fri May 15 15:03:54 2009. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. -# https://www.sphinx-doc.org/en/master/usage/configuration.html -from __future__ import annotations - -import logging -import os -import re -import sys -from typing import Any, Dict - -import sphinx -import sphinx.application - -import rdflib - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. 
If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.append(os.path.abspath("..")) -sys.path.append(os.path.abspath("..")) - -# -- General configuration ----------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -# extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.doctest'] -extensions = [ - "sphinxcontrib.apidoc", - "sphinx.ext.autodoc", - #'sphinx.ext.autosummary', - "sphinx_autodoc_typehints", - "sphinx.ext.doctest", - "sphinx.ext.intersphinx", - "sphinx.ext.todo", - "sphinx.ext.coverage", - "sphinx.ext.ifconfig", - "sphinx.ext.viewcode", - "myst_parser", - "sphinx.ext.autosectionlabel", -] - -# https://github.com/sphinx-contrib/apidoc/blob/master/README.rst#configuration -apidoc_module_dir = "../rdflib" -apidoc_output_dir = "apidocs" - -# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html -autodoc_default_options = {"special-members": True} -autodoc_inherit_docstrings = True - -# https://github.com/tox-dev/sphinx-autodoc-typehints -always_document_param_types = True - -autosummary_generate = True - -autosectionlabel_prefix_document = True - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# epydoc_mapping = { -# '/_static/api/': [r'rdflib\.'], -# } - -# The suffix of source filenames. -source_suffix = ".rst" - -# The encoding of source files. -source_encoding = "utf-8" - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = "rdflib" -copyright = "2009 - 2024, RDFLib Team" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. - - -# Find version. We have to do this because we can't import it in Python 3 until -# its been automatically converted in the setup process. -# UPDATE: This function is no longer used; once builds are confirmed to succeed, it -# can/should be removed. --JCL 2022-12-30 -def find_version(filename): - _version_re = re.compile(r'__version__ = "(.*)"') - for line in open(filename): - version_match = _version_re.match(line) - if version_match: - return version_match.group(1) - - -# The full version, including alpha/beta/rc tags. -release = rdflib.__version__ -# The short X.Y version. -version = re.sub("[0-9]+\\.[0-9]\\..*", "\1", release) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -# unused_docs = [] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -exclude_trees = ["_build", "draft"] - -# The reST default role (used for this markup: `text`) to use for all documents. -default_role = "py:obj" - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). 
-add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = "armstrong" - - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [ - "_themes", -] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None -html_logo = "_static/RDFlib.png" - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -html_favicon = "_static/RDFlib.ico" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = "rdflibdoc" - - -# -- Options for LaTeX output -------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). 
-# latex_documents = [
-#     ("index", "rdflib.tex", "rdflib Documentation", "RDFLib Team", "manual"),
-# ]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-# latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-# latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-# latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-# latex_appendices = []
-
-# If false, no module index is generated.
-# latex_use_modindex = True
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {
-    "python": ("https://docs.python.org/3.8", None),
-}
-
-html_experimental_html5_writer = True
-
-needs_sphinx = "4.1.2"
-
-suppress_warnings = [
-    # This is here to prevent:
-    # "WARNING: more than one target found for cross-reference"
-    "ref.python",
-    "autosectionlabel.*",
-]
-
-sphinx_version = tuple(int(part) for part in sphinx.__version__.split("."))
-
-
-nitpicky = True
-
-nitpick_ignore = [
-    ("py:class", "urllib.response.addinfourl"),
-    ("py:class", "importlib.metadata.EntryPoint"),
-    ("py:class", "xml.dom.minidom.Document"),
-    ("py:class", "xml.dom.minidom.DocumentFragment"),
-    ("py:class", "isodate.duration.Duration"),
-    ("py:class", "pyparsing.core.TokenConverter"),
-    ("py:class", "pyparsing.results.ParseResults"),
-    ("py:class", "pyparsing.core.ParserElement"),
-]
-
-
-def autodoc_skip_member_handler(
-    app: sphinx.application.Sphinx,
-    what: str,
-    name: str,
-    obj: Any,
-    skip: bool,
-    options: Dict[str, Any],
-):
-    """
-    This function will be called by Sphinx when it is deciding whether to skip a
-    member of a class or module.
-    """
-    # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#event-autodoc-skip-member
-    if (
-        app.env.docname == "apidocs/rdflib"
-        and what == "module"
-        and type(obj).__name__.endswith("DefinedNamespaceMeta")
-    ):
-        # Don't document namespaces in the `rdflib` module, they will be
-        # documented in the `rdflib.namespace` module instead and Sphinx does
-        # not like when these are documented in two places.
-        #
-        # An example of the WARNINGS that occur without this is:
-        #
-        # "WARNING: duplicate object description of rdflib.namespace._SDO.SDO,
-        # other instance in apidocs/rdflib, use :noindex: for one of them"
-        logging.info(
-            "Skipping %s %s in %s, it will be documented in ",
-            what,
-            name,
-            app.env.docname,
-        )
-        return True
-    return None
-
-
-# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#skipping-members
-def setup(app: sphinx.application.Sphinx) -> None:
-    """
-    Setup the Sphinx application.
-    """
-
-    # Register a autodoc-skip-member handler so that certain members can be
-    # skipped.
-    app.connect("autodoc-skip-member", autodoc_skip_member_handler)
diff --git a/docs/decisions.md b/docs/decisions.md
new file mode 100644
index 000000000..9f68c4260
--- /dev/null
+++ b/docs/decisions.md
@@ -0,0 +1,35 @@
+# Decision Records
+
+To ensure that significant changes to RDFLib are made with sufficient consultation, consideration and planning, they should be preceded by a decision record that captures the particulars of the decision that led to the change.
+
+Decision records present the users and maintainers of RDFLib with an opportunity to review decisions before effort is expended to implement the decision in code, and it also makes it possible to review decisions without having to reconstruct them from the code changes that implement them.
+
+Whether a change is significant is hard to measure objectively, but some characteristics that may indicate that a change is significant include:
+
+* It will require changes to code that uses RDFLib.
+* It cannot be reversed without requiring changes to code that uses RDFLib.
+* It is onerous to reverse later.
+* It increases the maintenance burden of RDFLib.
+* It is very large.
+
+Some of these characteristics are not binary but measured in degrees, so some discretion is required when determining if an architectural decision record is appropriate.
+
+Decision records may also be used for changes that do not have any of the listed characteristics if a decision record would be otherwise helpful, for example to capture a decision to change the maintenance process of RDFLib.
+
+Changes not preceded by decision records won't be rejected solely on this basis even if they are deemed significant, and decision records may also be created retrospectively for changes.
+
+Decision records as described here are similar to the concept of [Architectural Decision Records](https://adr.github.io/), though the concept used here is slightly broader as it could include decisions which are not classified as architectural.
+
+## Creating a decision record
+
+Decision records should be added to the RDFLib repository in the `./docs/decisions/` directory with a name of the form `{YYYYmmdd}-{title}.md`.
+
+The content of the decision record should succinctly describe the context of the decision, the decision itself, and the status of the decision.
+
+Decision records should preferably follow [Michael Nygard's decision record template](https://github.com/joelparkerhenderson/architecture-decision-record/blob/main/templates/decision-record-template-by-michael-nygard/index.md), which he described in a [2011 article](https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions.html) on documenting architecture decisions.
+
+For questions about decision records, please reach out to the RDFLib maintainers and community using the options given under *Further help and contact* in the documentation.
+
+## Decisions list
+
+- [Default branch](decisions/20220826-default_branch.md)
diff --git a/docs/decisions/20220826-default_branch.md b/docs/decisions/20220826-default_branch.md
new file mode 100644
index 000000000..22443cfca
--- /dev/null
+++ b/docs/decisions/20220826-default_branch.md
@@ -0,0 +1,30 @@
+# Default Branch Name
+
+!!! success "Status"
+    Accepted
+
+## Context
+
+In recent years usage of the word `master` has become somewhat controversial, [as noted by SFC][SFC-BNAMING], and consequently the default branch name of Git repos has become `main`, both in Git itself [according to SFC][SFC-BNAMING] and in Git hosting solutions such as GitHub (see the GitHub [documentation][GH-BRANCHES]).
+
+## Decision
+
+RDFLib's default branch will be renamed from `master` to `main`. This is primarily to stay in line with modern conventions and to adhere to the principle of least surprise.
+
+## Consequences
+
+Anticipated negative consequences:
+
+* Some links to old code will be broken.
+* Some people's workflows may break unexpectedly and need adjusting.
+* Any code and systems reliant on the old default branch name will fail.
+
+Anticipated positive consequences:
+
+* It will become a bit easier to work with RDFLib for developers that are used
+  to `main` as the default branch.
+ +## References + +[GH-BRANCHES]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-branches#about-the-default-branch "GitHub: About the default branch" +[SFC-BNAMING]: https://sfconservancy.org/news/2020/jun/23/gitbranchname/ "Regarding Git and Branch Naming" diff --git a/docs/decisions/20220826-default_branch.rst b/docs/decisions/20220826-default_branch.rst deleted file mode 100644 index dfa4189fa..000000000 --- a/docs/decisions/20220826-default_branch.rst +++ /dev/null @@ -1,42 +0,0 @@ -Default Branch Name -=========================== - -.. admonition:: Status - - Accepted - -Context -------- - -In recent years usage of the word ``master`` has become somewhat controversial -[SFC-BNAMING]_ and consequently default branch name of Git repos has become -``main``, both in Git itself [SFC-BNAMING]_ and in Git hosting solutions such as -GitHub [GH-BRANCHES]_. - -Decision --------- - -RDFLib's -default branch will be renamed from ``master`` to ``main``. This is primarily to stay in line with modern conventions and to adhere to the principle of least surprise. - -Consequences ------------- - -Anticipated negative consequences: - -* Some links to old code will be broken. -* Some people's workflow may break unexpectedly and need adjusting. -* Any code and systems reliant on the old default branch name will fail. - -Anticipated positive consequences: - -* It will become a bit easier to work with RDFLib for developers that are used - to ``main`` as the default branch. - -References ----------- - -.. [GH-BRANCHES] `GitHub: About the default branch - `_ -.. [SFC-BNAMING] `Regarding Git and Branch Naming - `_ diff --git a/docs/decisions/index.rst b/docs/decisions/index.rst deleted file mode 100644 index 39d02ccc9..000000000 --- a/docs/decisions/index.rst +++ /dev/null @@ -1,69 +0,0 @@ -.. _decision_records: Decision Records - -Decision Records -================ - -To ensure that significant changes to RDFLib are made with sufficient consultation, -consideration and planning they should be preceded by a decision record that -captures the particulars of the decision that lead to the change. - -Decision records present the users and maintainers of RDFLib with an opportunity -to review decisions before effort is expended to implement the decision in code, -and it also makes it possible to review decisions without having to reconstruct -them from the code changes that implement them. - -Whether a change is significant is hard to measure objectively, but some -characteristics that may indicate that a change is significant include: - -* It will require changes to code that use RDFLib. -* It cannot be reversed without requiring changes to code that use - RDFLib. -* It is onerous to reverse later. -* It increases the maintenance burden of RDFLib. -* It is very large. - -Some of these characteristics are not binary but measured in degrees, so some -discretion is required when determining if an architectural decision record is -appropriate. - -Decision records may also be used for changes that do not have any of the listed -characteristics if a decision record would be otherwise helpful, for example to -capture a decision to change the maintenance process of RDFLib. - -Changes not preceded by decision records won't be rejected solely on this basis -even if they are deemed significant, and decision records may also be created -retrospectively for changes. 
-
-Decision records as described here are similar to the concept of `Architectural
-Decision Records `_, though it is slightly broader as it
-could include decisions which are not classified as architectural.
-
-Creating a decision record
---------------------------
-
-Decision records should be added to the RDFLib repository in the
-``./docs/decisions/`` directory with a name ``{YYYYmmdd}-{title}.rst``.
-
-The content of the decision record should succinctly describe the context of the
-decision, the decision itself, and the status of the decision.
-
-Decision records should preferably follow `Michael Nygard decision record
-template
-`_
-that he described in a `2011 article
-`_
-on documenting architecture decisions.
-
-For questions about decision records please reach out to the RDFLib maintainers
-and community using the options given in :ref:`further_help_and_contact`.
-
-
-Decision list
--------------
-
-.. toctree::
-   :glob:
-
-   20*-*
-
- 
\ No newline at end of file
diff --git a/docs/developers.md b/docs/developers.md
new file mode 100644
index 000000000..7384d6050
--- /dev/null
+++ b/docs/developers.md
@@ -0,0 +1,396 @@
+# RDFLib developers guide
+
+## Introduction
+
+This document describes the process and conventions to follow when
+developing RDFLib code.
+
+* Please be as Pythonic as possible ([PEP 8](https://www.python.org/dev/peps/pep-0008/)).
+* Code should be formatted using [black](https://github.com/psf/black), with the version and config given in `pyproject.toml`.
+* Code should also pass [ruff](https://docs.astral.sh/ruff/) linting
+  and [mypy](http://mypy-lang.org/) type checking.
+* You must supply tests for new code.
+* RDFLib uses [Poetry](https://python-poetry.org/docs/master/) for dependency management and packaging.
+
+If you add a new cool feature, consider also adding an example in `./examples`.
+
+## Pull Requests Guidelines
+
+Contributions to RDFLib are made through pull requests (PRs).
+
+For changes that add features or affect the public API of RDFLib, it is recommended to first open an issue to discuss the change before starting to work on it. That way you can get feedback on the design of the feature before spending time on it.
+
+In general, maintainers will only merge PRs if the following conditions are met:
+
+* The PR has been sufficiently reviewed.
+
+  Each PR should be reviewed and approved by at least two people other than the
+  author of the PR before it is merged, and PRs will be processed faster if
+  they are easier to review and approve of.
+
+  Reviews are open to everyone, but the weight assigned to any particular
+  review is at the discretion of maintainers.
+
+* Changes that have a runtime impact are covered by unit tests.
+
+  There should either be existing tests that cover the changed code and
+  behaviour, or the PR should include tests. For more information about what is
+  considered adequate testing see the [Tests section](#tests).
+
+* Documentation that covers something that changed has been updated.
+
+* Type checks and unit tests that are part of our continuous integration workflow pass.
+
+In addition to these conditions, PRs that are easier to review and approve will be processed quicker. The primary factors that determine this are the scope and size of a PR. If there are few changes and the scope is limited, then there is less that a reviewer has to understand and less that they can disagree with. It is thus important to try to split up your changes into multiple independent PRs if possible. 
No PR is too small. + +For PRs that introduce breaking changes, it is even more critical that they are limited in size and scope, as they will likely have to be kept up to date with the `main` branch of this project for some time before they are merged. + +It is also critical that your PR is understandable both in what it does and why it does it, and how the change will impact the users of this project, for this reason, it is essential that your PR's description explains the nature of the PR, what the PR intends to do, why this is desirable, and how this will affect the users of this project. + +Please note that while we would like all PRs to follow the guidelines given here, we will not reject a PR just because it does not. + +## Maintenance Guidelines + +This section contains guidelines for maintaining RDFLib. RDFLib maintainers should try to follow these. These guidelines also serve as an indication to RDFLib users what they can expect. + +### Breaking changes + +Breaking changes to RDFLib's public API should be made incrementally, with small pull requests to the main branch that change as few things as possible. + +Breaking changes should be discussed first in an issue before work is started, as it is possible that the change is not necessary or that there is a better way to achieve the same goal, in which case the work on the PR would have been wasted. This will however not be strictly enforced, and no PR will be rejected solely on the basis that it was not discussed upfront. + +RDFLib follows [semantic versioning](https://semver.org/spec/v2.0.0.html) and [trunk-based development](https://trunkbaseddevelopment.com/), so if any breaking changes were introduced into the main branch since the last release, then the next release will be a major release with an incremented major version. + +Releases of RDFLib will not as a rule be conditioned on specific features, so there may be new major releases that contain very few breaking changes, and there could be no minor or patch releases between two major releases. + +#### Rationale + +RDFLib has been around for more than a decade, and in this time both Python and RDF have evolved, and RDFLib's API also has to evolve to keep up with these changes and to make it easier for users to use. This will inevitably require breaking changes. + +There are more or less two ways to introduce breaking changes to RDFLib's public API: + +- Revolutionary: Create a new API from scratch and reimplement it, and when + ready, release a new version of RDFLib with the new API. +- Evolutionary: Incrementally improve the existing API with small changes and + release any breaking changes that were made at regular intervals. + +While the revolutionary approach seems appealing, it is also risky and time-consuming. + +The evolutionary approach puts a lot of strain on the users of RDFLib as they have to adapt to breaking changes more often, but the shortcomings of the RDFLib public API also put a lot of strain on the users of RDFLib. On the other hand, a major advantage of the evolutionary approach is that it is simple and achievable from a maintenance and contributor perspective. + +### Deprecating functionality + +To whatever extent possible, classes, functions, variables, or parameters that will be removed should be marked for deprecation in documentation, and if possible, should be changed to raise deprecation warnings if used. 
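+
+As an illustration only (the function and parameter names here are hypothetical, not RDFLib APIs), a deprecated keyword argument might be kept working while emitting a warning like this:
+
+```python
+import warnings
+
+
+def serialize_graph(destination=None, fmt="turtle", format=None):
+    """Hypothetical function where `format` is a deprecated alias of `fmt`."""
+    if format is not None:
+        # Tell callers about the deprecation, pointing at their call site.
+        warnings.warn(
+            "the `format` argument is deprecated, use `fmt` instead",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        fmt = format
+    return f"serializing to {destination!r} as {fmt}"
+```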
+
+There is however no hard requirement that something may only be removed after a deprecation notice has been added, or only after a release was made with a deprecation notice.
+
+Consequently, functionality may be removed without it ever being marked as deprecated.
+
+#### Rationale
+
+Current resource limitations and the backlog of issues make it impractical to first release or incorporate deprecation notices before making quality of life changes.
+
+RDFLib uses semantic versioning and provides type hints, and these are the primary mechanisms for signalling breaking changes to our users.
+
+## Tests
+
+Any new functionality being added to RDFLib *must* have unit tests and should have doc tests supplied.
+
+Typically, you should add your functionality and new tests to a branch of RDFLib and run all tests locally and see them pass. There are currently close to 4,000 tests, with some expected failures and skipped tests. We won't merge pull requests unless the test suite completes successfully.
+
+Tests that you add should show how your new feature or bug fix is doing what you say it is doing: if you remove your enhancement, your new tests should fail!
+
+Finally, please consider adding simple and more complex tests. It's good to see the basic functionality of your feature tested, and then also any tricky bits or edge cases.
+
+### Testing framework
+
+RDFLib uses the [pytest](https://docs.pytest.org/en/latest/) testing framework.
+
+### Running tests
+
+To run RDFLib's test suite with [pytest](https://docs.pytest.org/en/latest/):
+
+```bash
+poetry install
+poetry run pytest
+```
+
+Specific tests can be run by file name. For example:
+
+```bash
+poetry run pytest test/test_graph/test_graph.py
+```
+
+For more extensive tests, including tests for the [BerkeleyDB](https://www.oracle.com/database/technologies/related/berkeleydb.html) backend, install extra requirements before executing the tests.
+
+```bash
+poetry install --all-extras
+poetry run pytest
+```
+
+By default, tests of the `SPARQLStore` against remote public endpoints are skipped; to enable them, add the flag:
+
+```bash
+poetry run pytest --public-endpoints
+```
+
+Or exclusively run the SPARQLStore tests:
+
+```bash
+poetry run pytest test/test_store/test_store_sparqlstore_public.py --public-endpoints
+```
+
+### Writing tests
+
+New tests should be written for [pytest](https://docs.pytest.org/en/latest/) instead of for python's built-in `unittest` module, as pytest provides advanced features such as parameterization and more flexibility in writing expected failure tests than `unittest`.
+
+A primer on how to write tests for pytest can be found [here](https://docs.pytest.org/en/latest/getting-started.html#create-your-first-test).
+
+The existing tests that use `unittest` work well with pytest, but they should ideally be updated to the pytest test-style when they are touched.
+
+Tests should go into the `test/` directory, either into an existing test file with a name that is applicable to the test being written, or into a new test file with a name that is descriptive of the tests placed in it. Test files should be named `test_*.py` so that [pytest can discover them](https://docs.pytest.org/en/latest/explanation/goodpractices.html#conventions-for-python-test-discovery).
+
+## Running static checks
+
+Check formatting with [black](https://github.com/psf/black), which reads its configuration from `pyproject.toml`:
+
+```bash
+poetry run black .
+```
+
+Check style and conventions with [ruff](https://docs.astral.sh/ruff/linter/):
+
+```bash
+poetry run ruff check
+```
+
+Any issues that are found can potentially be fixed automatically using:
+
+```bash
+poetry run ruff check --fix
+```
+
+Check types with [mypy](http://mypy-lang.org/):
+
+```bash
+poetry run mypy --show-error-context --show-error-codes
+```
+
+## pre-commit and pre-commit ci
+
+We have [pre-commit](https://pre-commit.com/) configured with [black](https://github.com/psf/black) for formatting code.
+
+Some useful commands for using pre-commit:
+
+```bash
+# Install pre-commit.
+pip install --user --upgrade pre-commit
+
+# Install pre-commit hooks, this will run pre-commit
+# every time you make a git commit.
+pre-commit install
+
+# Run pre-commit on changed files.
+pre-commit run
+
+# Run pre-commit on all files.
+pre-commit run --all-files
+```
+
+There are also two tox environments for pre-commit:
+
+```bash
+# run pre-commit on changed files.
+tox -e precommit
+
+# run pre-commit on all files.
+tox -e precommitall
+```
+
+There is no hard requirement for pull requests to be processed with pre-commit (or the underlying processors), however doing this makes for a less noisy codebase with cleaner history.
+
+We have enabled [https://pre-commit.ci/](https://pre-commit.ci/) and this can be used to automatically fix pull requests by commenting `pre-commit.ci autofix` on a pull request.
+
+## Using tox
+
+RDFLib has a [tox](https://tox.wiki/en/latest/index.html) config file that makes it easier to run validation on all supported python versions.
+
+```bash
+# Install tox.
+pip install tox
+
+# List the tox environments that run by default.
+tox -l
+
+# Run the default environments.
+tox
+
+# List all tox environments, including ones that don't run by default.
+tox -a
+
+# Run a specific environment.
+tox -e py39 # default environment with py39
+tox -e py311-extra # extra tests with py311
+
+# Override the test command.
+# the below command will run `pytest test/test_translate_algebra.py`
+# instead of the default pytest command.
+tox -e py39,py311 -- pytest test/test_translate_algebra.py
+```
+
+## `go-task` and `Taskfile.yml`
+
+A `Taskfile.yml` is provided for [go-task](https://taskfile.dev/#/) with various commands that facilitate development.
+
+Instructions for installing go-task can be seen in the [go-task installation guide](https://taskfile.dev/#/installation).
+
+Some useful commands for working with the tasks in the taskfile are given below:
+
+```bash
+# List available tasks.
+task -l
+
+# Configure the environment for development
+task configure
+
+# Run basic validation
+task validate
+
+# Build docs
+task docs
+
+# Run live-preview on the docs
+task docs:live-server
+
+# Run the py310 tox environment
+task tox -- -e py310
+```
+
+The [Taskfile usage documentation](https://taskfile.dev/#/usage) provides more information on how to work with taskfiles.
+
+## Development container
+
+To simplify the process of getting a working development environment for developing rdflib, we provide a [Development Container](https://devcontainers.github.io/containers.dev/) (*devcontainer*) that is configured in [Docker Compose](https://docs.docker.com/compose/). This container can be used directly to run various commands, or it can be used with [editors that support Development Containers](https://devcontainers.github.io/containers.dev/supporting).
+
+!!! 
info "Rootless docker"
+    The devcontainer is intended to run with a
+    [rootless docker](https://docs.docker.com/engine/security/rootless/)
+    daemon so it can edit files owned by the invoking user without
+    an involved configuration process.
+
+    Using a rootless docker daemon also has general security benefits.
+
+To use the development container directly:
+
+```bash
+# Build the devcontainer docker image.
+docker-compose build
+
+# Configure the system for development.
+docker-compose run --rm run task configure
+
+# Run the validate task inside the devtools container.
+docker-compose run --rm run task validate
+
+# Run extensive tests inside the devtools container.
+docker-compose run --rm run task EXTENSIVE=true test
+
+# To get a shell into the devcontainer docker image.
+docker-compose run --rm run bash
+```
+
+The devcontainer also works with [Podman Compose](https://github.com/containers/podman-compose).
+
+Details on how to use the development container with [VSCode](https://code.visualstudio.com/) can be found in the [Developing inside a Container](https://code.visualstudio.com/docs/remote/containers) page. With the VSCode [development container CLI](https://code.visualstudio.com/docs/remote/devcontainer-cli) installed, the following commands can be used to open the repository inside the development container:
+
+```bash
+# Inside the repository base directory
+cd ./rdflib/
+
+# Build the development container.
+devcontainer build .
+
+# Open the code inside the development container.
+devcontainer open .
+```
+
+## Writing documentation
+
+We use mkdocs for generating HTML docs, see [docs](docs.md).
+
+## Continuous Integration
+
+We use GitHub Actions for CI, see: [https://github.com/RDFLib/rdflib/actions](https://github.com/RDFLib/rdflib/actions)
+
+If you make a pull-request to RDFLib on GitHub, GitHub Actions will automatically test your code and we will only merge code passing all tests.
+
+Please do *not* commit tests you know will fail, even if you're just pointing out a bug. If you commit such tests, flag them as expecting to fail.
+
+## Compatibility
+
+RDFLib 7.0.0 and later releases only support Python 3.8.1 and newer.
+
+RDFLib 6.0.0 and later releases only support Python 3.7 and newer.
+
+RDFLib 5.0.0 maintained compatibility with Python versions 2.7, 3.4, 3.5, 3.6, 3.7.
+
+## Releasing
+
+Create a release-preparation pull request with the following changes (a version-consistency sketch follows the list):
+
+* Updated version and date in `CITATION.cff`.
+* Updated copyright year in the `LICENSE` file.
+* Updated copyright year in the `docs/conf.py` file.
+* Updated main branch version and current version in the `README.md` file.
+* Updated version in the `pyproject.toml` file.
+* Updated `__date__` in the `rdflib/__init__.py` file.
+* Accurate `CHANGELOG.md` entry for the release.
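+
+Before opening that PR, it can help to confirm the version strings agree across files. The following is a minimal illustrative sketch, not a script shipped with RDFLib, and the regular expressions are only approximations of how each file records the version:
+
+```python
+import re
+from pathlib import Path
+
+# Files from the checklist above that carry the release version.
+# The patterns are approximate and may need adjusting to the actual file layout.
+checks = {
+    "pyproject.toml": r'^version = "([^"]+)"',
+    "rdflib/__init__.py": r'__version__[^"]*"([^"]+)"',
+    "CITATION.cff": r"^version: (\S+)",
+}
+
+versions = {}
+for filename, pattern in checks.items():
+    match = re.search(pattern, Path(filename).read_text(), re.MULTILINE)
+    versions[filename] = match.group(1) if match else "NOT FOUND"
+
+print(versions)
+assert len(set(versions.values())) == 1, f"version mismatch: {versions}"
+```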
+
+Once the PR is merged, switch to the main branch, build the release and upload it to PyPI:
+
+```bash
+# Clean up any previous builds
+rm -vf dist/*
+
+# Build artifacts
+poetry build
+
+# Verify package metadata
+bsdtar -xvf dist/rdflib-*.whl -O '*/METADATA' | view -
+bsdtar -xvf dist/rdflib-*.tar.gz -O '*/PKG-INFO' | view -
+
+# Check that the built wheel and sdist work correctly:
+## Ensure pipx is installed but not within RDFLib's environment
+pipx run --no-cache --spec "$(readlink -f dist/rdflib*.whl)" rdfpipe --version
+pipx run --no-cache --spec "$(readlink -f dist/rdflib*.whl)" rdfpipe https://github.com/RDFLib/rdflib/raw/main/test/data/defined_namespaces/rdfs.ttl
+pipx run --no-cache --spec "$(readlink -f dist/rdflib*.tar.gz)" rdfpipe --version
+pipx run --no-cache --spec "$(readlink -f dist/rdflib*.tar.gz)" rdfpipe https://github.com/RDFLib/rdflib/raw/main/test/data/defined_namespaces/rdfs.ttl
+
+# Dry run publishing
+poetry publish --repository=testpypi --dry-run
+poetry publish --dry-run
+
+# Publish to TestPyPI
+## ensure you are authed as per https://pypi.org/help/#apitoken and https://github.com/python-poetry/poetry/issues/6320
+poetry publish --repository=testpypi
+
+# Publish to PyPI
+poetry publish
+## poetry publish -u __token__ -p pypi-
+```
+
+Once this is done, create a release tag from [GitHub releases](https://github.com/RDFLib/rdflib/releases/new). For a release of version 6.3.1 the tag should be `6.3.1` (without a "v" prefix), and the release title should be "RDFLib 6.3.1". The release notes for the latest version should be added to the release description. The artifacts built with `poetry build` should be uploaded to the release as release artifacts.
+
+The resulting release will be available at https://github.com/RDFLib/rdflib/releases/tag/6.3.1
+
+Once this is done, announce the release at the following locations:
+
+* Twitter: Just make a tweet from your own account linking to the latest release.
+* RDFLib mailing list.
+* RDFLib Gitter / matrix.org chat room.
+
+Once this is all done, create another post-release pull request with the following changes:
+
+* Set the just-released version in `docker/latest/requirements.in` and run `task docker:prepare` to update the `docker/latest/requirements.txt` file.
+* Set the version in the `pyproject.toml` file to the next minor release with an `a0` suffix to indicate alpha 0.
diff --git a/docs/developers.rst b/docs/developers.rst
deleted file mode 100644
index 7ca914fca..000000000
--- a/docs/developers.rst
+++ /dev/null
@@ -1,513 +0,0 @@
-.. developers:
-
-RDFLib developers guide
-=======================
-
-Introduction
-------------
-
-This document describes the process and conventions to follow when
-developing RDFLib code.
-
-* Please be as Pythonic as possible (:pep:`8`).
-* Code should be formatted using `black `_ and we use Black v23.1.0, with the black config in ``pyproject.toml``.
-* Code should also pass `flake8 `_ linting
-  and `mypy `_ type checking.
-* You must supply tests for new code.
-* RDFLib uses `Poetry `_ for dependency management and packaging.
-
-If you add a new cool feature, consider also adding an example in ``./examples``.
-
-Pull Requests Guidelines
-------------------------
-
-Contributions to RDFLib are made through pull requests (PRs).
-
-For changes that add features or affect the public API of RDFLib, it
-is recommended to first open an issue to discuss the change before starting to
-work on it. That way you can get feedback on the design of the feature before
-spending time on it.
- -In general, maintainers will only merge PRs if the following conditions are -met: - -* The PR has been sufficiently reviewed. - - Each PR should be reviewed and approved by at least two people other than the - author of the PR before it is merged and PRs will be processed faster if - they are easier to review and approve of. - - Reviews are open to everyone, but the weight assigned to any particular - review is at the discretion of maintainers. - -* Changes that have a runtime impact are covered by unit tests. - - There should either be existing tests that cover the changed code and - behaviour, or the PR should include tests. For more information about what is - considered adequate testing see the :ref:`Tests section `. - -* Documentation that covers something that changed has been updated. - -* Type checks and unit tests that are part of our continuous integration - workflow pass. - -In addition to these conditions, PRs that are easier to review and approve will -be processed quicker. The primary factors that determine this are the scope and -size of a PR. If there are few changes and the scope is limited, then there is -less that a reviewer has to understand and less that they can disagree with. It -is thus important to try to split up your changes into multiple independent PRs -if possible. No PR is too small. - -For PRs that introduce breaking changes, it is even more critical that they are -limited in size and scope, as they will likely have to be kept up to date with -the ``main`` branch of this project for some time before they are merged. - -It is also critical that your PR is understandable both in what it does and why -it does it, and how the change will impact the users of this project, for this -reason, it is essential that your PR's description explains the nature of the -PR, what the PR intends to do, why this is desirable, and how this will affect -the users of this project. - -Please note that while we would like all PRs to follow the guidelines given -here, we will not reject a PR just because it does not. - -Maintenance Guidelines ----------------------- - -This section contains guidelines for maintaining RDFLib. RDFLib maintainers -should try to follow these. These guidelines also serve as an indication to -RDFLib users what they can expect. - -Breaking changes -~~~~~~~~~~~~~~~~ - -Breaking changes to RDFLib's public API should be made incrementally, with small -pull requests to the main branch that change as few things as possible. - -Breaking changes should be discussed first in an issue before work is started, -as it is possible that the change is not necessary or that there is a better way -to achieve the same goal, in which case the work on the PR would have been -wasted. This will however not be strictly enforced, and no PR will be rejected -solely on the basis that it was not discussed upfront. - -RDFLib follows `semantic versioning `_ and `trunk-based development -`_, so if any breaking changes were -introduced into the main branch since the last release, then the next release -will be a major release with an incremented major version. - -Releases of RDFLib will not as a rule be conditioned on specific features, so -there may be new major releases that contain very few breaking changes, and -there could be no minor or patch releases between two major releases. - -.. 
_breaking_changes_rationale: - -Rationale -^^^^^^^^^ - -RDFLib has been around for more than a decade, and in this time both Python and -RDF have evolved, and RDFLib's API also has to evolve to keep up with these -changes and to make it easier for users to use. This will inevitably require -breaking changes. - -There are more or less two ways to introduce breaking changes to RDFLib's public -API: - -- Revolutionary: Create a new API from scratch and reimplement it, and when - ready, release a new version of RDFLib with the new API. -- Evolutionary: Incrementally improve the existing API with small changes and - release any breaking changes that were made at regular intervals. - -While the revolutionary approach seems appealing, it is also risky and -time-consuming. - -The evolutionary approach puts a lot of strain on the users of RDFLib as they -have to adapt to breaking changes more often, but the shortcomings of the RDFLib -public API also put a lot of strain on the users of RDFLib. On the other hand, a -major advantage of the evolutionary approach is that it is simple and achievable -from a maintenance and contributor perspective. - -Deprecating functionality -~~~~~~~~~~~~~~~~~~~~~~~~~ - -To whatever extent possible, classes, functions, variables, or parameters that -will be removed should be marked for deprecation in documentation, and if -possible, should be changed to raise deprecation warnings if used. - -There is however no hard requirement that something may only be removed after a -deprecation notice has been added, or only after a release was made with a -deprecation notice. - -Consequently, functionality may be removed without it ever being marked as -deprecated. - -.. _deprecation_rationale: - -Rationale -^^^^^^^^^ - -Current resource limitations and the backlog of issues make it impractical to -first release or incorporate deprecation notices before making quality of life -changes. - -RDFLib uses semantic versioning and provides type hints, and these are the -primary mechanisms for signalling breaking changes to our users. - -.. _tests: - -Tests ------ -Any new functionality being added to RDFLib *must* have unit tests and -should have doc tests supplied. - -Typically, you should add your functionality and new tests to a branch of -RDFlib and run all tests locally and see them pass. There are currently -close to 4,000 tests, with a some expected failures and skipped tests. -We won't merge pull requests unless the test suite completes successfully. - -Tests that you add should show how your new feature or bug fix is doing what -you say it is doing: if you remove your enhancement, your new tests should fail! - -Finally, please consider adding simple and more complex tests. It's good to see -the basic functionality of your feature tests and then also any tricky bits or -edge cases. - -Testing framework -~~~~~~~~~~~~~~~~~ -RDFLib uses the `pytest `_ testing framework. - -Running tests -~~~~~~~~~~~~~ - -To run RDFLib's test suite with `pytest `_: - -.. code-block:: console - - $ poetry install - $ poetry run pytest - -Specific tests can be run by file name. For example: - -.. code-block:: console - - $ poetry run pytest test/test_graph/test_graph.py - -For more extensive tests, including tests for the `berkleydb -`_ -backend, install extra requirements before -executing the tests. - -.. 
code-block:: console - - $ poetry install --all-extras - $ poetry run pytest - -Writing tests -~~~~~~~~~~~~~ - -New tests should be written for `pytest `_ -instead of for python's built-in `unittest` module as pytest provides advanced -features such as parameterization and more flexibility in writing expected -failure tests than `unittest`. - -A primer on how to write tests for pytest can be found `here -`_. - -The existing tests that use `unittest` work well with pytest, but they should -ideally be updated to the pytest test-style when they are touched. - -Test should go into the ``test/`` directory, either into an existing test file -with a name that is applicable to the test being written, or into a new test -file with a name that is descriptive of the tests placed in it. Test files -should be named ``test_*.py`` so that `pytest can discover them -`_. - -Running static checks ---------------------- - -Check formatting with `black `_, making sure you use -our black.toml config file: - -.. code-block:: bash - - poetry run black . - -Check style and conventions with `flake8 `_: - -.. code-block:: bash - - poetry run flake8 rdflib - -We also provide a `flakeheaven `_ -baseline that ignores existing flake8 errors and only reports on newly -introduced flake8 errors: - -.. code-block:: bash - - poetry run flakeheaven - - -Check types with `mypy `_: - -.. code-block:: bash - - poetry run mypy --show-error-context --show-error-codes - -pre-commit and pre-commit ci ----------------------------- - -We have `pre-commit `_ configured with `black -`_ for formatting code. - -Some useful commands for using pre-commit: - -.. code-block:: bash - - # Install pre-commit. - pip install --user --upgrade pre-commit - - # Install pre-commit hooks, this will run pre-commit - # every time you make a git commit. - pre-commit install - - # Run pre-commit on changed files. - pre-commit run - - # Run pre-commit on all files. - pre-commit run --all-files - -There is also two tox environments for pre-commit: - -.. code-block:: bash - - # run pre-commit on changed files. - tox -e precommit - - # run pre-commit on all files. - tox -e precommitall - - -There is no hard requirement for pull requests to be processed with pre-commit (or the underlying processors), however doing this makes for a less noisy codebase with cleaner history. - -We have enabled `https://pre-commit.ci/ `_ and this can -be used to automatically fix pull requests by commenting ``pre-commit.ci -autofix`` on a pull request. - -Using tox ---------------------- - -RDFLib has a `tox `_ config file that -makes it easier to run validation on all supported python versions. - -.. code-block:: bash - - # Install tox. - pip install tox - - # List the tox environments that run by default. - tox -e - - # Run the default environments. - tox - - # List all tox environments, including ones that don't run by default. - tox -a - - # Run a specific environment. - tox -e py38 # default environment with py37 - tox -e py39-extra # extra tests with py39 - - # Override the test command. - # the below command will run `pytest test/test_translate_algebra.py` - # instead of the default pytest command. - tox -e py38,py39 -- pytest test/test_translate_algebra.py - - -``go-task`` and ``Taskfile.yml`` --------------------------------- - -A ``Taskfile.yml`` is provided for `go-task `_ with -various commands that facilitate development. - -Instructions for installing go-task can be seen in the `go-task installation -guide `_. 
- -Some useful commands for working with the task in the taskfile is given below: - -.. code-block:: bash - - # List available tasks. - task -l - - # Configure the environment for development - task configure - - # Run basic validation - task validate - - # Build docs - task docs - - # Run live-preview on the docs - task docs:live-server - - # Run the py310 tox environment - task tox -- -e py310 - -The `Taskfile usage documentation `_ provides -more information on how to work with taskfiles. - -Development container ---------------------- - -To simplify the process of getting a working development environment to develop -rdflib in we provide a `Development Container -`_ (*devcontainer*) that is -configured in `Docker Compose `_. This -container can be used directly to run various commands, or it can be used with -`editors that support Development Containers -`_. - -.. important:: - The devcontainer is intended to run with a - `rootless docker `_ - daemon so it can edit files owned by the invoking user without - an invovled configuration process. - - Using a rootless docker daemon also has general security benefits. - -To use the development container directly: - -.. code-block:: bash - - # Build the devcontainer docker image. - docker-compose build - - # Configure the system for development. - docker-compose run --rm run task configure - - # Run the validate task inside the devtools container. - docker-compose run --rm run task validate - - # Run extensive tests inside the devtools container. - docker-compose run --rm run task EXTENSIVE=true test - - # To get a shell into the devcontainer docker image. - docker-compose run --rm run bash - -The devcontainer also works with `Podman Compose -`_. - -Details on how to use the development container with `VSCode -`_ can found in the `Developing inside a -Container `_ page. With -the VSCode `development container CLI -`_ installed the -following command can be used to open the repository inside the development -container: - -.. code-block:: bash - - # Inside the repository base directory - cd ./rdflib/ - - # Build the development container. - devcontainer build . - - # Open the code inside the development container. - devcontainer open . - -Writing documentation ---------------------- - -We use sphinx for generating HTML docs, see :ref:`docs`. - -Continuous Integration ----------------------- - -We used GitHub Actions for CI, see: - - https://github.com/RDFLib/rdflib/actions - -If you make a pull-request to RDFLib on GitHub, GitHub Actions will -automatically test your code and we will only merge code passing all tests. - -Please do *not* commit tests you know will fail, even if you're just pointing out a bug. If you commit such tests, -flag them as expecting to fail. - -Compatibility -------------- - -RDFlib 7.0.0 release and later only support Python 3.8.1 and newer. - -RDFlib 6.0.0 release and later only support Python 3.7 and newer. - -RDFLib 5.0.0 maintained compatibility with Python versions 2.7, 3.4, 3.5, 3.6, 3.7. - -Releasing ---------- - -Create a release-preparation pull request with the following changes: - -* Updated version and date in ``CITATION.cff``. -* Updated copyright year in the ``LICENSE`` file. -* Updated copyright year in the ``docs/conf.py`` file. -* Updated main branch version and current version in the ``README.md`` file. -* Updated version in the ``pyproject.toml`` file. -* Updated ``__date__`` in the ``rdflib/__init__.py`` file. -* Accurate ``CHANGELOG.md`` entry for the release. 
-
-Once the PR is merged, switch to the main branch, build the release and upload it to PyPI:
-
-.. code-block:: bash
-
-    # Clean up any previous builds
-    \rm -vf dist/*
-
-    # Build artifacts
-    poetry build
-
-    # Verify package metadata
-    bsdtar -xvf dist/rdflib-*.whl -O '*/METADATA' | view -
-    bsdtar -xvf dist/rdflib-*.tar.gz -O '*/PKG-INFO' | view -
-
-    # Check that the built wheel and sdist works correctly:
-    ## Ensure pipx is installed but not within RDFLib's environment
-    pipx run --no-cache --spec "$(readlink -f dist/rdflib*.whl)" rdfpipe --version
-    pipx run --no-cache --spec "$(readlink -f dist/rdflib*.whl)" rdfpipe https://github.com/RDFLib/rdflib/raw/main/test/data/defined_namespaces/rdfs.ttl
-    pipx run --no-cache --spec "$(readlink -f dist/rdflib*.tar.gz)" rdfpipe --version
-    pipx run --no-cache --spec "$(readlink -f dist/rdflib*.tar.gz)" rdfpipe https://github.com/RDFLib/rdflib/raw/main/test/data/defined_namespaces/rdfs.ttl
-
-    # Dry run publishing
-    poetry publish --repository=testpypi --dry-run
-    poetry publish --dry-run
-
-    # Publish to TestPyPI
-    ## ensure you are authed as per https://pypi.org/help/#apitoken and https://github.com/python-poetry/poetry/issues/6320
-    poetry publish --repository=testpypi
-
-    # Publish to PyPI
-    poetry publish
-    ## poetry publish -u __token__ -p pypi-
-
-
-Once this is done, create a release tag from `GitHub releases
-`_. For a release of version
-6.3.1 the tag should be ``6.3.1`` (without a "v" prefix), and the release title
-should be "RDFLib 6.3.1". The release notes for the latest version be added to
-the release description. The artifacts built with ``poetry build`` should be
-uploaded to the release as release artifacts.
-
-The resulting release will be available at https://github.com/RDFLib/rdflib/releases/tag/6.3.1
-
-Once this is done, announce the release at the following locations:
-
-* Twitter: Just make a tweet from your own account linking to the latest release.
-* RDFLib mailing list.
-* RDFLib Gitter / matrix.org chat room.
-
-Once this is all done, create another post-release pull request with the following changes:
-
-* Set the just released version in ``docker/latest/requirements.in`` and run
-  ``task docker:prepare`` to update the ``docker/latest/requirements.txt`` file.
-* Set the version in the ``pyproject.toml`` file to the next minor release with
-  a ``a0`` suffix to indicate alpha 0.
diff --git a/docs/docs.md b/docs/docs.md
new file mode 100644
index 000000000..4ebe2e379
--- /dev/null
+++ b/docs/docs.md
@@ -0,0 +1,47 @@
+# Writing RDFLib Documentation
+
+These docs are generated with [Material for MkDocs](https://squidfunk.github.io/mkdocs-material).
+
+- When writing doc-strings, use markdown and Google style (see the example below).
+- API Docs are automatically generated with [`mkdocstrings`](https://mkdocstrings.github.io).
+- See the [supported admonitions here](https://squidfunk.github.io/mkdocs-material/reference/admonitions/#supported-types).
+
+## Building
+
+To build the documentation you can use `mkdocs` from within the poetry environment. To do this, run the following commands:
+
+```bash
+# Install poetry venv
+poetry install
+
+# Build the docs
+poetry run mkdocs build
+```
+
+Built HTML docs will be generated in `site/` and API documentation, generated as markdown from doc-strings, will be placed in `docs/apidocs/`.
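+
+Since doc-strings are rendered by mkdocstrings, Google-style sections such as `Args:` and `Returns:` become formatted API docs. A minimal sketch of the expected style (the function itself is hypothetical, not an RDFLib API):
+
+```python
+def shorten_uri(uri: str, max_length: int = 30) -> str:
+    """Shorten a URI for display purposes.
+
+    Args:
+        uri: The URI to shorten.
+        max_length: Maximum length of the returned string.
+
+    Returns:
+        The URI, truncated with `...` if it is longer than `max_length`.
+    """
+    return uri if len(uri) <= max_length else uri[: max_length - 3] + "..."
+```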
+
+There is also a [tox](https://tox.wiki/en/latest/) environment for building documentation:
+
+```bash
+tox -e docs
+```
+
+You can check the built documentation with:
+
+```bash
+npx -p live-server live-server site/
+```
+
+## Development
+
+Run a development server with auto-reload on code changes:
+
+```bash
+poetry run mkdocs serve
+```
+
+## Tables
+
+The tables in `plugin_*.rst` were generated with `plugintable.py`.
diff --git a/docs/docs.rst b/docs/docs.rst
deleted file mode 100644
index 5ff917755..000000000
--- a/docs/docs.rst
+++ /dev/null
@@ -1,55 +0,0 @@
-.. _docs:
-
-================================
-Writing RDFLib Documentation
-================================
-
-
-These docs are generated with Sphinx.
-
-Sphinx makes it very easy to pull in doc-strings from modules,
-classes, methods, etc. When writing doc-strings, special reST fields
-can be used to annotate parameters, return-types, etc. This makes for
-pretty API docs. See `here `_
-for the Shinx documentation about these fields.
-
-Building
---------
-
-To build the documentation you can use Sphinx from within the poetry environment. To do this, run the following commands:
-
-.. code-block:: bash
-
-    # Install poetry venv
-    poetry install
-
-    # Build the sphinx docs
-    poetry run sphinx-build -b html -d docs/_build/doctrees docs docs/_build/html
-
-
-Docs will be generated in :file:`docs/_build/html` and API documentation,
-generated from doc-strings, will be placed in :file:`docs/apidocs/`.
-
-There is also a `tox `_ environment for building
-documentation:
-
-.. code-block:: bash
-
-    tox -e docs
-
-API Docs
---------
-
-API Docs are automatically generated with ``sphinx-apidoc``:
-
-.. code-block:: bash
-
-    poetry run sphinx-apidoc -f -d 10 -o docs/apidocs/ rdflib examples
-
-Note that ``rdflib.rst`` was manually tweaked so as to not include all
-imports in ``rdflib/__init__.py``.
- -Tables ------- - -The tables in ``plugin_*.rst`` were generated with ``plugintable.py`` diff --git a/docs/gen_ref_pages.py b/docs/gen_ref_pages.py new file mode 100644 index 000000000..0abb530d0 --- /dev/null +++ b/docs/gen_ref_pages.py @@ -0,0 +1,62 @@ +"""Generate the code reference pages.""" + +import importlib +import pkgutil +from pathlib import Path + +import mkdocs_gen_files + + +def generate_module_docs(module_path, output_path, nav, indent=0): + """Generate documentation for a module and its submodules.""" + try: + module = importlib.import_module(module_path) + doc_path = Path(output_path) + # Collect submodule information for parent modules + submodules = [] + if hasattr(module, "__path__"): + for _, submodule_name, is_pkg in pkgutil.iter_modules(module.__path__): + submodules.append((submodule_name, is_pkg)) + + # Create a .md file for the current module + if not module_path == "rdflib": + with mkdocs_gen_files.open(doc_path, "w") as fd: + fd.write(f"::: {module_path}\n\n") + # namespace module page gets too big, so we disable source code display + if module_path.startswith("rdflib.namespace"): + fd.write(" options:\n") + fd.write(" show_source: false\n") + fd.write(" show_if_no_docstring: false\n\n") + + mkdocs_gen_files.set_edit_path( + doc_path, Path(f"../{module_path.replace('.', '/')}.py") + ) + # Add to navigation - convert path to tuple of parts for nav + # parts = tuple(doc_path.with_suffix("").parts) + # nav[parts] = doc_path.as_posix() + # Process submodules + if hasattr(module, "__path__"): + for _, submodule_name, is_pkg in pkgutil.iter_modules(module.__path__): + full_submodule_path = f"{module_path}.{submodule_name}" + # Create path for submodule documentation + generate_module_docs( + full_submodule_path, + Path(f"apidocs/{full_submodule_path}.md"), + nav, + indent + 4, + ) + except (ImportError, AttributeError) as e: + print(f"Error processing {module_path}: {e}") + + +# Creating navigation structure requires mkdocs-literate-nav +# nav = mkdocs_gen_files.Nav() +nav = None + +# Generate all docs +generate_module_docs("rdflib", Path("apidocs/_index.md"), nav) +generate_module_docs("examples", Path("apidocs/examples.md"), nav) + +# # Write the navigation file for the literate-nav plugin +# with mkdocs_gen_files.open("SUMMARY.md", "w") as nav_file: +# nav_file.writelines(nav.build_literate_nav()) diff --git a/docs/gettingstarted.md b/docs/gettingstarted.md new file mode 100644 index 000000000..1e8cfd417 --- /dev/null +++ b/docs/gettingstarted.md @@ -0,0 +1,144 @@ +# Getting started with RDFLib + +## Installation + +RDFLib is open source and is maintained in a [GitHub](https://github.com/RDFLib/rdflib/) repository. 
RDFLib releases, current and previous, are listed on [PyPI](https://pypi.python.org/pypi/rdflib/).
+
+The best way to install RDFLib is to use `pip` (sudo as required):
+
+```bash
+pip install rdflib
+```
+
+If you want the latest code to run, clone the `main` branch of the GitHub repo and use that, or you can `pip install` directly from GitHub:
+
+```bash
+pip install git+https://github.com/RDFLib/rdflib.git@main#egg=rdflib
+```
+
+## Support
+
+Usage support is available via questions tagged with `[rdflib]` on [StackOverflow](https://stackoverflow.com/questions/tagged/rdflib). Development support, notifications and detailed discussion happen on the rdflib-dev group (mailing list): [http://groups.google.com/group/rdflib-dev](http://groups.google.com/group/rdflib-dev)
+
+If you notice a bug or want to request an enhancement, please do so via our Issue Tracker on GitHub: [http://github.com/RDFLib/rdflib/issues](http://github.com/RDFLib/rdflib/issues)
+
+## How it all works
+
+*The package uses various Python idioms that offer an appropriate way to introduce RDF to a Python programmer who hasn't worked with RDF before.*
+
+The primary interface that RDFLib exposes for working with RDF is a [`Graph`][rdflib.graph.Graph].
+
+RDFLib graphs are un-sorted containers; they have ordinary Python `set` operations (e.g. [`add()`][rdflib.graph.Graph.add] to add a triple) plus methods that search triples and return them in arbitrary order.
+
+RDFLib graphs also redefine certain built-in Python methods in order to behave in a predictable way. They do this by [emulating container types](https://docs.python.org/3/reference/datamodel.html#emulating-container-types) and are best thought of as a set of 3-item tuples ("triples", in RDF-speak):
+
+```python
+[
+    (subject0, predicate0, object0),
+    (subject1, predicate1, object1),
+    # ...
+    (subjectN, predicateN, objectN),
+]
+```
+
+## A tiny example
+
+```python
+from rdflib import Graph
+
+# Create a Graph
+g = Graph()
+
+# Parse in an RDF file hosted on the Internet
+g.parse("http://www.w3.org/People/Berners-Lee/card")
+
+# Loop through each triple in the graph (subj, pred, obj)
+for subj, pred, obj in g:
+    # Check if there is at least one triple in the Graph
+    if (subj, pred, obj) not in g:
+        raise Exception("It better be!")
+
+# Print the number of "triples" in the Graph
+print(f"Graph g has {len(g)} statements.")
+# Prints: Graph g has 86 statements.
+
+# Print out the entire Graph in the RDF Turtle format
+print(g.serialize(format="turtle"))
+```
+
+Here a [`Graph`][rdflib.graph.Graph] is created and then an online RDF file (Tim Berners-Lee's social network details) is parsed into that graph. The `print()` statement uses the `len()` function to count the number of triples in the graph.
+
+## A more extensive example
+
+```python
+from rdflib import Graph, Literal, RDF, URIRef
+# rdflib knows about quite a few popular namespaces, like W3C ontologies, schema.org etc.
+from rdflib.namespace import FOAF, XSD
+
+# Create a Graph
+g = Graph()
+
+# Create an RDF URI node to use as the subject for multiple triples
+donna = URIRef("http://example.org/donna")
+
+# Add triples using store's add() method.
+g.add((donna, RDF.type, FOAF.Person))
+g.add((donna, FOAF.nick, Literal("donna", lang="en")))
+g.add((donna, FOAF.name, Literal("Donna Fales")))
+g.add((donna, FOAF.mbox, URIRef("mailto:donna@example.org")))
+
+# Add another person
+ed = URIRef("http://example.org/edward")
+
+# Add triples using store's add() method.
+g.add((ed, RDF.type, FOAF.Person))
+g.add((ed, FOAF.nick, Literal("ed", datatype=XSD.string)))
+g.add((ed, FOAF.name, Literal("Edward Scissorhands")))
+g.add((ed, FOAF.mbox, Literal("e.scissorhands@example.org", datatype=XSD.anyURI)))
+
+# Iterate over triples in store and print them out.
+print("--- printing raw triples ---")
+for s, p, o in g:
+    print((s, p, o))
+
+# For each foaf:Person in the store, print out their mbox property's value.
+print("--- printing mboxes ---")
+for person in g.subjects(RDF.type, FOAF.Person):
+    for mbox in g.objects(person, FOAF.mbox):
+        print(mbox)
+
+# Bind the FOAF namespace to a prefix for more readable output
+g.bind("foaf", FOAF)
+
+# print all the data in the Notation3 format
+print("--- printing the graph in N3 ---")
+print(g.serialize(format='n3'))
+```
+
+## A SPARQL query example
+
+```python
+from rdflib import Graph
+
+# Create a Graph, parse in Internet data
+g = Graph().parse("http://www.w3.org/People/Berners-Lee/card")
+
+# Query the data in g using SPARQL
+# This query returns the 'name' of all `foaf:Person` instances
+q = """
+    PREFIX foaf: <http://xmlns.com/foaf/0.1/>
+
+    SELECT ?name
+    WHERE {
+        ?p rdf:type foaf:Person .
+
+        ?p foaf:name ?name .
+    }
+"""
+
+# Apply the query to the graph and iterate through results
+for r in g.query(q):
+    print(r["name"])
+
+# prints: Timothy Berners-Lee
+```
diff --git a/docs/gettingstarted.rst b/docs/gettingstarted.rst
deleted file mode 100644
index b3ee9572f..000000000
--- a/docs/gettingstarted.rst
+++ /dev/null
@@ -1,178 +0,0 @@
-.. _gettingstarted:
-
-===============================
-Getting started with RDFLib
-===============================
-
-Installation
-============
-
-RDFLib is open source and is maintained in a
-`GitHub `_ repository. RDFLib releases, current and previous,
-are listed on `PyPi `_
-
-The best way to install RDFLib is to use ``pip`` (sudo as required):
-
-.. code-block :: bash
-
-    $ pip install rdflib
-
-If you want the latest code to run, clone the ``main`` branch of the GitHub repo and use that or you can ``pip install``
-directly from GitHub:
-
-.. code-block :: bash
-
-    $ pip install git+https://github.com/RDFLib/rdflib.git@main#egg=rdflib
-
-
-Support
-=======
-Usage support is available via questions tagged with ``[rdflib]`` on `StackOverflow `__
-and development support, notifications and detailed discussion through the rdflib-dev group (mailing list):
-
-    http://groups.google.com/group/rdflib-dev
-
-If you notice an bug or want to request an enhancement, please do so via our Issue Tracker in Github:
-
-    ``_
-
-How it all works
-================
-*The package uses various Python idioms
-that offer an appropriate way to introduce RDF to a Python programmer
-who hasn't worked with RDF before.*
-
-The primary interface that RDFLib exposes for working with RDF is a
-:class:`~rdflib.graph.Graph`.
-
-RDFLib graphs are un-sorted containers; they have ordinary Python ``set``
-operations (e.g. :meth:`~rdflib.Graph.add` to add a triple) plus
-methods that search triples and return them in arbitrary order.
-
-RDFLib graphs also redefine certain built-in Python methods in order
-to behave in a predictable way. They do this by `emulating container types
-`_ and
-are best thought of as a set of 3-item tuples ("triples", in RDF-speak):
-
-.. code-block:: text
-
-    [
-        (subject0, predicate0, object0),
-        (subject1, predicate1, object1),
-        ...
-        (subjectN, predicateN, objectN)
-    ]
-
-A tiny example
-==============
-
-.. 
code-block:: python - - from rdflib import Graph - - # Create a Graph - g = Graph() - - # Parse in an RDF file hosted on the Internet - g.parse("http://www.w3.org/People/Berners-Lee/card") - - # Loop through each triple in the graph (subj, pred, obj) - for subj, pred, obj in g: - # Check if there is at least one triple in the Graph - if (subj, pred, obj) not in g: - raise Exception("It better be!") - - # Print the number of "triples" in the Graph - print(f"Graph g has {len(g)} statements.") - # Prints: Graph g has 86 statements. - - # Print out the entire Graph in the RDF Turtle format - print(g.serialize(format="turtle")) - -Here a :class:`~rdflib.graph.Graph` is created and then an RDF file online, Tim Berners-Lee's social network details, is -parsed into that graph. The ``print()`` statement uses the ``len()`` function to count the number of triples in the -graph. - -A more extensive example -======================== - -.. code-block:: python - - from rdflib import Graph, Literal, RDF, URIRef - # rdflib knows about quite a few popular namespaces, like W3C ontologies, schema.org etc. - from rdflib.namespace import FOAF , XSD - - # Create a Graph - g = Graph() - - # Create an RDF URI node to use as the subject for multiple triples - donna = URIRef("http://example.org/donna") - - # Add triples using store's add() method. - g.add((donna, RDF.type, FOAF.Person)) - g.add((donna, FOAF.nick, Literal("donna", lang="en"))) - g.add((donna, FOAF.name, Literal("Donna Fales"))) - g.add((donna, FOAF.mbox, URIRef("mailto:donna@example.org"))) - - # Add another person - ed = URIRef("http://example.org/edward") - - # Add triples using store's add() method. - g.add((ed, RDF.type, FOAF.Person)) - g.add((ed, FOAF.nick, Literal("ed", datatype=XSD.string))) - g.add((ed, FOAF.name, Literal("Edward Scissorhands"))) - g.add((ed, FOAF.mbox, Literal("e.scissorhands@example.org", datatype=XSD.anyURI))) - - # Iterate over triples in store and print them out. - print("--- printing raw triples ---") - for s, p, o in g: - print((s, p, o)) - - # For each foaf:Person in the store, print out their mbox property's value. - print("--- printing mboxes ---") - for person in g.subjects(RDF.type, FOAF.Person): - for mbox in g.objects(person, FOAF.mbox): - print(mbox) - - # Bind the FOAF namespace to a prefix for more readable output - g.bind("foaf", FOAF) - - # print all the data in the Notation3 format - print("--- printing mboxes ---") - print(g.serialize(format='n3')) - - -A SPARQL query example -====================== - -.. code-block:: python - - from rdflib import Graph - - # Create a Graph, parse in Internet data - g = Graph().parse("http://www.w3.org/People/Berners-Lee/card") - - # Query the data in g using SPARQL - # This query returns the 'name' of all ``foaf:Person`` instances - q = """ - PREFIX foaf: - - SELECT ?name - WHERE { - ?p rdf:type foaf:Person . - - ?p foaf:name ?name . - } - """ - - # Apply the query to the graph and iterate through results - for r in g.query(q): - print(r["name"]) - - # prints: Timothy Berners-Lee - - - -More examples -============= -There are many more :doc:`examples ` in the :file:`examples` folder in the source distribution. 
diff --git a/docs/includes/abbreviations.md b/docs/includes/abbreviations.md new file mode 100644 index 000000000..6cf8a7e15 --- /dev/null +++ b/docs/includes/abbreviations.md @@ -0,0 +1,31 @@ +*[HTML]: Hyper Text Markup Language +*[HTTP]: HyperText Transfer Protocol +*[HTTPS]: HyperText Transfer Protocol Secure +*[API]: Application Programming Interface +*[UI]: User Interface +*[CLI]: Command-Line Interface +*[PIP]: Pip Install Packages +*[PyPI]: Python Packaging Index +*[PyPA]: Python Packaging Authority +*[PEP]: Python Enhancement Proposal +*[RDF]: Resource Description Framework +*[N3]: Notation 3, an assertion and logic language which is a superset of RDF +*[TriX]: Triples in XML +*[TriG]: Triples in Graphs +*[RDFa]: Resource Description Framework in Attributes +*[JSON-LD]: JavaScript Object Notation - Linked Data +*[JSON]: JavaScript Object Notation +*[OWL]: Web Ontology Language +*[XML]: Extensible Markup Language +*[SPARQL]: SPARQL Protocol and RDF Query Language +*[URL]: Uniform Resource Locator +*[URI]: Uniform Resource Identifier +*[IRI]: Internationalized Resource Identifier +*[CSV]: Comma-Separated Value +*[TSV]: Tab-Separated Value +*[PSV]: Pipe-Separated Value +*[RegEx]: Regular Expression +*[OBO]: Open Biological and Biomedical Ontology +*[VSCode]: VisualStudio Code +*[PR]: Pull request +*[PRs]: Pull requests diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..f37fd1381 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,92 @@ +![RDFLib logo](_static/RDFlib.png) + +# RDFLib + +RDFLib is a pure Python package for working with [RDF](http://www.w3.org/RDF/). It contains: + +* **Parsers & Serializers** + * for RDF/XML, N3, NTriples, N-Quads, Turtle, TriG, TriX, JSON-LD, HexTuples, RDFa and Microdata + +* **Store implementations** + * memory stores + * persistent, on-disk stores, using databases such as BerkeleyDB + * remote SPARQL endpoints + +* **Graph interface** + * to a single graph + * or to multiple Named Graphs within a dataset + +* **SPARQL 1.1 implementation** + * both Queries and Updates are supported + +!!! warning "Security considerations" + RDFLib is designed to access arbitrary network and file resources, in some + cases these are directly requested resources, in other cases they are + indirectly referenced resources. + + If you are using RDFLib to process untrusted documents or queries you should + take measures to restrict file and network access. + + For information on available security measures, see the RDFLib + [Security Considerations](security_considerations.md) + documentation. 
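+
+As a small taste of these pieces working together, a minimal sketch (any local or remote RDF document can stand in for the URL below):
+
+```python
+from rdflib import Graph
+
+g = Graph().parse("http://www.w3.org/People/Berners-Lee/card")
+
+# run a SPARQL query over the parsed triples...
+for row in g.query("SELECT ?s WHERE { ?s ?p ?o } LIMIT 3"):
+    print(row.s)
+
+# ...and serialise the graph back out as Turtle
+print(g.serialize(format="turtle"))
+```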
+ +## Getting started + +If you have never used RDFLib, the following will help get you started: + +* [Getting Started](gettingstarted.md) +* [Introduction to Parsing](intro_to_parsing.md) +* [Introduction to Creating RDF](intro_to_creating_rdf.md) +* [Introduction to Graphs](intro_to_graphs.md) +* [Introduction to SPARQL](intro_to_sparql.md) +* [Utilities](utilities.md) +* [Examples](apidocs/examples.md) + +## In depth + +If you are familiar with RDF and are looking for details on how RDFLib handles it, these are for you: + +* [RDF Terms](rdf_terms.md) +* [Namespaces and Bindings](namespaces_and_bindings.md) +* [Persistence](persistence.md) +* [Merging](merging.md) +* [Changelog](changelog.md) +* [Upgrade 6 to 7](upgrade6to7.md) +* [Upgrade 5 to 6](upgrade5to6.md) +* [Upgrade 4 to 5](upgrade4to5.md) +* [Security Considerations](security_considerations.md) + +## Versioning + +RDFLib follows [Semantic Versioning 2.0.0](https://semver.org/spec/v2.0.0.html), which can be summarized as follows: + +Given a version number `MAJOR.MINOR.PATCH`, increment the: + +1. `MAJOR` version when you make incompatible API changes +2. `MINOR` version when you add functionality in a backwards-compatible manner +3. `PATCH` version when you make backwards-compatible bug fixes + +## For developers + +* [Developers guide](developers.md) +* [Documentation guide](docs.md) +* [Contributing guide](CONTRIBUTING.md) +* [Code of Conduct](CODE_OF_CONDUCT.md) +* [Persisting N3 Terms](persisting_n3_terms.md) +* [Type Hints](type_hints.md) +* [Decisions](decisions.md) + +## Source Code + +The rdflib source code is hosted on GitHub at [https://github.com/RDFLib/rdflib](https://github.com/RDFLib/rdflib) where you can lodge Issues and create Pull Requests to help improve this community project! + +The RDFlib organisation on GitHub at [https://github.com/RDFLib](https://github.com/RDFLib) maintains this package and a number of other RDF and RDFlib-related packaged that you might also find useful. + +## Further help & Contact + +If you would like help with using RDFlib, rather than developing it, please post a question on StackOverflow using the tag `[rdflib]`. A list of existing `[rdflib]` tagged questions can be found [here](https://stackoverflow.com/questions/tagged/rdflib). + +You might also like to join RDFlib's [dev mailing list](https://groups.google.com/group/rdflib-dev) or use RDFLib's [GitHub discussions section](https://github.com/RDFLib/rdflib/discussions). + +The chat is available at [gitter](https://gitter.im/RDFLib/rdflib) or via matrix [#RDFLib_rdflib:gitter.im](https://matrix.to/#/#RDFLib_rdflib:gitter.im). diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index ad6e7c00d..000000000 --- a/docs/index.rst +++ /dev/null @@ -1,144 +0,0 @@ -.. rdflib documentation documentation main file - -================ -rdflib |release| -================ - -RDFLib is a pure Python package for working with `RDF `_. It contains: - -* **Parsers & Serializers** - - * for RDF/XML, N3, NTriples, N-Quads, Turtle, TriX, JSON-LD, HexTuples, RDFa and Microdata - - -* **Store implementations** - - * memory stores - * persistent, on-disk stores, using databases such as BerkeleyDB - * remote SPARQL endpoints - -* **Graph interface** - - * to a single graph - * or to multiple Named Graphs within a dataset - -* **SPARQL 1.1 implementation** - - * both Queries and Updates are supported - -.. 
caution:: - - RDFLib is designed to access arbitrary network and file resources, in some - cases these are directly requested resources, in other cases they are - indirectly referenced resources. - - If you are using RDFLib to process untrusted documents or queries you should - take measures to restrict file and network access. - - For information on available security measures, see the RDFLib - :doc:`Security Considerations ` - documentation. - -Getting started ---------------- -If you have never used RDFLib, the following will help get you started: - -.. toctree:: - :maxdepth: 1 - - gettingstarted - intro_to_parsing - intro_to_creating_rdf - intro_to_graphs - intro_to_sparql - utilities - Examples - - -In depth --------- -If you are familiar with RDF and are looking for details on how RDFLib handles it, these are for you: - -.. toctree:: - :maxdepth: 1 - - rdf_terms - namespaces_and_bindings - persistence - merging - changelog - upgrade6to7 - upgrade5to6 - upgrade4to5 - security_considerations - - -Reference ---------- -The nitty-gritty details of everything. - -API reference: - -.. toctree:: - :maxdepth: 1 - - apidocs/modules - -.. toctree:: - :maxdepth: 2 - - plugins - -.. * :ref:`genindex` -.. * :ref:`modindex` - -Versioning ----------- -RDFLib follows `Semantic Versioning 2.0.0 `_, which can be summarized as follows: - - Given a version number ``MAJOR.MINOR.PATCH``, increment the: - - #. ``MAJOR`` version when you make incompatible API changes - #. ``MINOR`` version when you add functionality in a backwards-compatible - manner - #. ``PATCH`` version when you make backwards-compatible bug fixes - -For developers --------------- -.. toctree:: - :maxdepth: 1 - - developers - CODE_OF_CONDUCT - docs - persisting_n3_terms - type_hints - CONTRIBUTING - decisions/index - -Source Code ------------ -The rdflib source code is hosted on GitHub at ``__ where you can lodge Issues and -create Pull Requests to help improve this community project! - -The RDFlib organisation on GitHub at ``__ maintains this package and a number of other RDF -and RDFlib-related packaged that you might also find useful. - - -.. _further_help_and_contact: - -Further help & Contact ----------------------- - -If you would like help with using RDFlib, rather than developing it, please post -a question on StackOverflow using the tag ``[rdflib]``. A list of existing -``[rdflib]`` tagged questions can be found -`here `_. - -You might also like to join RDFlib's `dev mailing list -`_ or use RDFLib's `GitHub -discussions section `_. - -The chat is available at `gitter `_ or via -matrix `#RDFLib_rdflib:gitter.im -`_. diff --git a/docs/intro_to_creating_rdf.md b/docs/intro_to_creating_rdf.md new file mode 100644 index 000000000..9d4de9655 --- /dev/null +++ b/docs/intro_to_creating_rdf.md @@ -0,0 +1,167 @@ +# Creating RDF triples + +## Creating Nodes + +RDF data is a graph where the nodes are URI references, Blank Nodes or Literals. In RDFLib, these node types are represented by the classes [`URIRef`][rdflib.term.URIRef], [`BNode`][rdflib.term.BNode], and [`Literal`][rdflib.term.Literal]. `URIRefs` and `BNodes` can both be thought of as resources, such a person, a company, a website, etc. + +* A `BNode` is a node where the exact URI is not known - usually a node with identity only in relation to other nodes. +* A `URIRef` is a node where the exact URI is known. 
In addition to representing some subjects and objects in RDF graphs, `URIRef`s are always used to represent properties/predicates
+* `Literals` represent object values, such as a name, a date, a number, etc. The most common literal values are XSD (XML Schema) data types, e.g. string, int... but custom types can be declared too
+
+Nodes can be created by the constructors of the node classes:
+
+```python
+from rdflib import URIRef, BNode, Literal
+
+bob = URIRef("http://example.org/people/Bob")
+linda = BNode()  # a GUID is generated
+
+name = Literal("Bob")  # passing a string
+age = Literal(24)  # passing a python int
+height = Literal(76.5)  # passing a python float
+```
+
+Literals can be created from Python objects; this creates *data-typed literals*. For the details on the mapping, see the literals section of [RDF Terms](rdf_terms.md).
+
+For creating many `URIRefs` in the same `namespace`, i.e. URIs with the same prefix, RDFLib has the [`Namespace`][rdflib.namespace.Namespace] class:
+
+```python
+from rdflib import Namespace
+
+n = Namespace("http://example.org/people/")
+
+n.bob  # == rdflib.term.URIRef("http://example.org/people/bob")
+n.eve  # == rdflib.term.URIRef("http://example.org/people/eve")
+```
+
+This is very useful for schemas where all properties and classes have the same URI prefix. RDFLib defines Namespaces for some common RDF/OWL schemas, including most W3C ones:
+
+```python
+from rdflib.namespace import CSVW, DC, DCAT, DCTERMS, DOAP, FOAF, ODRL2, ORG, OWL, \
+    PROF, PROV, RDF, RDFS, SDO, SH, SKOS, SOSA, SSN, TIME, \
+    VOID, XMLNS, XSD
+
+RDF.type
+# == rdflib.term.URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type")
+
+FOAF.knows
+# == rdflib.term.URIRef("http://xmlns.com/foaf/0.1/knows")
+
+PROF.isProfileOf
+# == rdflib.term.URIRef("http://www.w3.org/ns/dx/prof/isProfileOf")
+
+SOSA.Sensor
+# == rdflib.term.URIRef("http://www.w3.org/ns/sosa/Sensor")
+```
+
+
+## Adding Triples to a graph
+
+We already saw in [intro_to_parsing](intro_to_parsing.md) how triples can be added from files and online locations with the [`parse()`][rdflib.graph.Graph.parse] function.
+
+Triples can also be added within Python code directly, using the [`add()`][rdflib.graph.Graph.add] method, which takes a 3-tuple (a "triple") of RDFLib nodes. Using the nodes and namespaces we defined previously:
+
+```python
+from rdflib import Graph, URIRef, Literal, BNode
+from rdflib.namespace import FOAF, RDF
+
+g = Graph()
+g.bind("foaf", FOAF)
+
+bob = URIRef("http://example.org/people/Bob")
+linda = BNode()  # a GUID is generated
+
+name = Literal("Bob")
+age = Literal(24)
+
+g.add((bob, RDF.type, FOAF.Person))
+g.add((bob, FOAF.name, name))
+g.add((bob, FOAF.age, age))
+g.add((bob, FOAF.knows, linda))
+g.add((linda, RDF.type, FOAF.Person))
+g.add((linda, FOAF.name, Literal("Linda")))
+
+print(g.serialize())
+```
+
+outputs:
+
+```turtle
+@prefix foaf: <http://xmlns.com/foaf/0.1/> .
+@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
+
+<http://example.org/people/Bob> a foaf:Person ;
+    foaf:age 24 ;
+    foaf:knows [ a foaf:Person ;
+            foaf:name "Linda" ] ;
+    foaf:name "Bob" .
+```
+
+For some properties, only one value per resource makes sense (i.e. they are *functional properties*, or have a max-cardinality of 1).
The [`set()`][rdflib.graph.Graph.set] method is useful for this:
+
+```python
+from rdflib import Graph, URIRef, Literal
+from rdflib.namespace import FOAF
+
+g = Graph()
+bob = URIRef("http://example.org/people/Bob")
+
+g.add((bob, FOAF.age, Literal(42)))
+print(f"Bob is {g.value(bob, FOAF.age)}")
+# prints: Bob is 42
+
+g.set((bob, FOAF.age, Literal(43)))  # replaces 42 set above
+print(f"Bob is now {g.value(bob, FOAF.age)}")
+# prints: Bob is now 43
+```
+
+
+[`value()`][rdflib.graph.Graph.value] is the matching query method. It will return a single value for a property, optionally raising an exception if there are more.
+
+You can also add triples by combining entire graphs; see [Set Operations on RDFLib Graphs](intro_to_graphs.md).
+
+## Removing Triples
+
+Similarly, triples can be removed with a call to [`remove()`][rdflib.graph.Graph.remove]. When removing, it is possible to leave parts of the triple unspecified (i.e. passing `None`); this will remove all matching triples:
+
+```python
+g.remove((bob, None, None))  # remove all triples about bob
+```
+
+
+## An example
+
+LiveJournal produces FOAF data for its users, but it seems to use `foaf:member_name` for a person's full name. `foaf:member_name` isn't in FOAF's namespace, and perhaps they should have used `foaf:name`.
+
+To retrieve some LiveJournal data, add a `foaf:name` triple for every `foaf:member_name` and then remove the `foaf:member_name` values, so that the data actually aligns with other FOAF data, we could do this:
+
+```python
+from rdflib import Graph
+from rdflib.namespace import FOAF
+
+g = Graph()
+# get the data
+g.parse("http://danbri.livejournal.com/data/foaf")
+
+# for every foaf:member_name, add foaf:name and remove foaf:member_name
+for s, p, o in g.triples((None, FOAF['member_name'], None)):
+    g.add((s, FOAF['name'], o))
+    g.remove((s, FOAF['member_name'], o))
+```
+
+!!! info "Foaf member name"
+    Since rdflib 5.0.0, using `foaf:member_name` is somewhat prevented in RDFlib since FOAF is declared as a [`ClosedNamespace`][rdflib.namespace.ClosedNamespace] class instance that has a closed set of members and `foaf:member_name` isn't one of them! If LiveJournal had used RDFlib 5.0.0, an error would have been raised for `foaf:member_name` when the triple was created.
+
+
+## Creating Containers & Collections
+
+There are two convenience classes for RDF Containers & Collections which you can use instead of declaring each triple of a Container or a Collection individually:
+
+* [`Container`][rdflib.container.Container] (also `Bag`, `Seq` & `Alt`) and
+* [`Collection`][rdflib.collection.Collection]
+
+See their documentation for how to use them.
diff --git a/docs/intro_to_creating_rdf.rst b/docs/intro_to_creating_rdf.rst
deleted file mode 100644
index 9409dfbe8..000000000
--- a/docs/intro_to_creating_rdf.rst
+++ /dev/null
@@ -1,201 +0,0 @@
-.. _intro_to_creating_rdf:
-
-====================
-Creating RDF triples
-====================
-
-Creating Nodes
---------------
-
-RDF data is a graph where the nodes are URI references, Blank Nodes or Literals. In RDFLib, these node types are
-represented by the classes :class:`~rdflib.term.URIRef`, :class:`~rdflib.term.BNode`, and :class:`~rdflib.term.Literal`.
-``URIRefs`` and ``BNodes`` can both be thought of as resources, such a person, a company, a website, etc.
-
-* A ``BNode`` is a node where the exact URI is not known - usually a node with identity only in relation to other nodes.
-* A ``URIRef`` is a node where the exact URI is known.
In addition to representing some subjects and predicates in RDF graphs, ``URIRef``\s are always used to represent properties/predicates -* ``Literals`` represent object values, such as a name, a date, a number, etc. The most common literal values are XML data types, e.g. string, int... but custom types can be declared too - -Nodes can be created by the constructors of the node classes: - -.. code-block:: python - - from rdflib import URIRef, BNode, Literal - - bob = URIRef("http://example.org/people/Bob") - linda = BNode() # a GUID is generated - - name = Literal("Bob") # passing a string - age = Literal(24) # passing a python int - height = Literal(76.5) # passing a python float - -Literals can be created from Python objects, this creates ``data-typed literals``. For the details on the mapping see -:ref:`rdflibliterals`. - -For creating many ``URIRefs`` in the same ``namespace``, i.e. URIs with the same prefix, RDFLib has the -:class:`rdflib.namespace.Namespace` class - -:: - - from rdflib import Namespace - - n = Namespace("http://example.org/people/") - - n.bob # == rdflib.term.URIRef("http://example.org/people/bob") - n.eve # == rdflib.term.URIRef("http://example.org/people/eve") - - -This is very useful for schemas where all properties and classes have the same URI prefix. RDFLib defines Namespaces for -some common RDF/OWL schemas, including most W3C ones: - -.. code-block:: python - - from rdflib.namespace import CSVW, DC, DCAT, DCTERMS, DOAP, FOAF, ODRL2, ORG, OWL, \ - PROF, PROV, RDF, RDFS, SDO, SH, SKOS, SOSA, SSN, TIME, \ - VOID, XMLNS, XSD - - RDF.type - # == rdflib.term.URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type") - - FOAF.knows - # == rdflib.term.URIRef("http://xmlns.com/foaf/0.1/knows") - - PROF.isProfileOf - # == rdflib.term.URIRef("http://www.w3.org/ns/dx/prof/isProfileOf") - - SOSA.Sensor - # == rdflib.term.URIRef("http://www.w3.org/ns/sosa/Sensor") - - -Adding Triples to a graph -------------------------- - -We already saw in :doc:`intro_to_parsing`, how triples can be added from files and online locations with with the -:meth:`~rdflib.graph.Graph.parse` function. - -Triples can also be added within Python code directly, using the :meth:`~rdflib.graph.Graph.add` function: - -.. automethod:: rdflib.graph.Graph.add - :noindex: - -:meth:`~rdflib.graph.Graph.add` takes a 3-tuple (a "triple") of RDFLib nodes. Using the nodes and -namespaces we defined previously: - -.. code-block:: python - - from rdflib import Graph, URIRef, Literal, BNode - from rdflib.namespace import FOAF, RDF - - g = Graph() - g.bind("foaf", FOAF) - - bob = URIRef("http://example.org/people/Bob") - linda = BNode() # a GUID is generated - - name = Literal("Bob") - age = Literal(24) - - g.add((bob, RDF.type, FOAF.Person)) - g.add((bob, FOAF.name, name)) - g.add((bob, FOAF.age, age)) - g.add((bob, FOAF.knows, linda)) - g.add((linda, RDF.type, FOAF.Person)) - g.add((linda, FOAF.name, Literal("Linda"))) - - print(g.serialize()) - - -outputs: - -.. code-block:: Turtle - - @prefix foaf: . - @prefix xsd: . - - a foaf:Person ; - foaf:age 24 ; - foaf:knows [ a foaf:Person ; - foaf:name "Linda" ] ; - foaf:name "Bob" . - -For some properties, only one value per resource makes sense (i.e they are *functional properties*, or have a -max-cardinality of 1). The :meth:`~rdflib.graph.Graph.set` method is useful for this: - -.. 
code-block:: python - - from rdflib import Graph, URIRef, Literal - from rdflib.namespace import FOAF - - g = Graph() - bob = URIRef("http://example.org/people/Bob") - - g.add((bob, FOAF.age, Literal(42))) - print(f"Bob is {g.value(bob, FOAF.age)}") - # prints: Bob is 42 - - g.set((bob, FOAF.age, Literal(43))) # replaces 42 set above - print(f"Bob is now {g.value(bob, FOAF.age)}") - # prints: Bob is now 43 - - -:meth:`rdflib.graph.Graph.value` is the matching query method. It will return a single value for a property, optionally -raising an exception if there are more. - -You can also add triples by combining entire graphs, see :ref:`graph-setops`. - - -Removing Triples ----------------- - -Similarly, triples can be removed by a call to :meth:`~rdflib.graph.Graph.remove`: - -.. automethod:: rdflib.graph.Graph.remove - :noindex: - -When removing, it is possible to leave parts of the triple unspecified (i.e. passing ``None``), this will remove all -matching triples: - -.. code-block:: python - - g.remove((bob, None, None)) # remove all triples about bob - - -An example ----------- - -LiveJournal produces FOAF data for their users, but they seem to use -``foaf:member_name`` for a person's full name but ``foaf:member_name`` -isn't in FOAF's namespace and perhaps they should have used ``foaf:name`` - -To retrieve some LiveJournal data, add a ``foaf:name`` for every -``foaf:member_name`` and then remove the ``foaf:member_name`` values to -ensure the data actually aligns with other FOAF data, we could do this: - -.. code-block:: python - - from rdflib import Graph - from rdflib.namespace import FOAF - - g = Graph() - # get the data - g.parse("http://danbri.livejournal.com/data/foaf") - - # for every foaf:member_name, add foaf:name and remove foaf:member_name - for s, p, o in g.triples((None, FOAF['member_name'], None)): - g.add((s, FOAF['name'], o)) - g.remove((s, FOAF['member_name'], o)) - -.. note:: Since rdflib 5.0.0, using ``foaf:member_name`` is somewhat prevented in RDFlib since FOAF is declared - as a :meth:`~rdflib.namespace.ClosedNamespace` class instance that has a closed set of members and - ``foaf:member_name`` isn't one of them! If LiveJournal had used RDFlib 5.0.0, an error would have been raised for - ``foaf:member_name`` when the triple was created. - - -Creating Containers & Collections ---------------------------------- -There are two convenience classes for RDF Containers & Collections which you can use instead of declaring each -triple of a Containers or a Collections individually: - - * :meth:`~rdflib.container.Container` (also ``Bag``, ``Seq`` & ``Alt``) and - * :meth:`~rdflib.collection.Collection` - -See their documentation for how. diff --git a/docs/intro_to_graphs.md b/docs/intro_to_graphs.md new file mode 100644 index 000000000..115bb1e65 --- /dev/null +++ b/docs/intro_to_graphs.md @@ -0,0 +1,101 @@ +# Navigating Graphs + +An RDF Graph is a set of RDF triples, and we try to mirror exactly this in RDFLib. The Python [`Graph`][rdflib.graph.Graph] tries to emulate a container type. + +## Graphs as Iterators + +RDFLib graphs override [`__iter__()`][rdflib.graph.Graph.__iter__] in order to support iteration over the contained triples: + +```python +for s, p, o in someGraph: + if not (s, p, o) in someGraph: + raise Exception("Iterator / Container Protocols are Broken!!") +``` + +This loop iterates through all the subjects(s), predicates (p) & objects (o) in `someGraph`. 
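+
+Because a graph emulates a container, the other container idioms work as you would expect. A minimal sketch (assuming `someGraph` is any populated [`Graph`][rdflib.graph.Graph]):
+
+```python
+print(len(someGraph))      # the number of triples in the graph
+print(bool(someGraph))     # False only when the graph is empty
+triples = list(someGraph)  # materialise the triples as a list of 3-tuples
+```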
+
+## Contains check
+
+Graphs implement [`__contains__()`][rdflib.graph.Graph.__contains__], so you can check if a triple is in a graph with the `triple in graph` syntax:
+
+```python
+from rdflib import URIRef
+from rdflib.namespace import RDF, FOAF
+
+bob = URIRef("http://example.org/people/bob")
+if (bob, RDF.type, FOAF.Person) in graph:
+    print("This graph knows that Bob is a person!")
+```
+
+Note that this triple does not have to be completely bound:
+
+```python
+if (bob, None, None) in graph:
+    print("This graph contains triples about Bob!")
+```
+
+## Set Operations on RDFLib Graphs
+
+Graphs override several Python operators: [`__iadd__()`][rdflib.graph.Graph.__iadd__], [`__isub__()`][rdflib.graph.Graph.__isub__], etc. This supports addition, subtraction and other set-operations on Graphs:
+
+| operation | effect |
+|-----------|--------|
+| `G1 + G2` | return new graph with union (triples in both) |
+| `G1 += G2` | in place union / addition |
+| `G1 - G2` | return new graph with difference (triples in G1, not in G2) |
+| `G1 -= G2` | in place difference / subtraction |
+| `G1 & G2` | intersection (triples in both graphs) |
+| `G1 ^ G2` | xor (triples in either G1 or G2, but not in both) |
+
+!!! warning
+    Set-operations on graphs assume Blank Nodes are shared between graphs. This may or may not be what you want. See [merging](merging.md) for details.
+
+## Basic Triple Matching
+
+Instead of iterating through all triples, RDFLib graphs support basic triple pattern matching with a [`triples()`][rdflib.graph.Graph.triples] function. This function is a generator of triples that match a pattern given by arguments, i.e. arguments restrict the triples that are returned. Terms that are `None` are treated as a wildcard. For example:
+
+```python
+g.parse("some_foaf.ttl")
+# find all subjects (s) of type (rdf:type) person (foaf:Person)
+for s, p, o in g.triples((None, RDF.type, FOAF.Person)):
+    print(f"{s} is a person")
+
+# find all subjects of any type
+for s, p, o in g.triples((None, RDF.type, None)):
+    print(f"{s} is a {o}")
+
+# create a graph
+bobgraph = Graph()
+# add all triples with subject 'bob'
+bobgraph += g.triples((bob, None, None))
+```
+
+If you are not interested in whole triples, you can get only the bits you want with the methods [`objects()`][rdflib.graph.Graph.objects], [`subjects()`][rdflib.graph.Graph.subjects], [`predicates()`][rdflib.graph.Graph.predicates], [`predicate_objects()`][rdflib.graph.Graph.predicate_objects], etc. Each takes parameters for the components of the triple to constrain:
+
+```python
+for person in g.subjects(RDF.type, FOAF.Person):
+    print(f"{person} is a person")
+```
+
+Finally, for some properties, only one value per resource makes sense (i.e. they are *functional properties*, or have a max-cardinality of 1).
The [`value()`][rdflib.graph.Graph.value] method is useful for this, as it returns just a single node, not a generator: + +```python +# get any name of bob +name = g.value(bob, FOAF.name) +# get the one person that knows bob and raise an exception if more are found +person = g.value(predicate=FOAF.knows, object=bob, any=False) +``` + + +## Graph methods for accessing triples + +Here is a list of all convenience methods for querying Graphs: + +* [`triples()`][rdflib.graph.Graph.triples] +* [`value()`][rdflib.graph.Graph.value] +* [`subjects()`][rdflib.graph.Graph.subjects] +* [`objects()`][rdflib.graph.Graph.objects] +* [`predicates()`][rdflib.graph.Graph.predicates] +* [`subject_objects()`][rdflib.graph.Graph.subject_objects] +* [`subject_predicates()`][rdflib.graph.Graph.subject_predicates] +* [`predicate_objects()`][rdflib.graph.Graph.predicate_objects] diff --git a/docs/intro_to_graphs.rst b/docs/intro_to_graphs.rst deleted file mode 100644 index c061a3c7b..000000000 --- a/docs/intro_to_graphs.rst +++ /dev/null @@ -1,131 +0,0 @@ -.. _rdflib_graph: Navigating Graphs - -================= -Navigating Graphs -================= - -An RDF Graph is a set of RDF triples, and we try to mirror exactly this in RDFLib. The Python -:meth:`~rdflib.graph.Graph` tries to emulate a container type. - -Graphs as Iterators -------------------- - -RDFLib graphs override :meth:`~rdflib.graph.Graph.__iter__` in order to support iteration over the contained triples: - -.. code-block:: python - - for s, p, o in someGraph: - if not (s, p, o) in someGraph: - raise Exception("Iterator / Container Protocols are Broken!!") - -This loop iterates through all the subjects(s), predicates (p) & objects (o) in ``someGraph``. - -Contains check --------------- - -Graphs implement :meth:`~rdflib.graph.Graph.__contains__`, so you can check if a triple is in a graph with a -``triple in graph`` syntax: - -.. code-block:: python - - from rdflib import URIRef - from rdflib.namespace import RDF - - bob = URIRef("http://example.org/people/bob") - if (bob, RDF.type, FOAF.Person) in graph: - print("This graph knows that Bob is a person!") - -Note that this triple does not have to be completely bound: - -.. code-block:: python - - if (bob, None, None) in graph: - print("This graph contains triples about Bob!") - -.. _graph-setops: - -Set Operations on RDFLib Graphs -------------------------------- - -Graphs override several pythons operators: :meth:`~rdflib.graph.Graph.__iadd__`, :meth:`~rdflib.graph.Graph.__isub__`, -etc. This supports addition, subtraction and other set-operations on Graphs: - -============ ============================================================= -operation effect -============ ============================================================= -``G1 + G2`` return new graph with union (triples on both) -``G1 += G2`` in place union / addition -``G1 - G2`` return new graph with difference (triples in G1, not in G2) -``G1 -= G2`` in place difference / subtraction -``G1 & G2`` intersection (triples in both graphs) -``G1 ^ G2`` xor (triples in either G1 or G2, but not in both) -============ ============================================================= - -.. warning:: Set-operations on graphs assume Blank Nodes are shared between graphs. This may or may not be what you want. See :doc:`merging` for details. - -Basic Triple Matching ---------------------- - -Instead of iterating through all triples, RDFLib graphs support basic triple pattern matching with a -:meth:`~rdflib.graph.Graph.triples` function. 
This function is a generator of triples that match a pattern given by
-arguments, i.e. arguments restrict the triples that are returned. Terms that are :data:`None` are treated as a wildcard.
-For example:
-
-.. code-block:: python
-
-    g.parse("some_foaf.ttl")
-    # find all subjects (s) of type (rdf:type) person (foaf:Person)
-    for s, p, o in g.triples((None, RDF.type, FOAF.Person)):
-        print(f"{s} is a person")
-
-    # find all subjects of any type
-    for s, p, o in g.triples((None, RDF.type, None)):
-        print(f"{s} is a {o}")
-
-    # create a graph
-    bobgraph = Graph()
-    # add all triples with subject 'bob'
-    bobgraph += g.triples((bob, None, None))
-
-If you are not interested in whole triples, you can get only the bits you want with the methods
-:meth:`~rdflib.graph.Graph.objects`, :meth:`~rdflib.graph.Graph.subjects`, :meth:`~rdflib.graph.Graph.predicates`,
-:meth:`~rdflib.graph.Graph.predicate_objects`, etc. Each take parameters for the components of the triple to constraint:
-
-.. code-block:: python
-
-    for person in g.subjects(RDF.type, FOAF.Person):
-        print("{} is a person".format(person))
-
-Finally, for some properties, only one value per resource makes sense (i.e they are *functional properties*, or have a
-max-cardinality of 1). The :meth:`~rdflib.graph.Graph.value` method is useful for this, as it returns just a single
-node, not a generator:
-
-.. code-block:: python
-
-    # get any name of bob
-    name = g.value(bob, FOAF.name)
-    # get the one person that knows bob and raise an exception if more are found
-    person = g.value(predicate=FOAF.knows, object=bob, any=False)
-
-
-:class:`~rdflib.graph.Graph` methods for accessing triples
------------------------------------------------------------
-
-Here is a list of all convenience methods for querying Graphs:
-
-.. automethod:: rdflib.graph.Graph.triples
-    :noindex:
-.. automethod:: rdflib.graph.Graph.value
-    :noindex:
-.. automethod:: rdflib.graph.Graph.subjects
-    :noindex:
-.. automethod:: rdflib.graph.Graph.objects
-    :noindex:
-.. automethod:: rdflib.graph.Graph.predicates
-    :noindex:
-.. automethod:: rdflib.graph.Graph.subject_objects
-    :noindex:
-.. automethod:: rdflib.graph.Graph.subject_predicates
-    :noindex:
-.. automethod:: rdflib.graph.Graph.predicate_objects
-    :noindex:
diff --git a/docs/intro_to_parsing.md b/docs/intro_to_parsing.md
new file mode 100644
index 000000000..92b672da7
--- /dev/null
+++ b/docs/intro_to_parsing.md
@@ -0,0 +1,134 @@
+# Loading and saving RDF
+
+## Reading RDF files
+
+RDF data can be represented using various syntaxes (`turtle`, `rdf/xml`, `n3`, `n-triples`, `trix`, `JSON-LD`, etc.). The simplest format is `ntriples`, which is a triple-per-line format.
+
+Create the file `demo.nt` in the current directory with these two lines in it:
+
+```turtle
+<http://example.com/drewp> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://xmlns.com/foaf/0.1/Person> .
+<http://example.com/drewp> <http://example.com/says> "Hello World" .
+```
+
+On line 1, this file says "drewp is a FOAF Person". On line 2, it says "drewp says 'Hello World'".
+
+RDFLib can guess what format the file is by the file ending (".nt" is commonly used for n-triples), so you can just use [`parse()`][rdflib.graph.Graph.parse] to read in the file. If the file had a non-standard RDF file ending, you could set the keyword-parameter `format` to specify either an Internet Media Type or the format name (a [list of available parsers][rdflib.plugins.parsers] is available).
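+
+For instance, if the same data lived in a file with an unhelpful extension, the format can be forced. A short sketch (the file name `demo.data` is hypothetical):
+
+```python
+from rdflib import Graph
+
+g = Graph()
+g.parse("demo.data", format="nt")  # force the N-Triples parser by format name
+# or, equivalently, by Internet Media Type:
+# g.parse("demo.data", format="application/n-triples")
+```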
+
+In an interactive python interpreter, try this:
+
+```python
+from rdflib import Graph
+
+g = Graph()
+g.parse("demo.nt")
+
+print(len(g))
+# prints: 2
+
+import pprint
+for stmt in g:
+    pprint.pprint(stmt)
+# prints:
+# (rdflib.term.URIRef('http://example.com/drewp'),
+#  rdflib.term.URIRef('http://example.com/says'),
+#  rdflib.term.Literal('Hello World'))
+# (rdflib.term.URIRef('http://example.com/drewp'),
+#  rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
+#  rdflib.term.URIRef('http://xmlns.com/foaf/0.1/Person'))
+```
+
+The final lines show how RDFLib represents the two statements in the file: the statements themselves are just length-3 tuples ("triples") and the subjects, predicates, and objects of the triples are all rdflib types.
+
+## Reading remote RDF
+
+Reading graphs from the Internet is easy:
+
+```python
+from rdflib import Graph
+
+g = Graph()
+g.parse("http://www.w3.org/People/Berners-Lee/card")
+print(len(g))
+# prints: 86
+```
+
+[`parse()`][rdflib.Graph.parse] can process local files, remote data via a URL, as in this example, or RDF data in a string (using the `data` parameter).
+
+## Saving RDF
+
+To store a graph in a file, use the [`serialize()`][rdflib.Graph.serialize] function:
+
+```python
+from rdflib import Graph
+
+g = Graph()
+g.parse("http://www.w3.org/People/Berners-Lee/card")
+g.serialize(destination="tbl.ttl")
+```
+
+This parses data from http://www.w3.org/People/Berners-Lee/card and stores it in a file `tbl.ttl` in the current directory using the turtle format, which is the default RDF serialization (as of rdflib 6.0.0).
+
+To read the same data and to save it as an RDF/XML format string in the variable `v`, do this:
+
+```python
+from rdflib import Graph
+
+g = Graph()
+g.parse("http://www.w3.org/People/Berners-Lee/card")
+v = g.serialize(format="xml")
+```
+
+The following table lists the RDF formats you can serialize data to with rdflib, out of the box, and the `format=KEYWORD` keyword used to reference them within `serialize()`:
+
+| RDF Format | Keyword | Notes |
+|------------|---------|-------|
+| Turtle | turtle, ttl or turtle2 | turtle2 is just turtle with more spacing & linebreaks |
+| RDF/XML | xml or pretty-xml | Was the default format, rdflib < 6.0.0 |
+| JSON-LD | json-ld | There are further options for compact syntax and other JSON-LD variants |
+| N-Triples | ntriples, nt or nt11 | nt11 is exactly like nt, only utf8 encoded |
+| Notation-3 | n3 | N3 is a superset of Turtle that also caters for rules and a few other things |
+| Trig | trig | Turtle-like format for RDF triples + context (RDF quads) and thus multiple graphs |
+| Trix | trix | RDF/XML-like format for RDF quads |
+| N-Quads | nquads | N-Triples-like format for RDF quads |
+
+## Working with multi-graphs
+
+To read and query multi-graphs, that is RDF data that is context-aware, you need to use rdflib's [`Dataset`][rdflib.Dataset] class. This is an extension to [`Graph`][rdflib.Graph] that knows all about quads (triples + graph IDs).
+
+If you had this multi-graph data file (in the `trig` format, using the new-style `PREFIX` statement rather than the older `@prefix`):
+
+```turtle
+PREFIX eg: <http://example.com/person/>
+PREFIX foaf: <http://xmlns.com/foaf/0.1/>
+
+eg:graph-1 {
+    eg:drewp a foaf:Person .
+    eg:drewp eg:says "Hello World" .
+}
+
+eg:graph-2 {
+    eg:nick a foaf:Person .
+    eg:nick eg:says "Hi World" .
+} +``` + +You could parse the file and query it like this: + +```python +from rdflib import Dataset +from rdflib.namespace import RDF + +g = Dataset() +g.parse("demo.trig") + +for s, p, o, g in g.quads((None, RDF.type, None, None)): + print(s, g) +``` + +This will print out: + +``` +http://example.com/person/drewp http://example.com/person/graph-1 +http://example.com/person/nick http://example.com/person/graph-2 +``` diff --git a/docs/intro_to_parsing.rst b/docs/intro_to_parsing.rst deleted file mode 100644 index 8b011c53f..000000000 --- a/docs/intro_to_parsing.rst +++ /dev/null @@ -1,158 +0,0 @@ -.. _intro_to_parsing: - -====================== -Loading and saving RDF -====================== - -Reading RDF files ------------------ - -RDF data can be represented using various syntaxes (``turtle``, ``rdf/xml``, ``n3``, ``n-triples``, -``trix``, ``JSON-LD``, etc.). The simplest format is -``ntriples``, which is a triple-per-line format. - -Create the file :file:`demo.nt` in the current directory with these two lines in it: - -.. code-block:: Turtle - - . - "Hello World" . - -On line 1 this file says "drewp is a FOAF Person:. On line 2 it says "drep says "Hello World"". - -RDFLib can guess what format the file is by the file ending (".nt" is commonly used for n-triples) so you can just use -:meth:`~rdflib.graph.Graph.parse` to read in the file. If the file had a non-standard RDF file ending, you could set the -keyword-parameter ``format`` to specify either an Internet Media Type or the format name (a :doc:`list of available -parsers ` is available). - -In an interactive python interpreter, try this: - -.. code-block:: python - - from rdflib import Graph - - g = Graph() - g.parse("demo.nt") - - print(len(g)) - # prints: 2 - - import pprint - for stmt in g: - pprint.pprint(stmt) - # prints: - # (rdflib.term.URIRef('http://example.com/drewp'), - # rdflib.term.URIRef('http://example.com/says'), - # rdflib.term.Literal('Hello World')) - # (rdflib.term.URIRef('http://example.com/drewp'), - # rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'), - # rdflib.term.URIRef('http://xmlns.com/foaf/0.1/Person')) - -The final lines show how RDFLib represents the two statements in the -file: the statements themselves are just length-3 tuples ("triples") and the -subjects, predicates, and objects of the triples are all rdflib types. - -Reading remote RDF ------------------- - -Reading graphs from the Internet is easy: - -.. code-block:: python - - from rdflib import Graph - - g = Graph() - g.parse("http://www.w3.org/People/Berners-Lee/card") - print(len(g)) - # prints: 86 - -:func:`rdflib.Graph.parse` can process local files, remote data via a URL, as in this example, or RDF data in a string -(using the ``data`` parameter). - - -Saving RDF ----------- - -To store a graph in a file, use the :func:`rdflib.Graph.serialize` function: - -.. code-block:: python - - from rdflib import Graph - - g = Graph() - g.parse("http://www.w3.org/People/Berners-Lee/card") - g.serialize(destination="tbl.ttl") - -This parses data from http://www.w3.org/People/Berners-Lee/card and stores it in a file ``tbl.ttl`` in this directory -using the turtle format, which is the default RDF serialization (as of rdflib 6.0.0). - -To read the same data and to save it as an RDF/XML format string in the variable ``v``, do this: - -.. 
code-block:: python - - from rdflib import Graph - - g = Graph() - g.parse("http://www.w3.org/People/Berners-Lee/card") - v = g.serialize(format="xml") - - -The following table lists the RDF formats you can serialize data to with rdflib, out of the box, and the ``format=KEYWORD`` keyword used to reference them within ``serialize()``: - -.. csv-table:: - :header: "RDF Format", "Keyword", "Notes" - - "Turtle", "turtle, ttl or turtle2", "turtle2 is just turtle with more spacing & linebreaks" - "RDF/XML", "xml or pretty-xml", "Was the default format, rdflib < 6.0.0" - "JSON-LD", "json-ld", "There are further options for compact syntax and other JSON-LD variants" - "N-Triples", "ntriples, nt or nt11", "nt11 is exactly like nt, only utf8 encoded" - "Notation-3","n3", "N3 is a superset of Turtle that also caters for rules and a few other things" - - "Trig", "trig", "Turtle-like format for RDF triples + context (RDF quads) and thus multiple graphs" - "Trix", "trix", "RDF/XML-like format for RDF quads" - "N-Quads", "nquads", "N-Triples-like format for RDF quads" - -Working with multi-graphs -------------------------- - -To read and query multi-graphs, that is RDF data that is context-aware, you need to use rdflib's -:class:`rdflib.Dataset` class. This an extension to :class:`rdflib.Graph` that -know all about quads (triples + graph IDs). - -If you had this multi-graph data file (in the ``trig`` format, using new-style ``PREFIX`` statement (not the older -``@prefix``): - -.. code-block:: Turtle - - PREFIX eg: - PREFIX foaf: - - eg:graph-1 { - eg:drewp a foaf:Person . - eg:drewp eg:says "Hello World" . - } - - eg:graph-2 { - eg:nick a foaf:Person . - eg:nick eg:says "Hi World" . - } - -You could parse the file and query it like this: - -.. code-block:: python - - from rdflib import Dataset - from rdflib.namespace import RDF - - g = Dataset() - g.parse("demo.trig") - - for s, p, o, g in g.quads((None, RDF.type, None, None)): - print(s, g) - -This will print out: - -.. code-block:: - - http://example.com/person/drewp http://example.com/person/graph-1 - http://example.com/person/nick http://example.com/person/graph-2 diff --git a/docs/intro_to_sparql.md b/docs/intro_to_sparql.md new file mode 100644 index 000000000..f4cdf0ea6 --- /dev/null +++ b/docs/intro_to_sparql.md @@ -0,0 +1,159 @@ +# Querying with SPARQL + +## Run a Query + +The RDFLib comes with an implementation of the [SPARQL 1.1 Query](http://www.w3.org/TR/sparql11-query/) and [SPARQL 1.1 Update](http://www.w3.org/TR/sparql11-update/) query languages. + +Queries can be evaluated against a graph with the [`query()`][rdflib.graph.Graph.query] method, and updates with [`update()`][rdflib.graph.Graph.update]. + +The query method returns a [`Result`][rdflib.query.Result] instance. For SELECT queries, iterating over this returns [`ResultRow`][rdflib.query.ResultRow] instances, each containing a set of variable bindings. For `CONSTRUCT`/`DESCRIBE` queries, iterating over the result object gives the triples. For `ASK` queries, iterating will yield the single boolean answer, or evaluating the result object in a boolean-context (i.e. `bool(result)`) + +For example... + +```python +import rdflib +g = rdflib.Graph() +g.parse("http://danbri.org/foaf.rdf#") + +knows_query = """ +SELECT DISTINCT ?aname ?bname +WHERE { + ?a foaf:knows ?b . + ?a foaf:name ?aname . + ?b foaf:name ?bname . 
+}""" + +qres = g.query(knows_query) +for row in qres: + print(f"{row.aname} knows {row.bname}") +``` + +The results are tuples of values in the same order as your `SELECT` arguments. Alternatively, the values can be accessed by variable name, either as attributes, or as items, e.g. `row.b` and `row["b"]` are equivalent. The above, given the appropriate data, would print something like: + +```text +Timothy Berners-Lee knows Edd Dumbill +Timothy Berners-Lee knows Jennifer Golbeck +Timothy Berners-Lee knows Nicholas Gibbins +... +``` + +As an alternative to using `SPARQL`'s `PREFIX`, namespace bindings can be passed in with the `initNs` kwarg, see [namespaces_and_bindings](namespaces_and_bindings.md). + +Variables can also be pre-bound, using the `initBindings` kwarg which can pass in a `dict` of initial bindings. This is particularly useful for prepared queries, as described below. + +## Update Queries + +Update queries are performed just like reading queries but using the [`update()`][rdflib.graph.Graph.update] method. An example: + +```python +from rdflib import Graph + +# Create a Graph, add in some test data +g = Graph() +g.parse( + data=""" + a . + a . + """, + format="turtle" +) + +# Select all the things (s) that are of type (rdf:type) c: +qres = g.query("""SELECT ?s WHERE { ?s a }""") + +for row in qres: + print(f"{row.s}") +# prints: +# x: +# y: + +# Add in a new triple using SPARQL UPDATE +g.update("""INSERT DATA { a }""") + +# Select all the things (s) that are of type (rdf:type) c: +qres = g.query("""SELECT ?s WHERE { ?s a }""") + +print("After update:") +for row in qres: + print(f"{row.s}") +# prints: +# x: +# y: +# z: + +# Change type of from to +g.update(""" + DELETE { a } + INSERT { a } + WHERE { a } + """) +print("After second update:") +qres = g.query("""SELECT ?s ?o WHERE { ?s a ?o }""") +for row in qres: + print(f"{row.s} a {row.o}") +# prints: +# x: a c: +# z: a c: +# y: a d: +``` + +## Querying a Remote Service + +The `SERVICE` keyword of SPARQL 1.1 can send a query to a remote SPARQL endpoint. + +```python +import rdflib + +g = rdflib.Graph() +qres = g.query( + """ + SELECT ?s + WHERE { + SERVICE { + ?s a ?o . + } + } + LIMIT 3 + """ +) + +for row in qres: + print(row.s) +``` + +This example sends a query to [DBPedia](https://dbpedia.org/)'s SPARQL endpoint service so that it can run the query and then send back the result: + +```text + + + +``` + +## Prepared Queries + +RDFLib lets you *prepare* queries before execution, this saves re-parsing and translating the query into SPARQL Algebra each time. + +The method [`prepareQuery()`][rdflib.plugins.sparql.prepareQuery] takes a query as a string and will return a [`Query`][rdflib.plugins.sparql.sparql.Query] object. This can then be passed to the [`query()`][rdflib.graph.Graph.query] method. + +The `initBindings` kwarg can be used to pass in a `dict` of initial bindings: + +```python +q = prepareQuery( + "SELECT ?s WHERE { ?person foaf:knows ?s .}", + initNs = { "foaf": FOAF } +) + +g = rdflib.Graph() +g.parse("foaf.rdf") + +tim = rdflib.URIRef("http://www.w3.org/People/Berners-Lee/card#i") + +for row in g.query(q, initBindings={'person': tim}): + print(row) +``` + +## Custom Evaluation Functions + +For experts, it is possible to override how bits of SPARQL algebra are evaluated. 
By using the [setuptools entry-point](http://pythonhosted.org/distribute/setuptools.html#dynamic-discovery-of-services-and-plugins) `rdf.plugins.sparqleval`, or simply adding to an entry to [`CUSTOM_EVALS`][rdflib.plugins.sparql.CUSTOM_EVALS], a custom function can be registered. The function will be called for each algebra component and may raise `NotImplementedError` to indicate that this part should be handled by the default implementation. + +See [`examples/custom_eval.py`][examples.custom_eval] diff --git a/docs/intro_to_sparql.rst b/docs/intro_to_sparql.rst deleted file mode 100644 index f2cbf5a69..000000000 --- a/docs/intro_to_sparql.rst +++ /dev/null @@ -1,207 +0,0 @@ -.. _intro_to_using_sparql: - -==================== -Querying with SPARQL -==================== - - -Run a Query -^^^^^^^^^^^ - -The RDFLib comes with an implementation of the `SPARQL 1.1 Query -`_ and `SPARQL 1.1 Update -`_ query languages. - -Queries can be evaluated against a graph with the -:meth:`rdflib.graph.Graph.query` method, and updates with -:meth:`rdflib.graph.Graph.update`. - -The query method returns a :class:`rdflib.query.Result` instance. For -SELECT queries, iterating over this returns -:class:`rdflib.query.ResultRow` instances, each containing a set of -variable bindings. For ``CONSTRUCT``/``DESCRIBE`` queries, iterating over the -result object gives the triples. For ``ASK`` queries, iterating will yield -the single boolean answer, or evaluating the result object in a -boolean-context (i.e. ``bool(result)``) - -For example... - -.. code-block:: python - - import rdflib - g = rdflib.Graph() - g.parse("http://danbri.org/foaf.rdf#") - - knows_query = """ - SELECT DISTINCT ?aname ?bname - WHERE { - ?a foaf:knows ?b . - ?a foaf:name ?aname . - ?b foaf:name ?bname . - }""" - - qres = g.query(knows_query) - for row in qres: - print(f"{row.aname} knows {row.bname}") - - - -The results are tuples of values in the same order as your ``SELECT`` -arguments. Alternatively, the values can be accessed by variable -name, either as attributes, or as items, e.g. ``row.b`` and ``row["b"]`` are -equivalent. The above, given the appropriate data, would print something like: - -.. code-block:: text - - Timothy Berners-Lee knows Edd Dumbill - Timothy Berners-Lee knows Jennifer Golbeck - Timothy Berners-Lee knows Nicholas Gibbins - ... - -As an alternative to using ``SPARQL``\s ``PREFIX``, namespace -bindings can be passed in with the ``initNs`` kwarg, see -:doc:`namespaces_and_bindings`. - -Variables can also be pre-bound, using the ``initBindings`` kwarg which can -pass in a ``dict`` of initial bindings. This is particularly -useful for prepared queries, as described below. - -Update Queries -^^^^^^^^^^^^^^ - -Update queries are performed just like reading queries but using the :meth:`rdflib.graph.Graph.update` method. An -example: - -.. code-block:: python - - from rdflib import Graph - - # Create a Graph, add in some test data - g = Graph() - g.parse( - data=""" - a . - a . 
- """, - format="turtle" - ) - - # Select all the things (s) that are of type (rdf:type) c: - qres = g.query("""SELECT ?s WHERE { ?s a }""") - - for row in qres: - print(f"{row.s}") - # prints: - # x: - # y: - - # Add in a new triple using SPARQL UPDATE - g.update("""INSERT DATA { a }""") - - # Select all the things (s) that are of type (rdf:type) c: - qres = g.query("""SELECT ?s WHERE { ?s a }""") - - print("After update:") - for row in qres: - print(f"{row.s}") - # prints: - # x: - # y: - # z: - - # Change type of from to - g.update(""" - DELETE { a } - INSERT { a } - WHERE { a } - """) - print("After second update:") - qres = g.query("""SELECT ?s ?o WHERE { ?s a ?o }""") - for row in qres: - print(f"{row.s} a {row.o}") - # prints: - # x: a c: - # z: a c: - # y: a d: - - - -Querying a Remote Service -^^^^^^^^^^^^^^^^^^^^^^^^^ - -The ``SERVICE`` keyword of SPARQL 1.1 can send a query to a remote SPARQL endpoint. - -.. code-block:: python - - import rdflib - - g = rdflib.Graph() - qres = g.query( - """ - SELECT ?s - WHERE { - SERVICE { - ?s a ?o . - } - } - LIMIT 3 - """ - ) - - for row in qres: - print(row.s) - - - -This example sends a query to `DBPedia `_'s SPARQL endpoint service so that it can run the query -and then send back the result: - -.. code-block:: text - - - - - -Prepared Queries -^^^^^^^^^^^^^^^^ - -RDFLib lets you *prepare* queries before execution, this saves -re-parsing and translating the query into SPARQL Algebra each time. - -The method :meth:`rdflib.plugins.sparql.prepareQuery` takes a query as -a string and will return a :class:`rdflib.plugins.sparql.sparql.Query` -object. This can then be passed to the -:meth:`rdflib.graph.Graph.query` method. - -The ``initBindings`` kwarg can be used to pass in a ``dict`` of -initial bindings: - -.. code-block:: python - - q = prepareQuery( - "SELECT ?s WHERE { ?person foaf:knows ?s .}", - initNs = { "foaf": FOAF } - ) - - g = rdflib.Graph() - g.parse("foaf.rdf") - - tim = rdflib.URIRef("http://www.w3.org/People/Berners-Lee/card#i") - - for row in g.query(q, initBindings={'person': tim}): - print(row) - - -Custom Evaluation Functions -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -For experts, it is possible to override how bits of SPARQL algebra are -evaluated. By using the `setuptools entry-point -`_ -``rdf.plugins.sparqleval``, or simply adding to an entry to -:data:`rdflib.plugins.sparql.CUSTOM_EVALS`, a custom function can be -registered. The function will be called for each algebra component and -may raise ``NotImplementedError`` to indicate that this part should be -handled by the default implementation. - -See :file:`examples/custom_eval.py` diff --git a/docs/merging.md b/docs/merging.md new file mode 100644 index 000000000..25a970baf --- /dev/null +++ b/docs/merging.md @@ -0,0 +1,39 @@ +# Merging graphs + +Graphs share blank nodes only if they are derived from graphs described by documents or other structures (such as an RDF dataset) that explicitly provide for the sharing of blank nodes between different RDF graphs. Simply downloading a web document does not mean that the blank nodes in a resulting RDF graph are the same as the blank nodes coming from other downloads of the same document or from the same RDF source. + +RDF applications which manipulate concrete syntaxes for RDF which use blank node identifiers should take care to keep track of the identity of the blank nodes they identify. 
Blank node identifiers often have a local scope, so when RDF from different sources is combined, identifiers may have to be changed in order to avoid accidental conflation of distinct blank nodes.
+
+For example, two documents may both use the blank node identifier "_:x" to identify a blank node, but unless these documents are in a shared identifier scope or are derived from a common source, the occurrences of "_:x" in one document will identify a different blank node than the one in the graph described by the other document. When graphs are formed by combining RDF from multiple sources, it may be necessary to standardize apart the blank node identifiers by replacing them by others which do not occur in the other document(s).
+
+*(copied directly from <https://www.w3.org/TR/rdf11-mt/#shared-blank-nodes-unions-and-merges>)*
+
+In RDFLib, blank nodes are given unique IDs when parsing, so graph merging can be done by simply reading several files into the same graph:
+
+```python
+from rdflib import Graph
+
+graph = Graph()
+
+graph.parse(input1)
+graph.parse(input2)
+```
+
+`graph` now contains the merged graph of `input1` and `input2`.
+
+!!! warning "Blank Node Collision"
+    However, the set-theoretic graph operations in RDFLib are assumed to be performed in sub-graphs of some larger database (for instance, in the context of a [`Dataset`][rdflib.graph.Dataset]) and assume shared blank node IDs, and therefore do NOT do _correct_ merging, i.e.:
+
+    ```python
+    from rdflib import Graph
+
+    g1 = Graph()
+    g1.parse(input1)
+
+    g2 = Graph()
+    g2.parse(input2)
+
+    graph = g1 + g2
+    ```
+
+    This may cause unwanted collisions of blank nodes in `graph`.
diff --git a/docs/merging.rst b/docs/merging.rst
deleted file mode 100644
index 1721d9206..000000000
--- a/docs/merging.rst
+++ /dev/null
@@ -1,44 +0,0 @@
-.. _merging_graphs:
-
-==============
-Merging graphs
-==============
-
-    Graphs share blank nodes only if they are derived from graphs described by documents or other structures (such as an RDF dataset) that explicitly provide for the sharing of blank nodes between different RDF graphs. Simply downloading a web document does not mean that the blank nodes in a resulting RDF graph are the same as the blank nodes coming from other downloads of the same document or from the same RDF source.
-
-RDF applications which manipulate concrete syntaxes for RDF which use blank node identifiers should take care to keep track of the identity of the blank nodes they identify. Blank node identifiers often have a local scope, so when RDF from different sources is combined, identifiers may have to be changed in order to avoid accidental conflation of distinct blank nodes.
-
-For example, two documents may both use the blank node identifier "_:x" to identify a blank node, but unless these documents are in a shared identifier scope or are derived from a common source, the occurrences of "_:x" in one document will identify a different blank node than the one in the graph described by the other document. When graphs are formed by combining RDF from multiple sources, it may be necessary to standardize apart the blank node identifiers by replacing them by others which do not occur in the other document(s).
-
-*(copied directly from https://www.w3.org/TR/rdf11-mt/#shared-blank-nodes-unions-and-merges)*
-
-
-In RDFLib, blank nodes are given unique IDs when parsing, so graph merging can be done by simply reading several files into the same graph::
-
-    from rdflib import Graph
-
-    graph = Graph()
-
-    graph.parse(input1)
-    graph.parse(input2)
-
-``graph`` now contains the merged graph of ``input1`` and ``input2``.
-
-
-.. note:: However, the set-theoretic graph operations in RDFLib are assumed to be performed in sub-graphs of some larger data-base (for instance, in the context of a :class:`~rdflib.graph.Dataset`) and assume shared blank node IDs, and therefore do NOT do *correct* merging, i.e.::
-
-        from rdflib import Graph
-
-        g1 = Graph()
-        g1.parse(input1)
-
-        g2 = Graph()
-        g2.parse(input2)
-
-        graph = g1 + g2
-
-    May cause unwanted collisions of blank-nodes in
-    ``graph``.
-
-
-
diff --git a/docs/namespaces_and_bindings.md b/docs/namespaces_and_bindings.md
new file mode 100644
index 000000000..8efea994b
--- /dev/null
+++ b/docs/namespaces_and_bindings.md
@@ -0,0 +1,143 @@
+# Namespaces and Bindings
+
+RDFLib provides several short-cuts to working with many URIs in the same namespace.
+
+The [`rdflib.namespace`][rdflib.namespace] module defines the [`Namespace`][rdflib.namespace.Namespace] class which lets you easily create URIs in a namespace:
+
+```python
+from rdflib import Namespace
+
+EX = Namespace("http://example.org/")
+EX.Person  # a Python attribute for EX. This example is equivalent to rdflib.term.URIRef("http://example.org/Person")
+
+# use dict notation for things that are not valid Python identifiers, e.g.:
+EX['first%20name']  # as rdflib.term.URIRef("http://example.org/first%20name")
+```
+
+These two styles of namespace creation - object attribute and dict - are equivalent and are made available just to allow for valid RDF namespaces and URIs that are not valid Python identifiers. This isn't just for syntactic things like spaces, as per the example of `first%20name` above, but also for Python reserved words like `class` or `while`, so for the URI `http://example.org/class`, create it with `EX['class']`, not `EX.class`.
+
+## Common Namespaces
+
+The `namespace` module defines many common namespaces such as RDF, RDFS, OWL, FOAF, SKOS, PROF, etc. The list of the namespaces provided grows with user contributions to RDFLib.
+
+These Namespaces, and any others that users define, can also be associated with prefixes using the [`NamespaceManager`][rdflib.namespace.NamespaceManager], e.g. using `foaf` for `http://xmlns.com/foaf/0.1/`.
+
+Each RDFLib graph has a [`namespace_manager`][rdflib.graph.Graph.namespace_manager] that keeps a list of namespace to prefix mappings. The namespace manager is populated when reading in RDF, and these prefixes are used when serialising RDF, or when parsing SPARQL queries. Prefixes can be bound with the [`bind()`][rdflib.graph.Graph.bind] method:
+
+```python
+from rdflib import Graph, Namespace
+from rdflib.namespace import FOAF
+
+EX = Namespace("http://example.org/")
+
+g = Graph()
+g.bind("foaf", FOAF)  # bind an RDFLib-provided namespace to a prefix
+g.bind("ex", EX)      # bind a user-declared namespace to a prefix
+```
+
+
+The [`bind()`][rdflib.graph.Graph.bind] method is actually supplied by the [`NamespaceManager`][rdflib.namespace.NamespaceManager] class - see next.
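+
+Once bound, prefixes are used in serialized output. A minimal sketch of the effect (the `ex:nick` subject and `"Nick"` value are invented for illustration):
+
+```python
+from rdflib import Graph, Literal, Namespace
+from rdflib.namespace import FOAF
+
+EX = Namespace("http://example.org/")
+
+g = Graph()
+g.bind("foaf", FOAF)
+g.bind("ex", EX)
+g.add((EX.nick, FOAF.name, Literal("Nick")))  # a made-up triple, just to have something to serialize
+
+print(g.serialize(format="turtle"))
+# the bound prefixes appear in the output, roughly:
+# @prefix ex: <http://example.org/> .
+# @prefix foaf: <http://xmlns.com/foaf/0.1/> .
+#
+# ex:nick foaf:name "Nick" .
+```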
+
+## NamespaceManager
+
+Each RDFLib graph comes with a [`NamespaceManager`][rdflib.namespace.NamespaceManager] instance in the [`namespace_manager`][rdflib.graph.Graph.namespace_manager] field; you can use the [`bind()`][rdflib.namespace.NamespaceManager.bind] method of this instance to bind a prefix to a namespace URI, as above. Note, however, that the [`NamespaceManager`][rdflib.namespace.NamespaceManager] automatically performs some bindings according to a selected strategy.
+
+Namespace binding strategies are indicated with the `bind_namespaces` input parameter to [`NamespaceManager`][rdflib.namespace.NamespaceManager] instances and may also be set via `Graph`:
+
+```python
+from rdflib import Graph
+from rdflib.namespace import NamespaceManager
+
+g = Graph(bind_namespaces="rdflib")  # bind via Graph
+
+g2 = Graph()
+nm = NamespaceManager(g2, bind_namespaces="rdflib")  # bind via NamespaceManager
+```
+
+
+Valid strategies are:
+
+- core:
+    - binds several core RDF prefixes only
+    - owl, rdf, rdfs, xsd, xml from the NAMESPACE_PREFIXES_CORE object
+    - this is the default
+- rdflib:
+    - binds all the namespaces shipped with RDFLib as DefinedNamespace instances
+    - all the core namespaces and all the following: brick, csvw, dc, dcat
+    - dcmitype, dcterms, dcam, doap, foaf, geo, odrl, org, prof, prov, qb, sdo
+    - sh, skos, sosa, ssn, time, vann, void
+    - see the NAMESPACE_PREFIXES_RDFLIB object in [`rdflib.namespace`][rdflib.namespace] for an up-to-date list
+- none:
+    - binds no namespaces to prefixes
+    - note this is NOT the default behaviour
+- cc:
+    - uses prefix bindings from prefix.cc, which is an online prefixes database
+    - not implemented yet - this is aspirational
+
+### Re-binding
+
+Note that regardless of the strategy employed, prefixes for namespaces can be overwritten with users' preferred prefixes, for example:
+
+```python
+from rdflib import Graph
+from rdflib.namespace import GEO  # imports GeoSPARQL's namespace
+
+g = Graph(bind_namespaces="rdflib")  # binds GeoSPARQL's namespace to prefix 'geo'
+
+g.bind('geosp', GEO, override=True)
+```
+
+[`NamespaceManager`][rdflib.namespace.NamespaceManager] also has a method to normalize a given URI:
+
+```python
+from rdflib import Graph, URIRef
+from rdflib.namespace import NamespaceManager
+
+nm = NamespaceManager(Graph())
+nm.normalizeUri(URIRef("http://xmlns.com/foaf/0.1/Person"))
+```
+
+For simple output, or simple serialisation, you often want a nice readable representation of a term. All RDFLib terms have a `.n3()` method, which will return a suitable N3 representation, and you can supply a NamespaceManager instance to provide prefixes, i.e. `.n3(namespace_manager=some_nm)`:
+
+```python
+>>> from rdflib import Graph, URIRef, Literal, BNode
+>>> from rdflib.namespace import FOAF, NamespaceManager
+
+>>> person = URIRef("http://xmlns.com/foaf/0.1/Person")
+>>> person.n3()
+'<http://xmlns.com/foaf/0.1/Person>'
+
+>>> g = Graph()
+>>> g.bind("foaf", FOAF)
+
+>>> person.n3(g.namespace_manager)
+'foaf:Person'
+
+>>> l = Literal(2)
+>>> l.n3()
+'"2"^^<http://www.w3.org/2001/XMLSchema#integer>'
+
+>>> l.n3(NamespaceManager(Graph(), bind_namespaces="core"))
+'"2"^^xsd:integer'
+```
+
+The namespace manager also has a useful method, `compute_qname`: `g.namespace_manager.compute_qname(x)` (or just `g.compute_qname(x)`) takes a URI and decomposes it into its parts:
+
+```python
+g.compute_qname(URIRef("http://foo/bar#baz"))
+# returns: ("ns2", URIRef("http://foo/bar#"), "baz")
+```
+
+## Namespaces in SPARQL Queries
+
+The `initNs` argument supplied to [`query()`][rdflib.graph.Graph.query] is a dictionary of namespaces to be expanded in the query string.
If you pass no `initNs` argument, the namespaces registered with the graph's namespace_manager are used:
+
+```python
+from rdflib.namespace import FOAF
+graph.query('SELECT * WHERE { ?p a foaf:Person }', initNs={'foaf': FOAF})
+```
+
+In order to use an empty prefix (e.g. `?a :knows ?b`), use a `PREFIX` directive with no prefix in the SPARQL query to set a default namespace:
+
+```sparql
+PREFIX : <http://xmlns.com/foaf/0.1/>
+```
diff --git a/docs/namespaces_and_bindings.rst b/docs/namespaces_and_bindings.rst
deleted file mode 100644
index ef7458661..000000000
--- a/docs/namespaces_and_bindings.rst
+++ /dev/null
@@ -1,156 +0,0 @@
-.. _namespaces_and_bindings: Namespaces and Bindings
-
-=======================
-Namespaces and Bindings
-=======================
-
-RDFLib provides several short-cuts to working with many URIs in the same namespace.
-
-The :mod:`rdflib.namespace` defines the :class:`rdflib.namespace.Namespace` class which lets you easily create URIs in a namespace::
-
-    from rdflib import Namespace
-
-    EX = Namespace("http://example.org/")
-    EX.Person # a Python attribute for EX. This example is equivalent to rdflib.term.URIRef("http://example.org/Person")
-
-    # use dict notation for things that are not valid Python identifiers, e.g.:
-    n['first%20name'] # as rdflib.term.URIRef("http://example.org/first%20name")
-
-These two styles of namespace creation - object attribute and dict - are equivalent and are made available just to allow for valid
-RDF namespaces and URIs that are not valid Python identifiers. This isn't just for syntactic things like spaces, as per
-the example of ``first%20name`` above, but also for Python reserved words like ``class`` or ``while``, so for the URI
-``http://example.org/class``, create it with ``EX['class']``, not ``EX.class``.
-
-Common Namespaces
------------------
-
-The ``namespace`` module defines many common namespaces such as RDF, RDFS, OWL, FOAF, SKOS, PROF, etc. The list of the
-namespaces provided grows with user contributions to RDFLib.
-
-These Namespaces, and any others that users define, can also be associated with prefixes using the :class:`rdflib.namespace.NamespaceManager`, e.g. using ``foaf`` for ``http://xmlns.com/foaf/0.1/``.
-
-Each RDFLib graph has a :attr:`~rdflib.graph.Graph.namespace_manager` that keeps a list of namespace to prefix mappings. The namespace manager is populated when reading in RDF, and these prefixes are used when serialising RDF, or when parsing SPARQL queries. Prefixes can be bound with the :meth:`rdflib.graph.Graph.bind` method::
-
-    from rdflib import Graph, Namespace
-    from rdflib.namespace import FOAF
-
-    EX = Namespace("http://example.org/")
-
-    g = Graph()
-    g.bind("foaf", FOAF) # bind an RDFLib-provided namespace to a prefix
-    g.bind("ex", EX) # bind a user-declared namespace to a prefix
-
-
-The :meth:`rdflib.graph.Graph.bind` method is actually supplied by the :class:`rdflib.namespace.NamespaceManager` class - see next.
-
-NamespaceManager
-----------------
-
-Each RDFLib graph comes with a :class:`rdflib.namespace.NamespaceManager` instance in the :attr:`~rdflib.graph.Graph.namespace_manager` field; you can use the :meth:`~rdflib.namespace.NamespaceManager.bind` method of this instance to bind a prefix to a namespace URI,
-as above, however note that the :class:`~rdflib.namespace.NamespaceManager` automatically performs some bindings according to a selected strategy.
-
-Namespace binding strategies are indicated with the ``bind_namespaces`` input parameter to :class:`~rdflib.namespace.NamespaceManager` instances
-and may be set via ``Graph`` also::
-
-    from rdflib import Graph
-    from rdflib.namespace import NamespaceManager
-
-    g = Graph(bind_namespaces="rdflib")  # bind via Graph
-
-    g2 = Graph()
-    nm = NamespaceManager(g2, bind_namespaces="rdflib")  # bind via NamespaceManager
-
-
-Valid strategies are:
-
-* core:
-    * binds several core RDF prefixes only
-    * owl, rdf, rdfs, xsd, xml from the NAMESPACE_PREFIXES_CORE object
-    * this is default
-* rdflib:
-    * binds all the namespaces shipped with RDFLib as DefinedNamespace instances
-    * all the core namespaces and all the following: brick, csvw, dc, dcat
-    * dcmitype, dcterms, dcam, doap, foaf, geo, odrl, org, prof, prov, qb, sdo
-    * sh, skos, sosa, ssn, time, vann, void
-    * see the NAMESPACE_PREFIXES_RDFLIB object in :class:`rdflib.namespace` for up-to-date list
-* none:
-    * binds no namespaces to prefixes
-    * note this is NOT default behaviour
-* cc:
-    * using prefix bindings from prefix.cc which is a online prefixes database
-    * not implemented yet - this is aspirational
-
-Re-binding
-^^^^^^^^^^
-
-Note that regardless of the strategy employed, prefixes for namespaces can be overwritten with users preferred prefixes,
-for example::
-
-    from rdflib import Graph
-    from rdflib.namespace import GEO # imports GeoSPARQL's namespace
-
-    g = Graph(bind_namespaces="rdflib") # binds GeoSPARQL's namespace to prefix 'geo'
-
-    g.bind('geosp', GEO, override=True)
-
-
-
-:class:`~rdflib.namespace.NamespaceManager` also has a method to normalize a given url::
-
-    from rdflib.namespace import NamespaceManager
-
-    nm = NamespaceManager(Graph())
-    nm.normalizeUri(t)
-
-
-For simple output, or simple serialisation, you often want a nice
-readable representation of a term. All RDFLib terms have a
-``.n3()`` method, which will return a suitable N3 format and into which you can supply a NamespaceManager instance
-to provide prefixes, i.e. ``.n3(namespace_manager=some_nm)``::
-
-    >>> from rdflib import Graph, URIRef, Literal, BNode
-    >>> from rdflib.namespace import FOAF, NamespaceManager
-
-    >>> person = URIRef("http://xmlns.com/foaf/0.1/Person")
-    >>> person.n3()
-    '<http://xmlns.com/foaf/0.1/Person>'
-
-    >>> g = Graph()
-    >>> g.bind("foaf", FOAF)
-
-    >>> person.n3(g.namespace_manager)
-    'foaf:Person'
-
-    >>> l = Literal(2)
-    >>> l.n3()
-    '"2"^^<http://www.w3.org/2001/XMLSchema#integer>'
-
-    >>> l.n3(NamespaceManager(Graph(), bind_namespaces="core"))
-    '"2"^^xsd:integer'
-
-The namespace manage also has a useful method ``compute_qname``
-``g.namespace_manager.compute_qname(x)`` (or just ``g.compute_qname(x)``) which takes a URI and decomposes it into the parts::
-
-    self.assertEqual(g.compute_qname(URIRef("http://foo/bar#baz")),
-        ("ns2", URIRef("http://foo/bar#"), "baz"))
-
-
-
-Namespaces in SPARQL Queries
-----------------------------
-
-The ``initNs`` argument supplied to :meth:`~rdflib.graph.Graph.query` is a dictionary of namespaces to be expanded in the query string.
-If you pass no ``initNs`` argument, the namespaces registered with the graphs namespace_manager are used::
-
-    from rdflib.namespace import FOAF
-    graph.query('SELECT * WHERE { ?p a foaf:Person }', initNs={'foaf': FOAF})
-
-
-In order to use an empty prefix (e.g. ``?a :knows ?b``), use a ``PREFIX`` directive with no prefix in the SPARQL query to set a default namespace:
-
-..
code-block:: sparql
-
-    PREFIX : <http://xmlns.com/foaf/0.1/>
-
-
-
diff --git a/docs/persistence.md b/docs/persistence.md
new file mode 100644
index 000000000..aa81f66b8
--- /dev/null
+++ b/docs/persistence.md
@@ -0,0 +1,60 @@
+# Persistence
+
+RDFLib provides an [`abstracted Store API`][rdflib.store.Store] for persistence of RDF and Notation 3. The [`Graph`][rdflib.graph.Graph] class works with instances of this API (as the first argument to its constructor) for triple-based management of an RDF store including: garbage collection, transaction management, update, pattern matching, removal, length, and database management ([`Graph.open()`][rdflib.graph.Graph.open] / [`Graph.close()`][rdflib.graph.Graph.close] / [`Graph.destroy()`][rdflib.graph.Graph.destroy]).
+
+Additional persistence mechanisms can be supported by implementing this API for a different store.
+
+## Stores currently shipped with core RDFLib
+
+* [`Memory`][rdflib.plugins.stores.memory.Memory] - not persistent!
+* [`BerkeleyDB`][rdflib.plugins.stores.berkeleydb.BerkeleyDB] - on-disk persistence via Python's [berkeleydb package](https://pypi.org/project/berkeleydb/)
+* [`SPARQLStore`][rdflib.plugins.stores.sparqlstore.SPARQLStore] - a read-only wrapper around a remote SPARQL Query endpoint
+* [`SPARQLUpdateStore`][rdflib.plugins.stores.sparqlstore.SPARQLUpdateStore] - a read-write wrapper around a remote SPARQL query/update endpoint pair
+
+## Usage
+
+In most cases, passing the name of the store to the Graph constructor is enough:
+
+```python
+from rdflib import Graph
+
+graph = Graph(store='BerkeleyDB')
+```
+
+Most stores offering on-disk persistence will need to be opened before reading or writing. When persisting a triplestore, rather than a ConjunctiveGraph quadstore, you need to specify an identifier with which you can open the graph:
+
+```python
+graph = Graph('BerkeleyDB', identifier='mygraph')
+
+# first time create the store:
+graph.open('/home/user/data/myRDFLibStore', create=True)
+
+# work with the graph:
+data = """
+PREFIX : <http://example.org/>
+
+:a :b :c .
+:d :e :f .
+:d :g :h .
+"""
+graph.parse(data=data, format="ttl")
+
+# when done!
+graph.close()
+```
+
+When done, [`close()`][rdflib.graph.Graph.close] must be called to free the resources associated with the store.
+
+## Additional store plugins
+
+More store implementations are available in RDFLib extension projects:
+
+* [rdflib-sqlalchemy](https://github.com/RDFLib/rdflib-sqlalchemy) – a store which supports a wide variety of RDBMS backends
+* [rdflib-leveldb](https://github.com/RDFLib/rdflib-leveldb) – a store on top of Google's [LevelDB](https://code.google.com/p/leveldb/) key-value store
+* [rdflib-kyotocabinet](https://github.com/RDFLib/rdflib-kyotocabinet) – a store on top of the [Kyoto Cabinet](http://fallabs.com/kyotocabinet/) key-value store
+
+## Example
+
+* [`examples.berkeleydb_example`][examples.berkeleydb_example] contains an example for using a BerkeleyDB store.
+* [`examples.sparqlstore_example`][examples.sparqlstore_example] contains an example for using a SPARQLStore.
diff --git a/docs/persistence.rst b/docs/persistence.rst
deleted file mode 100644
index ca7449ed5..000000000
--- a/docs/persistence.rst
+++ /dev/null
@@ -1,81 +0,0 @@
-.. _persistence: Persistence
-
-===========
-Persistence
-===========
-
-RDFLib provides an :class:`abstracted Store API <rdflib.store.Store>`
-for persistence of RDF and Notation 3.
The
-:class:`~rdflib.graph.Graph` class works with instances of this API
-(as the first argument to its constructor) for triple-based management
-of an RDF store including: garbage collection, transaction management,
-update, pattern matching, removal, length, and database management
-(:meth:`~rdflib.graph.Graph.open` / :meth:`~rdflib.graph.Graph.close`
-/ :meth:`~rdflib.graph.Graph.destroy`).
-
-Additional persistence mechanisms can be supported by implementing
-this API for a different store.
-
-Stores currently shipped with core RDFLib
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-* :class:`Memory <rdflib.plugins.stores.memory.Memory>` - not persistent!
-* :class:`~rdflib.plugins.stores.berkeleydb.BerkeleyDB` - on disk persistence via Python's `berkeleydb package <https://pypi.org/project/berkeleydb/>`_
-* :class:`~rdflib.plugins.stores.sparqlstore.SPARQLStore` - a read-only wrapper around a remote SPARQL Query endpoint
-* :class:`~rdflib.plugins.stores.sparqlstore.SPARQLUpdateStore` - a read-write wrapper around a remote SPARQL query/update endpoint pair
-
-Usage
-^^^^^
-
-In most cases, passing the name of the store to the Graph constructor is enough:
-
-.. code-block:: python
-
-    from rdflib import Graph
-
-    graph = Graph(store='BerkeleyDB')
-
-
-Most stores offering on-disk persistence will need to be opened before reading or writing.
-When peristing a triplestore, rather than a ConjuntiveGraph quadstore, you need to specify
-an identifier with which you can open the graph:
-
-.. code-block:: python
-
-    graph = Graph('BerkeleyDB', identifier='mygraph')
-
-    # first time create the store:
-    graph.open('/home/user/data/myRDFLibStore', create=True)
-
-    # work with the graph:
-    data = """
-    PREFIX : <http://example.org/>
-
-    :a :b :c .
-    :d :e :f .
-    :d :g :h .
-    """
-    graph.parse(data=data, format="ttl")
-
-    # when done!
-    graph.close()
-
-
-
-When done, :meth:`~rdflib.graph.Graph.close` must be called to free the resources associated with the store.
-
-
-Additional store plugins
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-More store implementations are available in RDFLib extension projects:
-
- * `rdflib-sqlalchemy <https://github.com/RDFLib/rdflib-sqlalchemy>`_ – a store which supports a wide-variety of RDBMS backends,
- * `rdflib-leveldb <https://github.com/RDFLib/rdflib-leveldb>`_ – a store on top of Google's `LevelDB <https://code.google.com/p/leveldb/>`_ key-value store.
- * `rdflib-kyotocabinet <https://github.com/RDFLib/rdflib-kyotocabinet>`_ – a store on top of the `Kyoto Cabinet <http://fallabs.com/kyotocabinet/>`_ key-value store.
-
-Example
-^^^^^^^
-
-* :mod:`examples.berkeleydb_example` contains an example for using a BerkeleyDB store.
-* :mod:`examples.sparqlstore_example` contains an example for using a SPARQLStore.
diff --git a/docs/persisting_n3_terms.md b/docs/persisting_n3_terms.md
new file mode 100644
index 000000000..5cf59dfdb
--- /dev/null
+++ b/docs/persisting_n3_terms.md
@@ -0,0 +1,89 @@
+# Persisting Notation 3 Terms
+
+## Using N3 Syntax for Persistence
+
+Blank Nodes, Literals, URI References, and Variables can be distinguished in persistence by relying on Notation 3 syntax convention.
+
+All URI References can be expanded and persisted as:
+
+```turtle
+<..URI..>
+```
+
+All Literals can be expanded and persisted as:
+
+```turtle
+"..value.."@lang or "..value.."^^dtype_uri
+```
+
+!!! abstract "Language tag"
+    `@lang` is a language tag and `^^dtype_uri` is the URI of a data type associated with the Literal
+
+Blank Nodes can be expanded and persisted as:
+
+```turtle
+_:Id
+```
+
+!!! info "About skolemization"
+    Where Id is an identifier as determined by skolemization.
Skolemization is a syntactic transformation routinely used in automatic inference systems in which existential variables are replaced by 'new' functions - function names not used elsewhere - applied to any enclosing universal variables. In RDF, Skolemization amounts to replacing every blank node in a graph by a 'new' name, i.e. a URI reference which is guaranteed to not occur anywhere else. In effect, it gives 'arbitrary' names to the anonymous entities whose existence was asserted by the use of blank nodes: the arbitrariness of the names ensures that nothing can be inferred that would not follow from the bare assertion of existence represented by the blank node. (Using a literal would not do. Literals are never 'new' in the required sense.)
+
+Variables can be persisted as they appear in their serialization `(?varName)` - since they only need be unique within their scope (the context of their associated statements)
+
+These syntactic conventions can facilitate term round-tripping.
+
+## Variables by Scope
+
+Would an interface be needed in order to facilitate a quick way to aggregate all the variables in a scope (given by a formula identifier)? An interface such as:
+
+```python
+def variables(formula_identifier)
+```
+
+## The Need to Skolemize Formula Identifiers
+
+It would seem reasonable to assume that a formula-aware store would assign Blank Node identifiers as names of formulae that appear in an N3 serialization. So for instance, the following bit of N3:
+
+```
+{?x a :N3Programmer} => {?x :has :Migraine}
+```
+
+Could be interpreted as the assertion of the following statement:
+
+```turtle
+_:a log:implies _:b
+```
+
+However, how are `_:a` and `_:b` distinguished from other Blank Nodes? A formula-aware store would be expected to persist the first set of statements as quoted statements in a formula named `_:a` and the second set as quoted statements in a formula named `_:b`, but it would not be cost-effective for a serializer to have to query the store for all statements in a context named `_:a` in order to determine if `_:a` was associated with a formula (so that it could be serialized properly).
+
+## Relying on `log:Formula` Membership
+
+The store could rely on explicit `log:Formula` membership (via `rdf:type` statements) to model the distinction of Blank Nodes associated with formulae. However, would these statements be expected from an N3 parser or known implicitly by the store? i.e., would all such Blank Nodes match the following pattern:
+
+```turtle
+?formula rdf:type log:Formula
+```
+
+## Relying on an Explicit Interface
+
+A formula-aware store could also support the persistence of this distinction by implementing a method that returns an iterator over all the formulae in the store:
+
+```python
+def formulae(triple=None)
+```
+
+This function would return all the Blank Node identifiers assigned to formulae, or just those that contain statements matching the given triple pattern, and would be the way a serializer determines if a term refers to a formula (in order to properly serialize it).
+
+How much would such an interface reduce the need to model formulae terms as first class objects (perhaps to be returned by the [`triples()`][rdflib.Graph.triples] function)? Would it be more useful for the [`Graph`][rdflib.Graph] (or the store itself) to return a Context object in place of a formula term (using the formulae interface to make this determination)?
+
+Conversely, would these interfaces (variables and formulae) be considered optimizations only, since you have the distinction by the kinds of terms triples returns (which would be expanded to include variables and formulae)?
+
+## Persisting Formula Identifiers
+
+This is the most straightforward way to maintain this distinction - without relying on extra interfaces. Formula identifiers could be persisted distinctly from other terms by using the following notation:
+
+```
+{_:bnode} or {<.. URI ..>}
+```
+
+This would facilitate their persistence round-trip - same as the other terms that rely on N3 syntax to distinguish between each other.
diff --git a/docs/persisting_n3_terms.rst b/docs/persisting_n3_terms.rst
deleted file mode 100644
index 1138b4c3f..000000000
--- a/docs/persisting_n3_terms.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. _persisting_n3_terms:
-
-===========================
-Persisting Notation 3 Terms
-===========================
-
-Using N3 Syntax for Persistence
--------------------------------
-Blank Nodes, Literals, URI References, and Variables can be distinguished in persistence by relying on Notation 3 syntax convention.
-
-All URI References can be expanded and persisted as:
-
-.. code-block:: text
-
-    <..URI..>
-
-All Literals can be expanded and persisted as:
-
-.. code-block:: text
-
-    "..value.."@lang or "..value.."^^dtype_uri
-
-.. note:: ``@lang`` is a language tag and ``^^dtype_uri`` is the URI of a data type associated with the Literal
-
-Blank Nodes can be expanded and persisted as:
-
-.. code-block:: text
-
-    _:Id
-
-.. note:: where Id is an identifier as determined by skolemization. Skolemization is a syntactic transformation routinely used in automatic inference systems in which existential variables are replaced by 'new' functions - function names not used elsewhere - applied to any enclosing universal variables. In RDF, Skolemization amounts to replacing every blank node in a graph by a 'new' name, i.e. a URI reference which is guaranteed to not occur anywhere else. In effect, it gives 'arbitrary' names to the anonymous entities whose existence was asserted by the use of blank nodes: the arbitrariness of the names ensures that nothing can be inferred that would not follow from the bare assertion of existence represented by the blank node. (Using a literal would not do. Literals are never 'new' in the required sense.)
-
-Variables can be persisted as they appear in their serialization ``(?varName)`` - since they only need be unique within their scope (the context of their associated statements)
-
-These syntactic conventions can facilitate term round-tripping.
-
-Variables by Scope
-------------------
-Would an interface be needed in order to facilitate a quick way to aggregate all the variables in a scope (given by a formula identifier)? An interface such as:
-
-.. code-block:: python
-
-    def variables(formula_identifier)
-
-The Need to Skolemize Formula Identifiers
------------------------------------------
-It would seem reasonable to assume that a formula-aware store would assign Blank Node identifiers as names of formulae that appear in a N3 serialization. So for instance, the following bit of N3:
-
-.. code-block:: text
-
-    {?x a :N3Programmer} => {?x :has :Migrane}
-
-Could be interpreted as the assertion of the following statement:
-
-.. code-block:: text
-
-    _:a log:implies _:b
-
-However, how are ``_:a`` and ``_:b`` distinguished from other Blank Nodes?
A formula-aware store would be expected to persist the first set of statements as quoted statements in a formula named ``_:a`` and the second set as quoted statements in a formula named ``_:b``, but it would not be cost-effective for a serializer to have to query the store for all statements in a context named ``_:a`` in order to determine if ``_:a`` was associated with a formula (so that it could be serialized properly). - -Relying on ``log:Formula`` Membership -------------------------------------- - -The store could rely on explicit ``log:Formula`` membership (via ``rdf:type`` statements) to model the distinction of Blank Nodes associated with formulae. However, would these statements be expected from an N3 parser or known implicitly by the store? i.e., would all such Blank Nodes match the following pattern: - -.. code-block:: text - - ?formula rdf:type log:Formula - -Relying on an Explicit Interface --------------------------------- -A formula-aware store could also support the persistence of this distinction by implementing a method that returns an iterator over all the formulae in the store: - -.. code-block:: python - - def formulae(triple=None) - -This function would return all the Blank Node identifiers assigned to formulae or just those that contain statements matching the given triple pattern and would be the way a serializer determines if a term refers to a formula (in order to properly serializer it). - -How much would such an interface reduce the need to model formulae terms as first class objects (perhaps to be returned by the :meth:`~rdflib.Graph.triples` function)? Would it be more useful for the :class:`~rdflib.Graph` (or the store itself) to return a Context object in place of a formula term (using the formulae interface to make this determination)? - -Conversely, would these interfaces (variables and formulae) be considered optimizations only since you have the distinction by the kinds of terms triples returns (which would be expanded to include variables and formulae)? - -Persisting Formula Identifiers ------------------------------- -This is the most straight forward way to maintain this distinction - without relying on extra interfaces. Formula identifiers could be persisted distinctly from other terms by using the following notation: - -.. code-block:: text - - {_:bnode} or {<.. URI ..>} - -This would facilitate their persistence round-trip - same as the other terms that rely on N3 syntax to distinguish between each other. - diff --git a/docs/plugin_parsers.rst b/docs/plugin_parsers.rst deleted file mode 100644 index 56cb5d1eb..000000000 --- a/docs/plugin_parsers.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. _plugin_parsers: Plugin parsers - -============== -Plugin parsers -============== - -These serializers are available in default RDFLib, you can use them by -passing the name to graph's :meth:`~rdflib.graph.Graph.parse` method:: - - graph.parse(my_url, format='n3') - -The ``html`` parser will auto-detect RDFa, HTurtle or Microdata. - -It is also possible to pass a mime-type for the ``format`` parameter:: - - graph.parse(my_url, format='application/rdf+xml') - -If you are not sure what format your file will be, you can use :func:`rdflib.util.guess_format` which will guess based on the file extension. 
- -========= ==================================================================== -Name Class -========= ==================================================================== -json-ld :class:`~rdflib.plugins.parsers.jsonld.JsonLDParser` -hext :class:`~rdflib.plugins.parsers.hext.HextuplesParser` -n3 :class:`~rdflib.plugins.parsers.notation3.N3Parser` -nquads :class:`~rdflib.plugins.parsers.nquads.NQuadsParser` -patch :class:`~rdflib.plugins.parsers.patch.RDFPatchParser` -nt :class:`~rdflib.plugins.parsers.ntriples.NTParser` -trix :class:`~rdflib.plugins.parsers.trix.TriXParser` -turtle :class:`~rdflib.plugins.parsers.notation3.TurtleParser` -xml :class:`~rdflib.plugins.parsers.rdfxml.RDFXMLParser` -========= ==================================================================== - -Multi-graph IDs ---------------- -Note that for correct parsing of multi-graph data, e.g. Trig, HexT, etc., into a ``Dataset``, -as opposed to a context-unaware ``Graph``, you will need to set the ``publicID`` of the ``Dataset`` to the identifier of the ``default_context`` (default graph), for example:: - - d = Dataset() - d.parse( - data=""" ... """, - format="trig", - publicID=d.default_context.identifier - ) - -(from the file tests/test_serializer_hext.py) diff --git a/docs/plugin_query_results.rst b/docs/plugin_query_results.rst deleted file mode 100644 index f44c27687..000000000 --- a/docs/plugin_query_results.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. _plugin_query_results: Plugin query results - -==================== -Plugin query results -==================== - -Plugins for reading and writing of (SPARQL) :class:`~rdflib.query.Result` - pass ``name`` to either :meth:`~rdflib.query.Result.parse` or :meth:`~rdflib.query.Result.serialize` - - -Parsers -------- - -==== ==================================================================== -Name Class -==== ==================================================================== -csv :class:`~rdflib.plugins.sparql.results.csvresults.CSVResultParser` -json :class:`~rdflib.plugins.sparql.results.jsonresults.JSONResultParser` -tsv :class:`~rdflib.plugins.sparql.results.tsvresults.TSVResultParser` -xml :class:`~rdflib.plugins.sparql.results.xmlresults.XMLResultParser` -==== ==================================================================== - -Serializers ------------ - -==== ======================================================================== -Name Class -==== ======================================================================== -csv :class:`~rdflib.plugins.sparql.results.csvresults.CSVResultSerializer` -json :class:`~rdflib.plugins.sparql.results.jsonresults.JSONResultSerializer` -txt :class:`~rdflib.plugins.sparql.results.txtresults.TXTResultSerializer` -xml :class:`~rdflib.plugins.sparql.results.xmlresults.XMLResultSerializer` -==== ======================================================================== diff --git a/docs/plugin_serializers.rst b/docs/plugin_serializers.rst deleted file mode 100644 index 3721bb9f8..000000000 --- a/docs/plugin_serializers.rst +++ /dev/null @@ -1,60 +0,0 @@ -.. 
_plugin_serializers: Plugin serializers - -================== -Plugin serializers -================== - -These serializers are available in default RDFLib, you can use them by -passing the name to a graph's :meth:`~rdflib.graph.Graph.serialize` method:: - - print graph.serialize(format='n3') - -It is also possible to pass a mime-type for the ``format`` parameter:: - - graph.serialize(my_url, format='application/rdf+xml') - -========== =============================================================== -Name Class -========== =============================================================== -json-ld :class:`~rdflib.plugins.serializers.jsonld.JsonLDSerializer` -n3 :class:`~rdflib.plugins.serializers.n3.N3Serializer` -nquads :class:`~rdflib.plugins.serializers.nquads.NQuadsSerializer` -nt :class:`~rdflib.plugins.serializers.nt.NTSerializer` -hext :class:`~rdflib.plugins.serializers.hext.HextuplesSerializer` -patch :class:`~rdflib.plugins.serializers.patch.PatchSerializer` -pretty-xml :class:`~rdflib.plugins.serializers.rdfxml.PrettyXMLSerializer` -trig :class:`~rdflib.plugins.serializers.trig.TrigSerializer` -trix :class:`~rdflib.plugins.serializers.trix.TriXSerializer` -turtle :class:`~rdflib.plugins.serializers.turtle.TurtleSerializer` -longturtle :class:`~rdflib.plugins.serializers.longturtle.LongTurtleSerializer` -xml :class:`~rdflib.plugins.serializers.rdfxml.XMLSerializer` -========== =============================================================== - - -JSON-LD -------- -JSON-LD - 'json-ld' - has been incorporated into RDFLib since v6.0.0. - -RDF Patch ---------- - -The RDF Patch Serializer - 'patch' - uses the RDF Patch format defined at https://afs.github.io/rdf-patch/. It supports serializing context aware stores as either addition or deletion patches; and also supports serializing the difference between two context aware stores as a Patch of additions and deletions. - -HexTuples ---------- -The HexTuples Serializer - 'hext' - uses the HexTuples format defined at https://github.com/ontola/hextuples. - -For serialization of non-context-aware data sources, e.g. a single ``Graph``, the 'graph' field (6th variable in the -Hextuple) will be an empty string. - -For context-aware (multi-graph) serialization, the 'graph' field of the default graph will be an empty string and -the values for other graphs will be Blank Node IDs or IRIs. - -Longturtle ----------- -Longturtle is just the turtle format with newlines preferred over compactness - multiple nodes on the same line -to enhance the format's text file version control (think Git) friendliness - and more modern forms of prefix markers - -PREFIX instead of @prefix - to make it as similar to SPARQL as possible. - -Longturtle is Turtle 1.1 compliant and will work wherever ordinary turtle works, however some very old parsers don't -understand PREFIX, only @prefix... diff --git a/docs/plugin_stores.rst b/docs/plugin_stores.rst deleted file mode 100644 index 1a9fc506d..000000000 --- a/docs/plugin_stores.rst +++ /dev/null @@ -1,70 +0,0 @@ -.. 
_plugin_stores: Plugin stores
-
-=============
-Plugin stores
-=============
-
-Built In
---------
-
-The following Stores are contained within the rdflib core package:
-
-================= ============================================================
-Name              Class
-================= ============================================================
-Auditable         :class:`~rdflib.plugins.stores.auditable.AuditableStore`
-Concurrent        :class:`~rdflib.plugins.stores.concurrent.ConcurrentStore`
-SimpleMemory      :class:`~rdflib.plugins.stores.memory.SimpleMemory`
-Memory            :class:`~rdflib.plugins.stores.memory.Memory`
-SPARQLStore       :class:`~rdflib.plugins.stores.sparqlstore.SPARQLStore`
-SPARQLUpdateStore :class:`~rdflib.plugins.stores.sparqlstore.SPARQLUpdateStore`
-BerkeleyDB        :class:`~rdflib.plugins.stores.berkeleydb.BerkeleyDB`
-default           :class:`~rdflib.plugins.stores.memory.Memory`
-================= ============================================================
-
-External
---------
-
-The following Stores are defined externally to rdflib's core package, so look to their documentation elsewhere for
-specific details of use.
-
-================= ==================================================== =============================================================================================
-Name              Repository                                           Notes
-================= ==================================================== =============================================================================================
-SQLAlchemy        `<https://github.com/RDFLib/rdflib-sqlalchemy>`_     An SQLAlchemy-backed, formula-aware RDFLib Store. Tested dialects are: SQLite, MySQL & PostgreSQL
-leveldb           `<https://github.com/RDFLib/rdflib-leveldb>`_        An adaptation of RDFLib BerkeleyDB Store’s key-value approach, using LevelDB as a back-end
-Kyoto Cabinet     `<https://github.com/RDFLib/rdflib-kyotocabinet>`_   An adaptation of RDFLib BerkeleyDB Store’s key-value approach, using Kyoto Cabinet as a back-end
-HDT               `<https://github.com/RDFLib/rdflib-hdt>`_            A Store back-end for rdflib to allow for reading and querying `HDT <https://www.rdfhdt.org/>`_ documents
-Oxigraph          `<https://github.com/oxigraph/oxrdflib>`_            Works with the `Pyoxigraph <https://oxigraph.org/pyoxigraph>`_ Python graph database library
-================= ==================================================== =============================================================================================
-
-*If you have, or know of a Store implementation and would like it listed here, please submit a Pull Request!*
-
-Use
----
-
-You can use these stores like this:
-
-.. code-block:: python
-
-    from rdflib import Graph
-
-    # use the default memory Store
-    graph = Graph()
-
-    # use the BerkeleyDB Store
-    graph = Graph(store="BerkeleyDB")
-
-
-In some cases, you must explicitly *open* and *close* a store, for example:
-
-.. code-block:: python
-
-    from rdflib import Graph
-
-    # use the BerkeleyDB Store
-    graph = Graph(store="BerkeleyDB")
-    graph.open("/some/folder/location")
-    # do things ...
-    graph.close()
-
diff --git a/docs/plugins.md b/docs/plugins.md
new file mode 100644
index 000000000..a73eb725a
--- /dev/null
+++ b/docs/plugins.md
@@ -0,0 +1,187 @@
+# Plugins
+
+![rdflib plugin "architecture"](_static/plugins-diagram.svg)
+
+Many parts of RDFLib are extensible with plugins, [see setuptools' 'Creating and discovering plugins'](https://packaging.python.org/guides/creating-and-discovering-plugins/). These pages list the plugins included in RDFLib core.
+
+* [`Parser Plugins`][rdflib.plugins.parsers]
+* [`Serializer Plugins`][rdflib.plugins.serializers]
+* [`Store Plugins`][rdflib.plugins.stores]
+* [`Query Results Plugins`][rdflib.plugins.sparql.results]
+
+## Plugin stores
+
+### Built In
+
+The following Stores are contained within the rdflib core package:
+
+| Name | Class |
+| --- | --- |
+| Auditable | [`AuditableStore`][rdflib.plugins.stores.auditable.AuditableStore] |
+| Concurrent | [`ConcurrentStore`][rdflib.plugins.stores.concurrent.ConcurrentStore] |
+| SimpleMemory | [`SimpleMemory`][rdflib.plugins.stores.memory.SimpleMemory] |
+| Memory | [`Memory`][rdflib.plugins.stores.memory.Memory] |
+| SPARQLStore | [`SPARQLStore`][rdflib.plugins.stores.sparqlstore.SPARQLStore] |
+| SPARQLUpdateStore | [`SPARQLUpdateStore`][rdflib.plugins.stores.sparqlstore.SPARQLUpdateStore] |
+| BerkeleyDB | [`BerkeleyDB`][rdflib.plugins.stores.berkeleydb.BerkeleyDB] |
+| default | [`Memory`][rdflib.plugins.stores.memory.Memory] |
+
+### External
+
+The following Stores are defined externally to rdflib's core package, so look to their documentation elsewhere for specific details of use.
+
+| Name | Repository | Notes |
+| --- | --- | --- |
+| SQLAlchemy | [github.com/RDFLib/rdflib-sqlalchemy](https://github.com/RDFLib/rdflib-sqlalchemy) | An SQLAlchemy-backed, formula-aware RDFLib Store. Tested dialects are: SQLite, MySQL & PostgreSQL |
+| leveldb | [github.com/RDFLib/rdflib-leveldb](https://github.com/RDFLib/rdflib-leveldb) | An adaptation of RDFLib BerkeleyDB Store's key-value approach, using LevelDB as a back-end |
+| Kyoto Cabinet | [github.com/RDFLib/rdflib-kyotocabinet](https://github.com/RDFLib/rdflib-kyotocabinet) | An adaptation of RDFLib BerkeleyDB Store's key-value approach, using Kyoto Cabinet as a back-end |
+| HDT | [github.com/RDFLib/rdflib-hdt](https://github.com/RDFLib/rdflib-hdt) | A Store back-end for rdflib to allow for reading and querying [HDT](https://www.rdfhdt.org/) documents |
+| Oxigraph | [github.com/oxigraph/oxrdflib](https://github.com/oxigraph/oxrdflib) | Works with the [Pyoxigraph](https://oxigraph.org/pyoxigraph) Python graph database library |
+| pycottas | [github.com/arenas-guerrero-julian/pycottas](https://github.com/arenas-guerrero-julian/pycottas) | A Store backend for querying compressed [COTTAS](https://pycottas.readthedocs.io/#cottas-files) files |
+
+*If you have, or know of a Store implementation and would like it listed here, please submit a Pull Request!*
+
+### Use
+
+You can use these stores like this:
+
+```python
+from rdflib import Graph
+
+# use the default memory Store
+graph = Graph()
+
+# use the BerkeleyDB Store
+graph = Graph(store="BerkeleyDB")
+```
+
+In some cases, you must explicitly *open* and *close* a store, for example:
+
+```python
+from rdflib import Graph
+
+# use the BerkeleyDB Store
+graph = Graph(store="BerkeleyDB")
+graph.open("/some/folder/location")
+# do things ...
+graph.close()
+```
+
+## Plugin parsers
+
+These parsers are available in default RDFLib; you can use them by passing the name to a graph's [`parse()`][rdflib.graph.Graph.parse] method:
+
+```python
+graph.parse(my_url, format='n3')
+```
+
+The `html` parser will auto-detect RDFa, HTurtle or Microdata.
+
+It is also possible to pass a mime-type for the `format` parameter:
+
+```python
+graph.parse(my_url, format='application/rdf+xml')
+```
+
+If you are not sure what format your file will be, you can use [`guess_format()`][rdflib.util.guess_format] which will guess based on the file extension.
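+
+For example, a minimal sketch (the `data.ttl` file name is hypothetical):
+
+```python
+from rdflib import Graph
+from rdflib.util import guess_format
+
+local_file = "data.ttl"
+print(guess_format(local_file))  # 'turtle'
+
+g = Graph()
+g.parse(local_file, format=guess_format(local_file))
+```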
+
+| Name | Class |
+|---------|---------------------------------------------------------------|
+| json-ld | [`JsonLDParser`][rdflib.plugins.parsers.jsonld.JsonLDParser] |
+| hext | [`HextuplesParser`][rdflib.plugins.parsers.hext.HextuplesParser] |
+| n3 | [`N3Parser`][rdflib.plugins.parsers.notation3.N3Parser] |
+| nquads | [`NQuadsParser`][rdflib.plugins.parsers.nquads.NQuadsParser] |
+| patch | [`RDFPatchParser`][rdflib.plugins.parsers.patch.RDFPatchParser] |
+| nt | [`NTParser`][rdflib.plugins.parsers.ntriples.NTParser] |
+| trix | [`TriXParser`][rdflib.plugins.parsers.trix.TriXParser] |
+| turtle | [`TurtleParser`][rdflib.plugins.parsers.notation3.TurtleParser] |
+| xml | [`RDFXMLParser`][rdflib.plugins.parsers.rdfxml.RDFXMLParser] |
+
+### Multi-graph IDs
+
+Note that for correct parsing of multi-graph data, e.g. TriG, HexTuples, etc., into a `Dataset`, as opposed to a context-unaware `Graph`, you will need to set the `publicID` of the `Dataset` to the identifier of the `default_context` (default graph), for example:
+
+```python
+from rdflib import Dataset
+
+d = Dataset()
+d.parse(
+    data=""" ... """,
+    format="trig",
+    publicID=d.default_context.identifier
+)
+```
+
+(from the file tests/test_serializer_hext.py)
+
+## Plugin serializers
+
+These serializers are available in default RDFLib; you can use them by passing the name to a graph's [`serialize()`][rdflib.graph.Graph.serialize] method:
+
+```python
+print(graph.serialize(format='n3'))
+```
+
+It is also possible to pass a mime-type for the `format` parameter:
+
+```python
+graph.serialize(my_url, format='application/rdf+xml')
+```
+
+| Name | Class |
+|------|-------|
+| json-ld | [`JsonLDSerializer`][rdflib.plugins.serializers.jsonld.JsonLDSerializer] |
+| n3 | [`N3Serializer`][rdflib.plugins.serializers.n3.N3Serializer] |
+| nquads | [`NQuadsSerializer`][rdflib.plugins.serializers.nquads.NQuadsSerializer] |
+| nt | [`NTSerializer`][rdflib.plugins.serializers.nt.NTSerializer] |
+| hext | [`HextuplesSerializer`][rdflib.plugins.serializers.hext.HextuplesSerializer] |
+| patch | [`PatchSerializer`][rdflib.plugins.serializers.patch.PatchSerializer] |
+| pretty-xml | [`PrettyXMLSerializer`][rdflib.plugins.serializers.rdfxml.PrettyXMLSerializer] |
+| trig | [`TrigSerializer`][rdflib.plugins.serializers.trig.TrigSerializer] |
+| trix | [`TriXSerializer`][rdflib.plugins.serializers.trix.TriXSerializer] |
+| turtle | [`TurtleSerializer`][rdflib.plugins.serializers.turtle.TurtleSerializer] |
+| longturtle | [`LongTurtleSerializer`][rdflib.plugins.serializers.longturtle.LongTurtleSerializer] |
+| xml | [`XMLSerializer`][rdflib.plugins.serializers.rdfxml.XMLSerializer] |
+
+### JSON-LD
+
+JSON-LD - 'json-ld' - has been incorporated into RDFLib since v6.0.0.
+
+### RDF Patch
+
+The RDF Patch Serializer - 'patch' - uses the RDF Patch format defined at https://afs.github.io/rdf-patch/. It supports serializing context-aware stores as either addition or deletion patches, and also supports serializing the difference between two context-aware stores as a Patch of additions and deletions.
+
+### HexTuples
+
+The HexTuples Serializer - 'hext' - uses the HexTuples format defined at https://github.com/ontola/hextuples.
+
+For serialization of non-context-aware data sources, e.g. a single `Graph`, the 'graph' field (6th variable in the Hextuple) will be an empty string.
+
+For context-aware (multi-graph) serialization, the 'graph' field of the default graph will be an empty string and the values for other graphs will be Blank Node IDs or IRIs.
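+
+A rough sketch of what the serializer produces (the example triple is invented; output is shown approximately):
+
+```python
+from rdflib import Graph
+
+g = Graph()
+g.parse(
+    data='<http://example.com/s> <http://example.com/p> "o" .',
+    format="turtle",
+)
+print(g.serialize(format="hext"))
+# one JSON array per statement: [subject, predicate, value, datatype, language, graph]
+# ["http://example.com/s", "http://example.com/p", "o", "http://www.w3.org/2001/XMLSchema#string", "", ""]
+```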
+
+### Longturtle
+
+Longturtle is just the Turtle format with newlines preferred over compactness (multiple nodes on the same line), to enhance the format's text-file version control (think Git) friendliness, and with more modern forms of prefix markers - PREFIX instead of @prefix - to make it as similar to SPARQL as possible.
+
+Longturtle is Turtle 1.1 compliant and will work wherever ordinary Turtle works, however some very old parsers don't understand PREFIX, only @prefix...
+
+## Plugin query results
+
+Plugins for reading and writing of (SPARQL) [`Result`][rdflib.query.Result] - pass `name` to either [`parse()`][rdflib.query.Result.parse] or [`serialize()`][rdflib.query.Result.serialize]
+
+### Parsers
+
+| Name | Class |
+| ---- | ----- |
+| csv | [`CSVResultParser`][rdflib.plugins.sparql.results.csvresults.CSVResultParser] |
+| json | [`JSONResultParser`][rdflib.plugins.sparql.results.jsonresults.JSONResultParser] |
+| tsv | [`TSVResultParser`][rdflib.plugins.sparql.results.tsvresults.TSVResultParser] |
+| xml | [`XMLResultParser`][rdflib.plugins.sparql.results.xmlresults.XMLResultParser] |
+
+### Serializers
+
+| Name | Class |
+| ---- | ----- |
+| csv | [`CSVResultSerializer`][rdflib.plugins.sparql.results.csvresults.CSVResultSerializer] |
+| json | [`JSONResultSerializer`][rdflib.plugins.sparql.results.jsonresults.JSONResultSerializer] |
+| txt | [`TXTResultSerializer`][rdflib.plugins.sparql.results.txtresults.TXTResultSerializer] |
+| xml | [`XMLResultSerializer`][rdflib.plugins.sparql.results.xmlresults.XMLResultSerializer] |
diff --git a/docs/plugins.rst b/docs/plugins.rst
deleted file mode 100644
index fd3ef5073..000000000
--- a/docs/plugins.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-
-Plugins
-=======
-
-.. image:: /_static/plugins-diagram.*
-   :alt: rdflib plugin "architecture"
-   :width: 450px
-   :target: _static/plugins-diagram.svg
-
-
-Many parts of RDFLib are extensible with plugins, `see setuptools' 'Creating and discovering plugins' <https://packaging.python.org/guides/creating-and-discovering-plugins/>`_. These pages list the plugins included in RDFLib core.
-
-
-
-.. toctree::
-   :maxdepth: 1
-
-   plugin_parsers
-   plugin_serializers
-   plugin_stores
-   plugin_query_results
diff --git a/docs/rdf_terms.md b/docs/rdf_terms.md
new file mode 100644
index 000000000..b1d29325b
--- /dev/null
+++ b/docs/rdf_terms.md
@@ -0,0 +1,154 @@
+# RDF terms in rdflib
+
+Terms are the kinds of objects that can appear in an RDFLib graph's triples. Those that are part of core RDF concepts are: `IRI`, `Blank Node` and `Literal`, the latter consisting of a literal value and either a [datatype](https://www.w3.org/TR/xmlschema-2/#built-in-datatypes) or an [RFC 3066](https://tools.ietf.org/html/rfc3066) language tag.
+
+!!! info "Origins"
+    RDFLib's class for representing IRIs/URIs is called "URIRef" because, at the time it was implemented, that was what the then current RDF specification called URIs/IRIs. We preserve that class name but refer to the RDF object as "IRI".
+
+## Class hierarchy
+
+All terms in RDFLib are sub-classes of the [`Identifier`][rdflib.term.Identifier] class. A class diagram of the various terms is:
+
+![Term Class Hierarchy](_static/term_class_hierarchy.svg)
+
+Nodes are a subset of the Terms that underlying stores actually persist.
+
+The set of such Terms depends on whether or not the store is formula-aware. Stores that aren't formula-aware only persist those terms core to the RDF Model but those that are formula-aware also persist the N3 extensions.
However, utility terms that only serve the purpose of matching nodes by term-patterns will probably only be terms and not nodes.
+
+## Python Classes
+
+The three main RDF objects - *IRI*, *Blank Node* and *Literal* - are represented in RDFLib by these three main Python classes:
+
+### URIRef
+
+An IRI (Internationalized Resource Identifier) is represented within RDFLib using the [`URIRef class`][rdflib.term.URIRef]. From [the RDF 1.1 specification's IRI section](https://www.w3.org/TR/rdf11-concepts/#section-IRIs):
+
+```python
+>>> from rdflib import URIRef
+>>> uri = URIRef()  # doctest: +SKIP
+Traceback (most recent call last):
+  File "<stdin>", line 1, in <module>
+TypeError: __new__() missing 1 required positional argument: 'value'
+>>> uri = URIRef('')
+>>> uri
+rdflib.term.URIRef('')
+>>> uri = URIRef('http://example.com')
+>>> uri
+rdflib.term.URIRef('http://example.com')
+>>> uri.n3()
+'<http://example.com>'
+```
+
+### BNodes
+
+In RDF, a blank node (also called BNode) is a node in an RDF graph representing a resource for which an IRI or literal is not given. The resource represented by a blank node is also called an anonymous resource. According to the RDF standard, a blank node can only be used as subject or object in a triple, although in some syntaxes like Notation 3 it is acceptable to use a blank node as a predicate. If a blank node has a node ID (not all blank nodes are labelled in all RDF serializations), it is limited in scope to a particular serialization of the RDF graph, i.e. the node p1 in one graph does not represent the same node as a node named p1 in any other graph -- [wikipedia](http://en.wikipedia.org/wiki/Blank_node)
+
+See the [`BNode`][rdflib.term.BNode] class' documentation.
+
+```python
+>>> from rdflib import BNode
+>>> bn = BNode()
+>>> bn  # doctest: +SKIP
+rdflib.term.BNode('AFwALAKU0')
+>>> bn.n3()  # doctest: +SKIP
+'_:AFwALAKU0'
+```
+
+### Literals
+
+Literals are attribute values in RDF, for instance, a person's name, the date of birth, height, etc. and are stored using simple data types, e.g. *string*, *double*, *dateTime* etc. This usually looks something like this:
+
+```python
+name = Literal("Nicholas")  # the name 'Nicholas', as a string
+
+age = Literal(39, datatype=XSD.integer)  # the number 39, as an integer
+```
+
+A slightly special case is a *langString* which is a *string* with a language tag, e.g.:
+
+```python
+name = Literal("Nicholas", lang="en")  # the name 'Nicholas', as an English string
+imie = Literal("Mikołaj", lang="pl")  # the Polish version of the name 'Nicholas'
+```
+
+Special literal types are indicated by use of a custom IRI for a literal's `datatype` value; for example, the [GeoSPARQL RDF standard](https://opengeospatial.github.io/ogc-geosparql/geosparql11/spec.html#_geometry_serializations) invents a custom datatype, `geoJSONLiteral`, to indicate [GeoJSON geometry serializations](https://opengeospatial.github.io/ogc-geosparql/geosparql11/spec.html#_rdfs_datatype_geogeojsonliteral) like this:
+
+```python
+GEO = Namespace("http://www.opengis.net/ont/geosparql#")
+
+geojson_geometry = Literal(
+    '''{"type": "Point", "coordinates": [-83.38,33.95]}''',
+    datatype=GEO.geoJSONLiteral
+)
+```
+
+See the [`Literal`][rdflib.term.Literal] class' documentation, followed by notes on Literal from the [RDF 1.1 specification 'Literals' section](https://www.w3.org/TR/rdf11-concepts/#section-Graph-Literal).
+
+A literal in an RDF graph contains one or two named components.
+
+All literals have a lexical form being a Unicode string, which SHOULD be in Normal Form C.
+
+Plain literals have a lexical form and optionally a language tag as defined by [RFC 3066](https://tools.ietf.org/html/rfc3066), normalized to lowercase. An exception will be raised if illegal language tags are passed to [\_\_new\_\_()][rdflib.term.Literal.__new__].
+
+Typed literals have a lexical form and a datatype URI being an RDF URI reference.
+
+!!! abstract "Language vs. locale"
+    When using the language tag, care must be taken not to confuse language with locale. The language tag relates only to human language text. Presentational issues should be addressed in end-user applications.
+
+!!! quote "Case sensitive"
+    The case normalization of language tags is part of the description of the abstract syntax, and consequently the abstract behaviour of RDF applications. It does not constrain an RDF implementation to actually normalize the case. Crucially, the result of comparing two language tags should not be sensitive to the case of the original input. -- [RDF Concepts and Abstract Syntax](http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref)
+
+#### Common XSD datatypes
+
+Most simple literals such as *string* or *integer* have XML Schema (XSD) datatypes defined for them, see the figure below. Additionally, these XSD datatypes are listed in the [XSD Namespace class][rdflib.namespace.XSD] that ships with RDFLib, so many Python code editors will prompt you with autocomplete for them when using it.
+
+Remember, you don't *have* to use XSD datatypes and can always make up your own, as GeoSPARQL does, as described above.
+
+![datatype hierarchy](_static/datatype_hierarchy.png)
+
+#### Python conversions
+
+RDFLib Literals essentially behave like unicode characters with an XML Schema datatype or language attribute.
+
+The class provides a mechanism to both convert Python literals (and their built-ins such as time/date/datetime) into equivalent RDF Literals and (conversely) convert Literals to their Python equivalent. This mapping to and from Python literals is done as follows:
+
+| XML Datatype | Python type |
+|--------------|-------------|
+| None | None [^1] |
+| xsd:time | time [^2] |
+| xsd:date | date |
+| xsd:dateTime | datetime |
+| xsd:string | None |
+| xsd:normalizedString | None |
+| xsd:token | None |
+| xsd:language | None |
+| xsd:boolean | boolean |
+| xsd:decimal | Decimal |
+| xsd:integer | long |
+| xsd:nonPositiveInteger | int |
+| xsd:long | long |
+| xsd:nonNegativeInteger | int |
+| xsd:negativeInteger | int |
+| xsd:int | long |
+| xsd:unsignedLong | long |
+| xsd:positiveInteger | int |
+| xsd:short | int |
+| xsd:unsignedInt | long |
+| xsd:byte | int |
+| xsd:unsignedShort | int |
+| xsd:unsignedByte | int |
+| xsd:float | float |
+| xsd:double | float |
+| xsd:base64Binary | base64 |
+| xsd:anyURI | None |
+| rdf:XMLLiteral | Document (xml.dom.minidom.Document) [^3] |
+| rdf:HTML | DocumentFragment (xml.dom.minidom.DocumentFragment) |
+
+[^1]: plain literals map directly to value space
+[^2]: Date, time and datetime literals are mapped to Python instances using the RDFLib xsd_datetime module, which is based on the [isodate](http://pypi.python.org/pypi/isodate/) package.
+[^3]: this is a bit dirty - by accident the `html5lib` parser produces `DocumentFragments`, and the xml parser `Documents`, letting us use this to decide what datatype when round-tripping.
+
+An appropriate data-type and lexical representation can be found using `_castPythonToLiteral`, and the other direction with `_castLexicalToPython`.
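+
+A small sketch of this round trip (expected values are shown in comments, per the table above):
+
+```python
+from rdflib import Literal, XSD
+
+lit = Literal(2)       # datatype inferred from the Python int
+print(lit.datatype)    # http://www.w3.org/2001/XMLSchema#integer
+print(lit.toPython())  # 2, a plain Python int again
+
+d = Literal("2010-01-01", datatype=XSD.date)
+print(repr(d.toPython()))  # datetime.date(2010, 1, 1)
+```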
+ +All this happens automatically when creating `Literal` objects by passing Python objects to the constructor, and you never have to do this manually. + +You can add custom data-types with [`bind()`][rdflib.term.bind], see also [`custom_datatype example`][examples.custom_datatype] diff --git a/docs/rdf_terms.rst b/docs/rdf_terms.rst deleted file mode 100644 index f83127da8..000000000 --- a/docs/rdf_terms.rst +++ /dev/null @@ -1,230 +0,0 @@ -.. _rdf_terms: RDF terms in rdflib - -=================== -RDF terms in rdflib -=================== - -Terms are the kinds of objects that can appear in a RDFLib's graph's triples. -Those that are part of core RDF concepts are: ``IRIs``, ``Blank Node`` -and ``Literal``, the latter consisting of a literal value and either a `datatype `_ -or an :rfc:`3066` language tag. - -.. note:: RDFLib's class for representing IRIs/URIs is called "URIRef" because, at the time it was implemented, that was what the then current RDF specification called URIs/IRIs. We preserve that class name but refer to the RDF object as "IRI". - -Class hierarchy -=============== - -All terms in RDFLib are sub-classes of the :class:`rdflib.term.Identifier` class. A class diagram of the various terms is: - -.. _term_class_hierarchy: -.. figure:: /_static/term_class_hierarchy.svg - :alt: Term Class Hierarchy - - Term Class Hierarchy - - -Nodes are a subset of the Terms that underlying stores actually persist. - -The set of such Terms depends on whether or not the store is formula-aware. -Stores that aren't formula-aware only persist those terms core to the -RDF Model but those that are formula-aware also persist the N3 -extensions. However, utility terms that only serve the purpose of -matching nodes by term-patterns will probably only be terms and not nodes. - -Python Classes -============== - -The three main RDF objects - *IRI*, *Blank Node* and *Literal* are represented in RDFLib by these three main Python classes: - -URIRef ------- - -An IRI (Internationalized Resource Identifier) is represented within RDFLib using the URIRef class. From `the RDF 1.1 specification's IRI section `_: - -Here is the *URIRef* class' auto-built documentation: - -.. autoclass:: rdflib.term.URIRef - :noindex: - -.. code-block:: python - - >>> from rdflib import URIRef - >>> uri = URIRef() # doctest: +SKIP - Traceback (most recent call last): - File "", line 1, in - TypeError: __new__() missing 1 required positional argument: 'value' - >>> uri = URIRef('') - >>> uri - rdflib.term.URIRef('') - >>> uri = URIRef('http://example.com') - >>> uri - rdflib.term.URIRef('http://example.com') - >>> uri.n3() - '' - - -BNodes ------- - -In RDF, a blank node (also called BNode) is a node in an RDF graph representing a resource for which an IRI or literal is not given. The resource represented by a blank node is also called an anonymous resource. According to the RDF standard, a blank node can only be used as subject or object in a triple, although in some syntaxes like Notation 3 it is acceptable to use a blank node as a predicate. If a blank node has a node ID (not all blank nodes are labelled in all RDF serializations), it is limited in scope to a particular serialization of the RDF graph, i.e. the node p1 in one graph does not represent the same node as a node named p1 in any other graph -- `wikipedia`__ - - -.. __: http://en.wikipedia.org/wiki/Blank_node - -Here is the *BNode* class' auto-built documentation: - -.. autoclass:: rdflib.term.BNode - :noindex: - -.. 
code-block:: python - - >>> from rdflib import BNode - >>> bn = BNode() - >>> bn # doctest: +SKIP - rdflib.term.BNode('AFwALAKU0') - >>> bn.n3() # doctest: +SKIP - '_:AFwALAKU0' - - -.. _rdflibliterals: - -Literals --------- - -Literals are attribute values in RDF, for instance, a person's name, the date of birth, height, etc. -and are stored using simple data types, e.g. *string*, *double*, *dateTime* etc. This usually looks -something like this: - -.. code-block:: python - - name = Literal("Nicholas") # the name 'Nicholas', as a string - - age = Literal(39, datatype=XSD.integer) # the number 39, as an integer - - - -A slightly special case is a *langString* which is a *string* with a language tag, e.g.: - -.. code-block:: python - - name = Literal("Nicholas", lang="en") # the name 'Nicholas', as an English string - imie = Literal("Mikołaj", lang="pl") # the Polish version of the name 'Nicholas' - - -Special literal types indicated by use of a custom IRI for a literal's ``datatype`` value, -for example the `GeoSPARQL RDF standard `_ -invents a custom datatype, ``geoJSONLiteral`` to indicate `GeoJSON geometry serlializations `_ -like this: - -.. code-block:: python - - GEO = Namespace("http://www.opengis.net/ont/geosparql#") - - geojson_geometry = Literal( - '''{"type": "Point", "coordinates": [-83.38,33.95]}''', - datatype=GEO.geoJSONLiteral - - -Here is the ``Literal`` class' auto-built documentation, followed by notes on Literal from the `RDF 1.1 specification 'Literals' section `_. - -.. autoclass:: rdflib.term.Literal - :noindex: - -A literal in an RDF graph contains one or two named components. - -All literals have a lexical form being a Unicode string, which SHOULD be in Normal Form C. - -Plain literals have a lexical form and optionally a language tag as defined by :rfc:`3066`, normalized to lowercase. An exception will be raised if illegal language-tags are passed to :meth:`rdflib.term.Literal.__new__`. - -Typed literals have a lexical form and a datatype URI being an RDF URI reference. - -.. note:: When using the language tag, care must be taken not to confuse language with locale. The language tag relates only to human language text. Presentational issues should be addressed in end-user applications. - -.. note:: The case normalization of language tags is part of the description of the abstract syntax, and consequently the abstract behaviour of RDF applications. It does not constrain an RDF implementation to actually normalize the case. Crucially, the result of comparing two language tags should not be sensitive to the case of the original input. -- `RDF Concepts and Abstract Syntax`__ - - - -.. __: http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref - -Common XSD datatypes -^^^^^^^^^^^^^^^^^^^^ - -Most simple literals such as *string* or *integer* have XML Schema (XSD) datatypes defined for them, see the figure -below. Additionally, these XSD datatypes are listed in the :class:`XSD Namespace class ` that -ships with RDFLib, so many Python code editors will prompt you with autocomplete for them when using it. - -Remember, you don't *have* to use XSD datatypes and can always make up your own, as GeoSPARQL does, as described above. - -.. image:: /_static/datatype_hierarchy.png - :alt: datatype hierarchy - :align: center - :width: 629 - :height: 717 - -Python conversions -^^^^^^^^^^^^^^^^^^ - -RDFLib Literals essentially behave like unicode characters with an XML Schema datatype or language attribute. 
- -The class provides a mechanism to both convert Python literals (and their built-ins such as time/date/datetime) -into equivalent RDF Literals and (conversely) convert Literals to their Python equivalent. This mapping to and -from Python literals is done as follows: - -====================== =========== -XML Datatype Python type -====================== =========== -None None [#f1]_ -xsd:time time [#f2]_ -xsd:date date -xsd:dateTime datetime -xsd:string None -xsd:normalizedString None -xsd:token None -xsd:language None -xsd:boolean boolean -xsd:decimal Decimal -xsd:integer long -xsd:nonPositiveInteger int -xsd:long long -xsd:nonNegativeInteger int -xsd:negativeInteger int -xsd:int long -xsd:unsignedLong long -xsd:positiveInteger int -xsd:short int -xsd:unsignedInt long -xsd:byte int -xsd:unsignedShort int -xsd:unsignedByte int -xsd:float float -xsd:double float -xsd:base64Binary :mod:`base64` -xsd:anyURI None -rdf:XMLLiteral :class:`xml.dom.minidom.Document` [#f3]_ -rdf:HTML :class:`xml.dom.minidom.DocumentFragment` -====================== =========== - -.. [#f1] plain literals map directly to value space - -.. [#f2] Date, time and datetime literals are mapped to Python - instances using the RDFlib xsd_datetime module, that is based - on the `isodate `_ - package). - -.. [#f3] this is a bit dirty - by accident the ``html5lib`` parser - produces ``DocumentFragments``, and the xml parser ``Documents``, - letting us use this to decide what datatype when round-tripping. - -An appropriate data-type and lexical representation can be found using: - -.. autofunction:: rdflib.term._castPythonToLiteral - -and the other direction with - -.. autofunction:: rdflib.term._castLexicalToPython - -All this happens automatically when creating ``Literal`` objects by passing Python objects to the constructor, -and you never have to do this manually. - -You can add custom data-types with :func:`rdflib.term.bind`, see also :mod:`examples.custom_datatype` - diff --git a/docs/security_considerations.md b/docs/security_considerations.md new file mode 100644 index 000000000..811432570 --- /dev/null +++ b/docs/security_considerations.md @@ -0,0 +1,78 @@ +# Security Considerations + +RDFLib is designed to access arbitrary network and file resources, in some cases these are directly requested resources, in other cases they are indirectly referenced resources. + +An example of where indirect resources are accessed is JSON-LD processing, where network or file resources referenced by `@context` values will be loaded and processed. + +RDFLib also supports SPARQL, which has federated query capabilities that allow +queries to query arbitrary remote endpoints. + +If you are using RDFLib to process untrusted documents or queries, you should +take measures to restrict file and network access. + +Some measures that can be taken to restrict file and network access are: + +* [Operating System Security Measures](#operating-system-security-measures) +* [Python Runtime Audit Hooks](#python-runtime-audit-hooks) +* [Custom URL Openers](#custom-url-openers) + +Of these, operating system security measures are recommended. The other measures work, but they are not as effective as operating system security measures, and even if they are used, they should be used in conjunction with operating system security measures. + +## Operating System Security Measures + +Most operating systems provide functionality that can be used to restrict network and file access of a process. 
+
+Some examples of these include:
+
+* [Open Container Initiative (OCI) Containers](https://www.opencontainers.org/) (aka Docker containers).
+
+  Most OCI runtimes provide mechanisms to restrict network and file
+  access of containers. For example, using Docker, you can limit your
+  container to only being able to access files explicitly mapped into
+  the container and only access the network through a firewall. For more
+  information, refer to the documentation of the tool you use to manage
+  your OCI containers:
+
+    * [Kubernetes](https://kubernetes.io/docs/home/)
+    * [Docker](https://docs.docker.com/)
+    * [Podman](https://podman.io/)
+
+* [firejail](https://firejail.wordpress.com/) can be used to
+  sandbox a process on Linux and restrict its network and file access.
+
+* File and network access restrictions.
+
+  Most operating systems provide a way to restrict operating system users to
+  only being able to access files and network resources that are explicitly
+  allowed. Applications that process untrusted input could be run as a user with
+  these restrictions in place.
+
+Many other measures are available; however, listing them is outside the scope of this document.
+
+Of the listed measures, OCI containers are recommended. In most cases, OCI containers are constrained by default: they can't access the loopback interface and can only access files that are explicitly mapped into the container.
+
+## Python Runtime Audit Hooks
+
+From Python 3.8 onwards, Python provides a mechanism to install runtime audit hooks that can be used to limit access to files and network resources.
+
+The runtime audit hook system is described in more detail in [PEP 578 – Python Runtime Audit Hooks](https://peps.python.org/pep-0578/).
+
+Runtime audit hooks can be installed using the [sys.addaudithook](https://docs.python.org/3/library/sys.html#sys.addaudithook) function, and will then get called when audit events occur. The audit events raised by the Python runtime and standard library are described in Python's [audit events table](https://docs.python.org/3/library/audit_events.html).
+
+RDFLib uses `urllib.request.urlopen` for HTTP, HTTPS and other network access, and this function raises a `urllib.Request` audit event. For file access, RDFLib uses `open`, which raises an `open` audit event.
+
+Users of RDFLib can install audit hooks that react to these audit events and raise an exception when an attempt is made to access files or network resources that are not explicitly allowed.
+
+RDFLib's test suite includes tests which verify that audit hooks can block access to network and file resources.
+
+RDFLib also includes an example that shows how runtime audit hooks can be used to restrict network and file access in [`secure_with_audit`][examples.secure_with_audit].
+
+## Custom URL Openers
+
+RDFLib uses the `urllib.request.urlopen` function for HTTP, HTTPS and other network access. This function will use a `urllib.request.OpenerDirector` installed with `urllib.request.install_opener` to open the URLs.
+
+Users of RDFLib can install a custom URL opener that raises an exception when an attempt is made to access network resources that are not explicitly allowed.
+
+RDFLib's test suite includes tests which verify that custom URL openers can be used to block access to network resources.
+
+RDFLib also includes an example that shows how a custom opener can be used to restrict network access in [`secure_with_urlopen`][examples.secure_with_urlopen].
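+
+As a minimal sketch of this approach (the handler class and allow-list below are illustrative, not an RDFLib API; the bundled example linked above shows a fuller, tested version):
+
+```python
+import urllib.request
+
+
+class AllowListHTTPHandler(urllib.request.HTTPHandler):
+    """Refuses plain-HTTP requests to any host outside the allow-list."""
+
+    allowed_hosts = {"example.com"}  # illustrative allow-list
+
+    def http_open(self, req):
+        if req.host not in self.allowed_hosts:
+            raise PermissionError(f"blocked HTTP request to {req.host}")
+        return super().http_open(req)
+
+
+# Install globally so urllib.request.urlopen (and hence RDFLib) uses it.
+# HTTPS would need an analogous urllib.request.HTTPSHandler subclass.
+urllib.request.install_opener(urllib.request.build_opener(AllowListHTTPHandler))
+```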
diff --git a/docs/security_considerations.rst b/docs/security_considerations.rst deleted file mode 100644 index 77925a0f5..000000000 --- a/docs/security_considerations.rst +++ /dev/null @@ -1,114 +0,0 @@ -.. _security_considerations: Security Considerations - -======================= -Security Considerations -======================= - -RDFLib is designed to access arbitrary network and file resources, in some cases -these are directly requested resources, in other cases they are indirectly -referenced resources. - -An example of where indirect resources are accessed is JSON-LD processing, where -network or file resources referenced by ``@context`` values will be loaded and -processed. - -RDFLib also supports SPARQL, which has federated query capabilities that allow -queries to query arbitrary remote endpoints. - -If you are using RDFLib to process untrusted documents or queries, you should -take measures to restrict file and network access. - -Some measures that can be taken to restrict file and network access are: - -* `Operating System Security Measures`_. -* `Python Runtime Audit Hooks`_. -* `Custom URL Openers`_. - -Of these, operating system security measures are recommended. The other -measures work, but they are not as effective as operating system security -measures, and even if they are used, they should be used in conjunction with -operating system security measures. - -Operating System Security Measures -================================== - -Most operating systems provide functionality that can be used to restrict -network and file access of a process. - -Some examples of these include: - -* `Open Container Initiative (OCI) Containers - `_ (aka Docker containers). - - Most OCI runtimes provide mechanisms to restrict network and file - access of containers. For example, using Docker, you can limit your - container to only being able to access files explicitly mapped into - the container and only access the network through a firewall. For more - information, refer to the documentation of the tool you use to manage - your OCI containers: - - * `Kubernetes `_ - * `Docker `_ - * `Podman `_ - -* `firejail `_ can be used to - sandbox a process on Linux and restrict its network and file access. - -* File and network access restrictions. - - Most operating systems provide a way to restrict operating system users to - only being able to access files and network resources that are explicitly - allowed. Applications that process untrusted input could be run as a user with - these restrictions in place. - -Many other measures are available, however, listing them is outside -the scope of this document. - -Of the listed measures, OCI containers are recommended. In most cases, OCI -containers are constrained by default and can't access the loopback interface -and can only access files that are explicitly mapped into the container. - -Python Runtime Audit Hooks -========================== - -From Python 3.8 onwards, Python provides a mechanism to install runtime audit -hooks that can be used to limit access to files and network resources. - -The runtime audit hook system is described in more detail in `PEP 578 – Python -Runtime Audit Hooks `_. - -Runtime audit hooks can be installed using the `sys.addaudithook -`_ function, and -will then get called when audit events occur. The audit events raised by the -Python runtime and standard library are described in Python's `audit events -table `_. 
-
-RDFLib uses `urllib.request.urlopen` for HTTP, HTTPS and other network access,
-and this function raises a ``urllib.Request`` audit event. For file access,
-RDFLib uses `open`, which raises an ``open`` audit event.
-
-Users of RDFLib can install audit hooks that react to these audit events and
-raise an exception when an attempt is made to access files or network resources
-that are not explicitly allowed.
-
-RDFLib's test suite includes tests which verify that audit hooks can block
-access to network and file resources.
-
-RDFLib also includes an example that shows how runtime audit hooks can be
-used to restrict network and file access in :mod:`~examples.secure_with_audit`.
-
-Custom URL Openers
-==================
-
-RDFLib uses the `urllib.request.urlopen` for HTTP, HTTPS and other network
-access. This function will use a `urllib.request.OpenerDirector` installed with
-`urllib.request.install_opener` to open the URLs.
-
-Users of RDFLib can install a custom URL opener that raises an exception when an
-attempt is made to access network resources that are not explicitly allowed.
-
-RDFLib's test suite includes tests which verify that custom URL openers can be
-used to block access to network resources.
-
-RDFLib also includes an example that shows how a custom opener can be used to
-restrict network access in :mod:`~examples.secure_with_urlopen`.
diff --git a/docs/type_hints.rst b/docs/type_hints.md
similarity index 56%
rename from docs/type_hints.rst
rename to docs/type_hints.md
index 31eed6ee7..b949fd11a 100644
--- a/docs/type_hints.rst
+++ b/docs/type_hints.md
@@ -1,29 +1,22 @@
-.. _type_hints: Type Hints
+# Type Hints
-==========
-Type Hints
-==========
+This document provides some details about the type hints for RDFLib. More information about type hints can be found [here](https://docs.python.org/3/library/typing.html).
-This document provides some details about the type hints for RDFLib. More information about type hints can be found `here `_
+## Rationale for Type Hints
-Rationale for Type Hints
-========================
-
-Type hints are code annotations that describe the types of variables, function parameters and function return value types in a way that can be understood by humans, static type checkers like `mypy `_, code editors like VSCode, documentation generators like Sphinx, and other tools.
+Type hints are code annotations that describe the types of variables, function parameters and function return value types in a way that can be understood by humans, static type checkers like [mypy](http://mypy-lang.org/), code editors like VSCode, documentation generators like mkdocstrings, and other tools.
 
 Static type checkers can use type hints to detect certain classes of errors by inspection. Code editors and IDEs can use type hints to provide better auto-completion and documentation generators can use type hints to generate better documentation.
 
 These capabilities make it easier to develop a defect-free RDFLib and they also make it easier for users of RDFLib who can now use static type checkers to detect type errors in code that uses RDFLib.
 
-Gradual Typing Process
-======================
+## Gradual Typing Process
 
-Type hints are being added to RDFLib through a process called `gradual typing `_. This process involves adding type hints to some parts of RDFLib while leaving the rest without type hints. Gradual typing is being applied to many, long-lived, Python code bases.
+Type hints are being added to RDFLib through a process called [gradual typing](https://en.wikipedia.org/wiki/Gradual_typing). This process involves adding type hints to some parts of RDFLib while leaving the rest without type hints. Gradual typing is being applied to many long-lived Python code bases.
 
 This process is beneficial in that we can realize some of the benefits of type hints without requiring that the whole codebase have type hints.
 
-Intended Type Hints
-===================
+## Intended Type Hints
 
 The intent is to have type hints in place for all of RDFLib and to have these type hints be as accurate as possible.
 
@@ -31,33 +24,32 @@ The accuracy of type hints is determined by both the standards that RDFLib aims
 
 There may be cases where some functionality of RDFLib may work perfectly well with values of types that are excluded by the type hints, but if these additional types violate the relevant standards we will consider the correct type hints to be those that exclude values of these types.
 
-Public Type Aliases
-===================
-In python, type hints are specified in annotations. Type hints are different from type aliases which are normal python variables that are not intended to provide runtime utility and are instead intended for use in static type checking.
+## Public Type Aliases
 
-For clarity, the following is an example of a function ``foo`` with type hints:
+In Python, type hints are specified in annotations. Type hints are different from type aliases, which are normal Python variables that are not intended to provide runtime utility and are instead intended for use in static type checking.
 
-.. code-block:: python
-
-    def foo(a: int) -> int:
-        return a + 1
+For clarity, the following is an example of a function `foo` with type hints:
 
-In the function ``foo``, the input variable ``a`` is indicated to be of type ``int`` and the function is indicated to return an ``int``.
+```python
+def foo(a: int) -> int:
+    return a + 1
+```
 
-The following is an example of a type alias ``Bar``:
+In the function `foo`, the input variable `a` is indicated to be of type `int` and the function is indicated to return an `int`.
 
-.. code-block:: python
+The following is an example of a type alias `Bar`:
 
-    from typing import Tuple
+```python
+Bar = tuple[int, str]
+```
 
-    Bar = Tuple[int, str]
 
-RDFLib will provide public type aliases under the ``rdflib.typing`` package, for example, ``rdflib.typing.Triple``, ``rdflib.typing.Quad``. Type aliases in the rest of RDFLib should be private (i.e. being with an underscore).
+RDFLib will provide public type aliases under the `rdflib.typing` package, for example, `rdflib.typing.Triple`, `rdflib.typing.Quad`. Type aliases in the rest of RDFLib should be private (i.e. begin with an underscore).
 
-Versioning, Compatibility and Stability
-=======================================
+## Versioning, Compatibility and Stability
 
-RDFLib attempts to adhere to `semver 2.0 `_ which is concerned with the public API of software.
+RDFLib attempts to adhere to [semver 2.0](https://semver.org/spec/v2.0.0.html), which is concerned with the public API of software.
 
 Ignoring type hints, the public API of RDFLib exists implicitly as a consequence of the code of RDFLib and the actual behaviour this entails, the relevant standards that RDFLib is trying to implement, and the documentation of RDFLib, with some interplay between all three of these. RDFLib's public API includes public type aliases, as these are normal python variables and not annotations.
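+
+For illustration, a sketch of what such public alias definitions could look like (hypothetical shapes only; the actual definitions are whatever RDFLib ships under `rdflib.typing`):
+
+```python
+from rdflib.graph import Graph
+from rdflib.term import Node
+
+# Hypothetical alias shapes, for illustration only
+Triple = tuple[Node, Node, Node]       # (subject, predicate, object)
+Quad = tuple[Node, Node, Node, Graph]  # a triple plus the graph that holds it
+```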
@@ -70,18 +62,17 @@ Changes to type hints can broadly be classified as follow:
 **Type Declaration**
     Adding type hints to existing code that had no explicit type hints, for example, changing
 
-    .. code-block:: python
-
-        def foo(val):
-            return val + 1
-
-    to
+```python
+def foo(val):
+    return val + 1
+```
 
-    .. code-block:: python
-
-        def foo(val: int) -> int:
-            return val + 1
+to
 
+```python
+def foo(val: int) -> int:
+    return val + 1
+```
 
 **Type Refinement**
     Refining existing type hints to be narrower, for example, changing a type hint of `typing.Collection` to `typing.Sequence`.
@@ -89,33 +80,13 @@ Changes to type hints can broadly be classified as follow:
 **Type Corrections**
     Correcting existing type hints which contradict the behaviour of the code or relevant specifications, for example, changing `typing.Sequence` from `typing.Set`
 
-Given semver version components ``MAJOR.MINOR.PATCH``, RDFLib will attempt to constrain type hint changes as follow:
-
-.. list-table::
-   :widths: 1 1 1 1
-   :header-rows: 1
-
-   * - Version Component
-     - Type Declaration
-     - Type Refinement
-     - Type Corrections
-
-   * - MAJOR
-     - YES
-     - YES
-     - YES
-
-   * - MINOR
-     - YES
-     - YES
-     - YES
-
-   * - PATCH
-     - NO
-     - NO
-     - YES
-
-.. CAUTION::
-   A caveat worth nothing here is that code that passed type validation on one version of RDFLib can fail type validation on a later version of RDFLib that only differs in ``PATCH`` version component. This is as a consequence of potential *Type Corrections*.
+Given semver version components `MAJOR.MINOR.PATCH`, RDFLib will attempt to constrain type hint changes as follows:
+
+| Version Component | Type Declaration | Type Refinement | Type Corrections |
+|------------------|-----------------|----------------|-----------------|
+| MAJOR | YES | YES | YES |
+| MINOR | YES | YES | YES |
+| PATCH | NO | NO | YES |
+
+!!! caution "Type Corrections"
+    A caveat worth noting here is that code that passed type validation on one version of RDFLib can fail type validation on a later version of RDFLib that only differs in the `PATCH` version component. This is a consequence of potential *Type Corrections*.
diff --git a/docs/upgrade4to5.md b/docs/upgrade4to5.md
new file mode 100644
index 000000000..2d42c85d6
--- /dev/null
+++ b/docs/upgrade4to5.md
@@ -0,0 +1,203 @@
+# Upgrading from RDFLib version 4.2.2 to 5.0.0
+
+RDFLib version 5.0.0 appeared over 3 years after the previous release, 4.2.2, and contains a large number of both enhancements and bug fixes. Fundamentally though, 5.0.0 is compatible with 4.2.2.
+
+## Major Changes
+
+### Literal Ordering
+
+Literal total ordering [PR #793](https://github.com/RDFLib/rdflib/pull/793) is implemented. That means all literals can now be compared to be greater than or less than any other literal. This is required for implementing some specific SPARQL features, but it is counter-intuitive to those who are expecting a TypeError when certain normally-incompatible types are compared. For example, comparing a `Literal(int(1), datatype=xsd:integer)` to a `Literal(datetime.date(2020, 1, 10), datatype=xsd:date)` using a `>` or `<` operator in rdflib 4.2.2 and earlier would normally throw a TypeError; in rdflib 5.0.0, however, this operation now returns True or False according to the total ordering rules outlined in [PR #793](https://github.com/RDFLib/rdflib/pull/793). A short sketch of the new behaviour follows below.
+
+### Removed RDF Parsers
+
+The RDFa and Microdata format RDF parsers were removed from rdflib. There are still other Python libraries available that implement these parsers.
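+
+As mentioned under *Literal Ordering* above, here is a minimal sketch of the new comparison behaviour (assuming rdflib >= 5.0.0; the values are illustrative):
+
+```python
+import datetime
+
+from rdflib import Literal
+from rdflib.namespace import XSD
+
+a = Literal(1, datatype=XSD.integer)
+b = Literal(datetime.date(2020, 1, 10), datatype=XSD.date)
+
+# In 4.2.2 this raised TypeError; under the total ordering it is a plain bool.
+print(a < b)
+```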
+ +## All Changes + +This list has been assembled from Pull Request and commit information. + +### General Bugs Fixed + +* Pr 451 redux + [PR #978](https://github.com/RDFLib/rdflib/pull/978) +* NTriples fails to parse URIs with only a scheme + [ISSUE #920](https://github.com/RDFLib/rdflib/issues/920) + [PR #974](https://github.com/RDFLib/rdflib/pull/974) +* cannot clone it on windows - Remove colons from test result files. Fix #901. + [ISSUE #901](https://github.com/RDFLib/rdflib/issues/901) + [PR #971](https://github.com/RDFLib/rdflib/pull/971) +* Add requirement for requests to setup.py + [PR #969](https://github.com/RDFLib/rdflib/pull/969) +* fixed URIRef including native unicode characters + [PR #961](https://github.com/RDFLib/rdflib/pull/961) +* DCTERMS.format not working + [ISSUE #932](https://github.com/RDFLib/rdflib/issues/932) +* infixowl.manchesterSyntax do not encode strings + [PR #906](https://github.com/RDFLib/rdflib/pull/906) +* Fix blank node label to not contain '_:' during parsing + [PR #886](https://github.com/RDFLib/rdflib/pull/886) +* rename new SPARQLWrapper to SPARQLConnector + [PR #872](https://github.com/RDFLib/rdflib/pull/872) +* Fix #859. Unquote and Uriquote Literal Datatype. + [PR #860](https://github.com/RDFLib/rdflib/pull/860) +* Parsing nquads + [ISSUE #786](https://github.com/RDFLib/rdflib/issues/786) +* ntriples spec allows for upper-cased lang tag, fixes #782 + [PR #784](https://github.com/RDFLib/rdflib/pull/784) +* Error parsing N-Triple file using RDFlib + [ISSUE #782](https://github.com/RDFLib/rdflib/issues/782) +* Adds escaped single quote to literal parser + [PR #736](https://github.com/RDFLib/rdflib/pull/736) +* N3 parse error on single quote within single quotes + [ISSUE #732](https://github.com/RDFLib/rdflib/issues/732) +* Fixed #725 + [PR #730](https://github.com/RDFLib/rdflib/pull/730) +* test for issue #725: canonicalization collapses BNodes + [PR #726](https://github.com/RDFLib/rdflib/pull/726) +* RGDA1 graph canonicalization sometimes still collapses distinct BNodes + [ISSUE #725](https://github.com/RDFLib/rdflib/issues/725) +* Accept header should use a q parameter + [PR #720](https://github.com/RDFLib/rdflib/pull/720) +* Added test for Issue #682 and fixed. + [PR #718](https://github.com/RDFLib/rdflib/pull/718) +* Incompatibility with Python3: unichr + [ISSUE #687](https://github.com/RDFLib/rdflib/issues/687) +* namespace.py include colon in ALLOWED_NAME_CHARS + [PR #663](https://github.com/RDFLib/rdflib/pull/663) +* namespace.py fix compute_qname missing namespaces + [PR #649](https://github.com/RDFLib/rdflib/pull/649) +* RDFa parsing Error! `__init__()` got an unexpected keyword argument 'encoding' + [ISSUE #639](https://github.com/RDFLib/rdflib/issues/639) +* Bugfix: `term.Literal.__add__` + [PR #451](https://github.com/RDFLib/rdflib/pull/451) +* fixup of #443 + [PR #445](https://github.com/RDFLib/rdflib/pull/445) +* Microdata to rdf second edition bak + [PR #444](https://github.com/RDFLib/rdflib/pull/444) + +### Enhanced Features + +* Register additional serializer plugins for SPARQL mime types. 
+ [PR #987](https://github.com/RDFLib/rdflib/pull/987) +* Pr 388 redux + [PR #979](https://github.com/RDFLib/rdflib/pull/979) +* Allows RDF terms introduced by JSON-LD 1.1 + [PR #970](https://github.com/RDFLib/rdflib/pull/970) +* make SPARQLConnector work with DBpedia + [PR #941](https://github.com/RDFLib/rdflib/pull/941) +* ClosedNamespace returns right exception for way of access + [PR #866](https://github.com/RDFLib/rdflib/pull/866) +* Not adding all namespaces for n3 serializer + [PR #832](https://github.com/RDFLib/rdflib/pull/832) +* Adds basic support of xsd:duration + [PR #808](https://github.com/RDFLib/rdflib/pull/808) +* Add possibility to set authority and basepath to skolemize graph + [PR #807](https://github.com/RDFLib/rdflib/pull/807) +* Change notation3 list realization to non-recursive function. + [PR #805](https://github.com/RDFLib/rdflib/pull/805) +* Suppress warning for not using custom encoding. + [PR #800](https://github.com/RDFLib/rdflib/pull/800) +* Add support to parsing large xml inputs + [ISSUE #749](https://github.com/RDFLib/rdflib/issues/749) + [PR #750](https://github.com/RDFLib/rdflib/pull/750) +* improve hash efficiency by directly using str/unicode hash + [PR #746](https://github.com/RDFLib/rdflib/pull/746) +* Added the csvw prefix to the RDFa initial context. + [PR #594](https://github.com/RDFLib/rdflib/pull/594) +* syncing changes from pyMicrodata + [PR #587](https://github.com/RDFLib/rdflib/pull/587) +* Microdata parser: updated the parser to the latest version of the microdata->rdf note (published in December 2014) + [PR #443](https://github.com/RDFLib/rdflib/pull/443) +* Literal.toPython() support for xsd:hexBinary + [PR #388](https://github.com/RDFLib/rdflib/pull/388) + +### SPARQL Fixes + +* Total order patch patch + [PR #862](https://github.com/RDFLib/rdflib/pull/862) +* use <<= instead of deprecated << + [PR #861](https://github.com/RDFLib/rdflib/pull/861) +* Fix #847 + [PR #856](https://github.com/RDFLib/rdflib/pull/856) +* RDF Literal "1"^^xsd:boolean should _not_ coerce to True + [ISSUE #847](https://github.com/RDFLib/rdflib/issues/847) +* Makes NOW() return an UTC date + [PR #844](https://github.com/RDFLib/rdflib/pull/844) +* NOW() SPARQL should return an xsd:dateTime with a timezone + [ISSUE #843](https://github.com/RDFLib/rdflib/issues/843) +* fix property paths bug: issue #715 + [PR #822](https://github.com/RDFLib/rdflib/pull/822) + [ISSUE #715](https://github.com/RDFLib/rdflib/issues/715) +* MulPath: correct behaviour of n3() + [PR #820](https://github.com/RDFLib/rdflib/pull/820) +* Literal total ordering + [PR #793](https://github.com/RDFLib/rdflib/pull/793) +* Remove SPARQLWrapper dependency + [PR #744](https://github.com/RDFLib/rdflib/pull/744) +* made UNION faster by not preventing duplicates + [PR #741](https://github.com/RDFLib/rdflib/pull/741) +* added a hook to add custom functions to SPARQL + [PR #723](https://github.com/RDFLib/rdflib/pull/723) +* Issue714 + [PR #717](https://github.com/RDFLib/rdflib/pull/717) +* Use <<= instead of deprecated << in SPARQL parser + [PR #417](https://github.com/RDFLib/rdflib/pull/417) +* Custom FILTER function for SPARQL engine + [ISSUE #274](https://github.com/RDFLib/rdflib/issues/274) + +### Code Quality and Cleanups + +* a slightly opinionated autopep8 run + [PR #870](https://github.com/RDFLib/rdflib/pull/870) +* remove rdfa and microdata parsers from core RDFLib + [PR #828](https://github.com/RDFLib/rdflib/pull/828) +* ClosedNamespace KeyError -> AttributeError + [PR 
#827](https://github.com/RDFLib/rdflib/pull/827) +* typo in rdflib/plugins/sparql/update.py + [ISSUE #760](https://github.com/RDFLib/rdflib/issues/760) +* Fix logging in interactive mode + [PR #731](https://github.com/RDFLib/rdflib/pull/731) +* make namespace module flake8-compliant, change exceptions in that mod… + [PR #711](https://github.com/RDFLib/rdflib/pull/711) +* delete ez_setup.py? + [ISSUE #669](https://github.com/RDFLib/rdflib/issues/669) +* code duplication issue between rdflib and pymicrodata + [ISSUE #582](https://github.com/RDFLib/rdflib/issues/582) +* Transition from 2to3 to use of six.py to be merged in 5.0.0-dev + [PR #519](https://github.com/RDFLib/rdflib/pull/519) +* sparqlstore drop deprecated methods and args + [PR #516](https://github.com/RDFLib/rdflib/pull/516) +* python3 code seems shockingly inefficient + [ISSUE #440](https://github.com/RDFLib/rdflib/issues/440) +* removed md5_term_hash, fixes #240 + [PR #439](https://github.com/RDFLib/rdflib/pull/439) + [ISSUE #240](https://github.com/RDFLib/rdflib/issues/240) + +### Testing + +* 3.7 for travis + [PR #864](https://github.com/RDFLib/rdflib/pull/864) +* Added trig unit tests to highlight some current parsing/serializing issues + [PR #431](https://github.com/RDFLib/rdflib/pull/431) + +### Documentation Fixes + +* Fix a doc string in the query module + [PR #976](https://github.com/RDFLib/rdflib/pull/976) +* setup.py: Make the license field use an SPDX identifier + [PR #789](https://github.com/RDFLib/rdflib/pull/789) +* Update README.md + [PR #764](https://github.com/RDFLib/rdflib/pull/764) +* Update namespaces_and_bindings.rst + [PR #757](https://github.com/RDFLib/rdflib/pull/757) +* DOC: README.md: rdflib-jsonld, https uris + [PR #712](https://github.com/RDFLib/rdflib/pull/712) +* make doctest support py2/py3 + [ISSUE #707](https://github.com/RDFLib/rdflib/issues/707) +* `pip install rdflib` (as per README.md) gets OSError on Mint 18.1 + [ISSUE #704](https://github.com/RDFLib/rdflib/issues/704) + [PR #717](https://github.com/RDFLib/rdflib/pull/717) +* Use <<= instead of deprecated << in SPARQL parser + [PR #417](https://github.com/RDFLib/rdflib/pull/417) +* Custom FILTER function for SPARQL engine + [ISSUE #274](https://github.com/RDFLib/rdflib/issues/274) diff --git a/docs/upgrade4to5.rst b/docs/upgrade4to5.rst deleted file mode 100644 index f6ae19a10..000000000 --- a/docs/upgrade4to5.rst +++ /dev/null @@ -1,213 +0,0 @@ -.. _upgrade4to5: Upgrading from RDFLib version 4.2.2 to 5.0.0 - -============================================ -Upgrading 4.2.2 to 5.0.0 -============================================ - -RDFLib version 5.0.0 appeared over 3 years after the previous release, 4.2.2 and contains a large number of both enhancements and bug fixes. Fundamentally though, 5.0.0 is compatible with 4.2.2. - - -Major Changes -------------- - -Literal Ordering -^^^^^^^^^^^^^^^^ -Literal total ordering `PR #793 `_ is implemented. That means all literals can now be compared to be greater than or less than any other literal. -This is required for implementing some specific SPARQL features, but it is counter-intuitive to those who are expecting a TypeError when certain normally-incompatible types are compared. 
-For example, comparing a ``Literal(int(1), datatype=xsd:integer)`` to ``Literal(datetime.date(10,01,2020), datatype=xsd:date)`` using a ``>`` or ``<`` operator in rdflib 4.2.2 and earlier, would normally throw a TypeError, -however in rdflib 5.0.0 this operation now returns a True or False according to the Literal Total Ordering according the rules outlined in `PR #793 `_ - -Removed RDF Parsers -^^^^^^^^^^^^^^^^^^^ -The RDFa and Microdata format RDF parsers were removed from rdflib. There are still other python libraries available to implement these parsers. - -All Changes ------------ - -This list has been assembled from Pull Request and commit information. - -General Bugs Fixed: -^^^^^^^^^^^^^^^^^^^ -* Pr 451 redux - `PR #978 `_ -* NTriples fails to parse URIs with only a scheme - `ISSUE #920 `_ - `PR #974 `_ -* cannot clone it on windows - Remove colons from test result files. Fix #901. - `ISSUE #901 `_ - `PR #971 `_ -* Add requirement for requests to setup.py - `PR #969 `_ -* fixed URIRef including native unicode characters - `PR #961 `_ -* DCTERMS.format not working - `ISSUE #932 `_ -* infixowl.manchesterSyntax do not encode strings - `PR #906 `_ -* Fix blank node label to not contain '_:' during parsing - `PR #886 `_ -* rename new SPARQLWrapper to SPARQLConnector - `PR #872 `_ -* Fix #859. Unquote and Uriquote Literal Datatype. - `PR #860 `_ -* Parsing nquads - `ISSUE #786 `_ -* ntriples spec allows for upper-cased lang tag, fixes #782 - `PR #784 `_ -* Error parsing N-Triple file using RDFlib - `ISSUE #782 `_ -* Adds escaped single quote to literal parser - `PR #736 `_ -* N3 parse error on single quote within single quotes - `ISSUE #732 `_ -* Fixed #725 - `PR #730 `_ -* test for issue #725: canonicalization collapses BNodes - `PR #726 `_ -* RGDA1 graph canonicalization sometimes still collapses distinct BNodes - `ISSUE #725 `_ -* Accept header should use a q parameter - `PR #720 `_ -* Added test for Issue #682 and fixed. - `PR #718 `_ -* Incompatibility with Python3: unichr - `ISSUE #687 `_ -* namespace.py include colon in ALLOWED_NAME_CHARS - `PR #663 `_ -* namespace.py fix compute_qname missing namespaces - `PR #649 `_ -* RDFa parsing Error! ``__init__()`` got an unexpected keyword argument 'encoding' - `ISSUE #639 `_ -* Bugfix: ``term.Literal.__add__`` - `PR #451 `_ -* fixup of #443 - `PR #445 `_ -* Microdata to rdf second edition bak - `PR #444 `_ - -Enhanced Features: -^^^^^^^^^^^^^^^^^^ -* Register additional serializer plugins for SPARQL mime types. - `PR #987 `_ -* Pr 388 redux - `PR #979 `_ -* Allows RDF terms introduced by JSON-LD 1.1 - `PR #970 `_ -* make SPARQLConnector work with DBpedia - `PR #941 `_ -* ClosedNamespace returns right exception for way of access - `PR #866 `_ -* Not adding all namespaces for n3 serializer - `PR #832 `_ -* Adds basic support of xsd:duration - `PR #808 `_ -* Add possibility to set authority and basepath to skolemize graph - `PR #807 `_ -* Change notation3 list realization to non-recursive function. - `PR #805 `_ -* Suppress warning for not using custom encoding. - `PR #800 `_ -* Add support to parsing large xml inputs - `ISSUE #749 `_ - `PR #750 `_ -* improve hash efficiency by directly using str/unicode hash - `PR #746 `_ -* Added the csvw prefix to the RDFa initial context. 
- `PR #594 `_ -* syncing changes from pyMicrodata - `PR #587 `_ -* Microdata parser: updated the parser to the latest version of the microdata->rdf note (published in December 2014) - `PR #443 `_ -* Literal.toPython() support for xsd:hexBinary - `PR #388 `_ - -SPARQL Fixes: -^^^^^^^^^^^^^ -* Total order patch patch - `PR #862 `_ -* use <<= instead of deprecated << - `PR #861 `_ -* Fix #847 - `PR #856 `_ -* RDF Literal "1"^^xsd:boolean should _not_ coerce to True - `ISSUE #847 `_ -* Makes NOW() return an UTC date - `PR #844 `_ -* NOW() SPARQL should return an xsd:dateTime with a timezone - `ISSUE #843 `_ -* fix property paths bug: issue #715 - `PR #822 `_ - `ISSUE #715 `_ -* MulPath: correct behaviour of n3() - `PR #820 `_ -* Literal total ordering - `PR #793 `_ -* Remove SPARQLWrapper dependency - `PR #744 `_ -* made UNION faster by not preventing duplicates - `PR #741 `_ -* added a hook to add custom functions to SPARQL - `PR #723 `_ -* Issue714 - `PR #717 `_ -* Use <<= instead of deprecated << in SPARQL parser - `PR #417 `_ -* Custom FILTER function for SPARQL engine - `ISSUE #274 `_ - -Code Quality and Cleanups: -^^^^^^^^^^^^^^^^^^^^^^^^^^ -* a slightly opinionated autopep8 run - `PR #870 `_ -* remove rdfa and microdata parsers from core RDFLib - `PR #828 `_ -* ClosedNamespace KeyError -> AttributeError - `PR #827 `_ -* typo in rdflib/plugins/sparql/update.py - `ISSUE #760 `_ -* Fix logging in interactive mode - `PR #731 `_ -* make namespace module flake8-compliant, change exceptions in that mod… - `PR #711 `_ -* delete ez_setup.py? - `ISSUE #669 `_ -* code duplication issue between rdflib and pymicrodata - `ISSUE #582 `_ -* Transition from 2to3 to use of six.py to be merged in 5.0.0-dev - `PR #519 `_ -* sparqlstore drop deprecated methods and args - `PR #516 `_ -* python3 code seems shockingly inefficient - `ISSUE #440 `_ -* removed md5_term_hash, fixes #240 - `PR #439 `_ - `ISSUE #240 `_ - -Testing: -^^^^^^^^ -* 3.7 for travis - `PR #864 `_ -* Added trig unit tests to highlight some current parsing/serializing issues - `PR #431 `_ - -Documentation Fixes: -^^^^^^^^^^^^^^^^^^^^ -* Fix a doc string in the query module - `PR #976 `_ -* setup.py: Make the license field use an SPDX identifier - `PR #789 `_ -* Update README.md - `PR #764 `_ -* Update namespaces_and_bindings.rst - `PR #757 `_ -* DOC: README.md: rdflib-jsonld, https uris - `PR #712 `_ -* make doctest support py2/py3 - `ISSUE #707 `_ -* ``pip install rdflib`` (as per README.md) gets OSError on Mint 18.1 - `ISSUE #704 `_ - `PR #717 `_ -* Use <<= instead of deprecated << in SPARQL parser - `PR #417 `_ -* Custom FILTER function for SPARQL engine - `ISSUE #274 `_ diff --git a/docs/upgrade5to6.md b/docs/upgrade5to6.md new file mode 100644 index 000000000..8ac59b2a5 --- /dev/null +++ b/docs/upgrade5to6.md @@ -0,0 +1,61 @@ +# Upgrading 5.0.0 to 6.0.0 + +6.0.0 fully adopts Python 3 practices and drops Python 2 support so it is neater, faster and generally more modern than 5.0.0. It also tidies up the [`Graph`][rdflib.graph.Graph] API (removing duplicate functions) so it does include a few breaking changes. Additionally, there is a long list of PRs merged into 6.0.0 adding a number of small fixes and features which are listed below. + +RDFLib version 5.0.0 was released in 2020, 3 years after the previous version (4.2.2) and is fundamentally 5.0.0 compatible with. If you need very long-term backwards-compatibility or Python 2 support, you need 5.0.0. 
+
+## Major Changes
+
+The most notable changes in RDFLib 6.0.0 are:
+
+### Python 3.7+
+
+* The oldest version of Python you can use to run RDFLib is now 3.7.
+* This is a big jump from RDFLib 5.0.0, which worked on Python 2.7 and 3.5.
+* This change allows the library maintainers to adopt more modern development tools and newer language features, and avoids the need to support EOL versions of Python in the future.
+
+### JSON-LD integration and JSON-LD 1.1
+
+* The json-ld serializer/parser plugin was by far the most commonly used RDFLib add-on.
+* Last year we brought it under the RDFLib org in GitHub.
+* Now, for the 6.0.0 release, the JSON-LD serializer and parser are integrated into RDFLib core.
+* This includes the experimental support for the JSON-LD v1.1 spec.
+* You no longer need to install the json-ld dependency separately.
+
+## All Changes
+
+This list has been assembled from Pull Request and commit information.
+
+### General Bugs Fixed
+
+* Pr 451 redux
+  [PR #978](https://github.com/RDFLib/rdflib/pull/978)
+
+### Enhanced Features
+
+* Register additional serializer plugins for SPARQL mime types.
+  [PR #987](https://github.com/RDFLib/rdflib/pull/987)
+
+### SPARQL Fixes
+
+* Total order patch patch
+  [PR #862](https://github.com/RDFLib/rdflib/pull/862)
+
+### Code Quality and Cleanups
+
+* a slightly opinionated autopep8 run
+  [PR #870](https://github.com/RDFLib/rdflib/pull/870)
+
+### Testing
+
+* 3.7 for travis
+  [PR #864](https://github.com/RDFLib/rdflib/pull/864)
+
+### Documentation Fixes
+
+* Fix a doc string in the query module
+  [PR #976](https://github.com/RDFLib/rdflib/pull/976)
+
+### Integrate JSON-LD into RDFLib
+
+[PR #1354](https://github.com/RDFLib/rdflib/pull/1354)
diff --git a/docs/upgrade5to6.rst b/docs/upgrade5to6.rst
deleted file mode 100644
index 7ffa7e68b..000000000
--- a/docs/upgrade5to6.rst
+++ /dev/null
@@ -1,79 +0,0 @@
-.. _upgrade4to5: Upgrading from RDFLib version 5.0.0 to 6.0.0
-
-============================================
-Upgrading 5.0.0 to 6.0.0
-============================================
-
-6.0.0 fully adopts Python 3 practices and drops Python 2 support so it is neater, faster and generally more modern than
-5.0.0. It also tidies up the ``Graph`` API (removing duplicate functions) so it does include a few breaking changes.
-Additionally, there is a long list of PRs merged into 6.0.0 adding a number of small fixes and features which are listed
-below.
-
-RDFLib version 5.0.0 was released in 2020, 3 years after the previous version (4.2.2) and is fundamentally 5.0.0
-compatible with. If you need very long-term backwards-compatibility or Python 2 support, you need 5.0.0.
-
-
-Major Changes
--------------
-
-The most notable changes in RDFLib 6.0.0 are:
-
-Python 3.7+
-^^^^^^^^^^^
-* The oldest version of python you can use to run RDFLib is now 3.7.
-* This is a big jump from RDFLib 5.0.0 that worked on python 2.7 and 3.5.
-* This change is to allow the library maintainers to adopt more modern development tools,
-  newer language features, and avoid the need to support EOL versions of python in he future
-
-JSON-LD integration and JSON-LD 1.1
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-* The json-ld serializer/parser plugin was by far the most commonly used RDFLib addon.
-* Last year we brought it under the RDFLib org in Github
-* Now for 6.0.0 release the JSON-LD serializer and parser are integrated into RDFLib core
-* This includes the experimental support for the JSON-LD v1.1 spec
-* You no longer need to install the json-ld dependency separately.
- - -All Changes ------------ - -This list has been assembled from Pull Request and commit information. - -General Bugs Fixed: -^^^^^^^^^^^^^^^^^^^ -* Pr 451 redux - `PR #978 `_ - - -Enhanced Features: -^^^^^^^^^^^^^^^^^^ -* Register additional serializer plugins for SPARQL mime types. - `PR #987 `_ - - -SPARQL Fixes: -^^^^^^^^^^^^^ -* Total order patch patch - `PR #862 `_ - - -Code Quality and Cleanups: -^^^^^^^^^^^^^^^^^^^^^^^^^^ -* a slightly opinionated autopep8 run - `PR #870 `_ - - -Testing: -^^^^^^^^ -* 3.7 for travis - `PR #864 `_ - - -Documentation Fixes: -^^^^^^^^^^^^^^^^^^^^ -* Fix a doc string in the query module - `PR #976 `_ - -Integrade JSON-LD into RDFLib: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -`PR #1354 `_ diff --git a/docs/upgrade6to7.md b/docs/upgrade6to7.md new file mode 100644 index 000000000..0cba20e22 --- /dev/null +++ b/docs/upgrade6to7.md @@ -0,0 +1,36 @@ +# Upgrading from version 6 to 7 + +## Python version + +RDFLib 7 requires Python 3.8.1 or later. + +## New behaviour for `publicID` in `parse` methods + +Before version 7, the `publicID` argument to the [`parse()`][rdflib.graph.ConjunctiveGraph.parse] and [`parse()`][rdflib.graph.Dataset.parse] methods was used as the name for the default graph, and triples from the default graph in a source were loaded into the graph +named `publicID`. + +In version 7, the `publicID` argument is only used as the base URI for relative URI resolution as defined in [IETF RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986#section-5.1.4). + +To accommodate this change, ensure that use of the `publicID` argument is consistent with the new behaviour. + +If you want to load triples from a format that does not support named graphs into a named graph, use the following code: + +```python +from rdflib import ConjunctiveGraph + +cg = ConjunctiveGraph() +cg.get_context("example:graph_name").parse("http://example.com/source.ttl", format="turtle") +``` + +If you want to move triples from the default graph into a named graph, use the following code: + +```python +from rdflib import ConjunctiveGraph + +cg = ConjunctiveGraph() +cg.parse("http://example.com/source.trig", format="trig") +destination_graph = cg.get_context("example:graph_name") +for triple in cg.default_context.triples((None, None, None)): + destination_graph.add(triple) + cg.default_context.remove(triple) +``` diff --git a/docs/upgrade6to7.rst b/docs/upgrade6to7.rst deleted file mode 100644 index d687634d5..000000000 --- a/docs/upgrade6to7.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. _upgrade6to7: Upgrading from RDFLib version 6 to 7 - -============================================ -Upgrading from version 6 to 7 -============================================ - -Python version ----------------------------------------------------- - -RDFLib 7 requires Python 3.8.1 or later. - -New behaviour for ``publicID`` in ``parse`` methods. ----------------------------------------------------- - -Before version 7, the ``publicID`` argument to the -:meth:`rdflib.graph.ConjunctiveGraph.parse` and -:meth:`rdflib.graph.Dataset.parse` methods was used as the name for the default -graph, and triples from the default graph in a source were loaded into the graph -named ``publicID``. - -In version 7, the ``publicID`` argument is only used as the base URI for relative -URI resolution as defined in `IETF RFC 3986 -`_. - -To accommodate this change, ensure that use of the ``publicID`` argument is -consistent with the new behaviour. 
-
-If you want to load triples from a format that does not support named graphs
-into a named graph, use the following code:
-
-.. code-block:: python
-
-    from rdflib import ConjunctiveGraph
-
-    cg = ConjunctiveGraph()
-    cg.get_context("example:graph_name").parse("http://example.com/source.ttl", format="turtle")
-
-If you want to move triples from the default graph into a named graph, use the
-following code:
-
-.. code-block:: python
-
-    from rdflib import ConjunctiveGraph
-
-    cg = ConjunctiveGraph()
-    cg.parse("http://example.com/source.trig", format="trig")
-    destination_graph = cg.get_context("example:graph_name")
-    for triple in cg.default_context.triples((None, None, None)):
-        destination_graph.add(triple)
-        cg.default_context.remove(triple)
diff --git a/docs/upgrade7to8.md b/docs/upgrade7to8.md
new file mode 100644
index 000000000..ecc9aa525
--- /dev/null
+++ b/docs/upgrade7to8.md
@@ -0,0 +1,9 @@
+# Upgrading from version 7 to 8
+
+!!! warning "In Development"
+    This page is in development and will only be completed with the release of RDFLib v8.
+
+## Python version
+
+RDFLib 8 requires Python 3.9 or later.
+
diff --git a/docs/utilities.md b/docs/utilities.md
new file mode 100644
index 000000000..46d2813ba
--- /dev/null
+++ b/docs/utilities.md
@@ -0,0 +1,146 @@
+# Utilities & convenience functions
+
+For RDF programming, RDFLib and Python may not be the fastest tools, but we try hard to make them the easiest and most convenient to use and thus the *fastest* overall!
+
+This is a collection of hints and pointers for hassle-free RDF coding.
+
+## Functional properties
+
+Use [`value()`][rdflib.graph.Graph.value] and [`set()`][rdflib.graph.Graph.set] to work with *functional property* instances, i.e. properties that can only occur once for a resource.
+
+```python
+from rdflib import Graph, URIRef, Literal, BNode
+from rdflib.namespace import FOAF, RDF
+
+g = Graph()
+g.bind("foaf", FOAF)
+
+# Add demo data
+bob = URIRef("http://example.org/people/Bob")
+g.add((bob, RDF.type, FOAF.Person))
+g.add((bob, FOAF.name, Literal("Bob")))
+g.add((bob, FOAF.age, Literal(38)))
+
+# To get a single value, use 'value'
+print(g.value(bob, FOAF.age))
+# prints: 38
+
+# To change a single value, use 'set'
+g.set((bob, FOAF.age, Literal(39)))
+print(g.value(bob, FOAF.age))
+# prints: 39
+```
+
+## Slicing graphs
+
+Python allows slicing arrays with a `slice` object, a triple of `start`, `stop` and `step-size`:
+
+```python
+for i in range(20)[2:9:3]:
+    print(i)
+# prints:
+# 2, 5, 8
+```
+
+RDFLib graphs override `__getitem__` and we pervert the slice triple to be an RDF triple instead.
This lets slice syntax be a shortcut for [`triples()`][rdflib.graph.Graph.triples], [`subject_predicates()`][rdflib.graph.Graph.subject_predicates], [`__contains__()`][rdflib.graph.Graph.__contains__], and other Graph query-methods:
+
+```python
+from rdflib import Graph, URIRef, Literal, BNode
+from rdflib.namespace import FOAF, RDF
+
+g = Graph()
+g.bind("foaf", FOAF)
+
+# Add demo data
+bob = URIRef("http://example.org/people/Bob")
+bill = URIRef("http://example.org/people/Bill")
+g.add((bob, RDF.type, FOAF.Person))
+g.add((bob, FOAF.name, Literal("Bob")))
+g.add((bob, FOAF.age, Literal(38)))
+g.add((bob, FOAF.knows, bill))
+
+print(g[:])
+# same as
+print(iter(g))
+
+print(g[bob])
+# same as
+print(g.predicate_objects(bob))
+
+print(g[bob: FOAF.knows])
+# same as
+print(g.objects(bob, FOAF.knows))
+
+print(g[bob: FOAF.knows: bill])
+# same as
+print((bob, FOAF.knows, bill) in g)
+
+print(g[:FOAF.knows])
+# same as
+print(g.subject_objects(FOAF.knows))
+```
+
+See [`examples.slice`][examples.slice] for a complete example.
+
+!!! warning "Slicing Caution"
+    Slicing is convenient for run-once scripts for playing around
+    in the Python `REPL`, however since slicing returns
+    tuples of varying length depending on which parts of the
+    slice are bound, you should be careful using it in more
+    complicated programs. If you pass in variables, and they are
+    `None` or `False`, you may suddenly get a generator of
+    different length tuples back than you expect.
+
+## SPARQL Paths
+
+[SPARQL property paths](http://www.w3.org/TR/sparql11-property-paths/) are possible using overridden operators on URIRefs. See [`examples.foafpaths`][examples.foafpaths] and [`rdflib.paths`][rdflib.paths].
+
+## Serializing a single term to N3
+
+For simple output, or simple serialisation, you often want a nice readable representation of a term. All terms (URIRef, Literal etc.) have an `n3()` method, which will return a suitable N3 format:
+
+```python
+from rdflib import Graph, URIRef, Literal
+from rdflib.namespace import FOAF
+
+# A URIRef
+person = URIRef("http://xmlns.com/foaf/0.1/Person")
+print(person.n3())
+# prints: <http://xmlns.com/foaf/0.1/Person>
+
+# Simplifying the output with a namespace prefix:
+g = Graph()
+g.bind("foaf", FOAF)
+
+print(person.n3(g.namespace_manager))
+# prints: foaf:Person
+
+# A typed literal
+l = Literal(2)
+print(l.n3())
+# prints: "2"^^<http://www.w3.org/2001/XMLSchema#integer>
+
+# Simplifying the output with a namespace prefix
+# XSD is built in, so no need to bind() it!
+print(l.n3(g.namespace_manager))
+# prints: "2"^^xsd:integer
+```
+
+## Parsing data from a string
+
+You can parse data from a string with the `data` param:
+
+```python
+from rdflib import Graph
+
+g = Graph().parse(data="<a:> <p:> <p:> .")
+for r in g.triples((None, None, None)):
+    print(r)
+# prints: (rdflib.term.URIRef('a:'), rdflib.term.URIRef('p:'), rdflib.term.URIRef('p:'))
+```
+
+## Command Line tools
+
+RDFLib includes a handful of command-line tools; see [`rdflib.tools`][rdflib.tools].
diff --git a/docs/utilities.rst b/docs/utilities.rst
deleted file mode 100644
index 381f9070b..000000000
--- a/docs/utilities.rst
+++ /dev/null
@@ -1,166 +0,0 @@
-Utilities & convenience functions
-=================================
-
-For RDF programming, RDFLib and Python may not be the fastest tools,
-but we try hard to make them the easiest and most convenient to use and thus the *fastest* overall!
-
-This is a collection of hints and pointers for hassle-free RDF coding.
- -Functional properties ---------------------- - -Use :meth:`~rdflib.graph.Graph.value` and -:meth:`~rdflib.graph.Graph.set` to work with *functional -property* instances, i.e. properties than can only occur once for a resource. - -.. code-block:: python - - from rdflib import Graph, URIRef, Literal, BNode - from rdflib.namespace import FOAF, RDF - - g = Graph() - g.bind("foaf", FOAF) - - # Add demo data - bob = URIRef("http://example.org/people/Bob") - g.add((bob, RDF.type, FOAF.Person)) - g.add((bob, FOAF.name, Literal("Bob"))) - g.add((bob, FOAF.age, Literal(38))) - - # To get a single value, use 'value' - print(g.value(bob, FOAF.age)) - # prints: 38 - - # To change a single of value, use 'set' - g.set((bob, FOAF.age, Literal(39))) - print(g.value(bob, FOAF.age)) - # prints: 39 - - -Slicing graphs --------------- - -Python allows slicing arrays with a ``slice`` object, a triple of -``start``, ``stop`` and ``step-size``: - -.. code-block:: python - - for i in range(20)[2:9:3]: - print(i) - # prints: - # 2, 5, 8 - - -RDFLib graphs override ``__getitem__`` and we pervert the slice triple -to be a RDF triple instead. This lets slice syntax be a shortcut for -:meth:`~rdflib.graph.Graph.triples`, -:meth:`~rdflib.graph.Graph.subject_predicates`, -:meth:`~rdflib.graph.Graph.__contains__`, and other Graph query-methods: - -.. code-block:: python - - from rdflib import Graph, URIRef, Literal, BNode - from rdflib.namespace import FOAF, RDF - - g = Graph() - g.bind("foaf", FOAF) - - # Add demo data - bob = URIRef("http://example.org/people/Bob") - bill = URIRef("http://example.org/people/Bill") - g.add((bob, RDF.type, FOAF.Person)) - g.add((bob, FOAF.name, Literal("Bob"))) - g.add((bob, FOAF.age, Literal(38))) - g.add((bob, FOAF.knows, bill)) - - print(g[:]) - # same as - print(iter(g)) - - print(g[bob]) - # same as - print(g.predicate_objects(bob)) - - print(g[bob: FOAF.knows]) - # same as - print(g.objects(bob, FOAF.knows)) - - print(g[bob: FOAF.knows: bill]) - # same as - print((bob, FOAF.knows, bill) in g) - - print(g[:FOAF.knows]) - # same as - print(g.subject_objects(FOAF.knows)) - - -See :mod:`examples.slice` for a complete example. - -.. note:: Slicing is convenient for run-once scripts for playing around - in the Python ``REPL``, however since slicing returns - tuples of varying length depending on which parts of the - slice are bound, you should be careful using it in more - complicated programs. If you pass in variables, and they are - ``None`` or ``False``, you may suddenly get a generator of - different length tuples back than you expect. - -SPARQL Paths ------------- - -`SPARQL property paths -`_ are possible using -overridden operators on URIRefs. See :mod:`examples.foafpaths` and -:mod:`rdflib.paths`. - -Serializing a single term to N3 -------------------------------- - -For simple output, or simple serialisation, you often want a nice -readable representation of a term. All terms (URIRef, Literal etc.) have a -``n3``, method, which will return a suitable N3 format: - -.. 
code-block:: python - - from rdflib import Graph, URIRef, Literal - from rdflib.namespace import FOAF - - # A URIRef - person = URIRef("http://xmlns.com/foaf/0.1/Person") - print(person.n3()) - # prints: - - # Simplifying the output with a namespace prefix: - g = Graph() - g.bind("foaf", FOAF) - - print(person.n3(g.namespace_manager)) - # prints foaf:Person - - # A typed literal - l = Literal(2) - print(l.n3()) - # prints "2"^^ - - # Simplifying the output with a namespace prefix - # XSD is built in, so no need to bind() it! - l.n3(g.namespace_manager) - # prints: "2"^^xsd:integer - -Parsing data from a string --------------------------- - -You can parse data from a string with the ``data`` param: - -.. code-block:: python - - from rdflib import Graph - - g = Graph().parse(data=" .") - for r in g.triples((None, None, None)): - print(r) - # prints: (rdflib.term.URIRef('a:'), rdflib.term.URIRef('p:'), rdflib.term.URIRef('p:')) - -Command Line tools ------------------- - -RDFLib includes a handful of commandline tools, see :mod:`rdflib.tools`. diff --git a/examples/__init__.py b/examples/__init__.py index e69de29bb..02b536058 100644 --- a/examples/__init__.py +++ b/examples/__init__.py @@ -0,0 +1 @@ +"""These examples all live in `./examples` in the source-distribution of RDFLib.""" diff --git a/examples/conjunctive_graphs.py b/examples/conjunctive_graphs.py index 433a843f4..310ff3c44 100644 --- a/examples/conjunctive_graphs.py +++ b/examples/conjunctive_graphs.py @@ -1,6 +1,6 @@ """ An RDFLib ConjunctiveGraph is an (unnamed) aggregation of all the Named Graphs -within a Store. The :meth:`~rdflib.graph.ConjunctiveGraph.get_context` +within a Store. The [`ConjunctiveGraph.get_context`][rdflib.graph.ConjunctiveGraph.get_context] method can be used to get a particular named graph for use, such as to add triples to, or the default graph can be used. diff --git a/examples/custom_datatype.py b/examples/custom_datatype.py index 46f2a5f23..197578b96 100644 --- a/examples/custom_datatype.py +++ b/examples/custom_datatype.py @@ -4,7 +4,7 @@ Mapping for integers, floats, dateTimes, etc. are already added, but you can also add your own. -This example shows how :meth:`rdflib.term.bind` lets you register new +This example shows how [`bind`][rdflib.term.bind] lets you register new mappings between literal datatypes and Python objects """ diff --git a/examples/custom_eval.py b/examples/custom_eval.py index 32c268606..ae5c9f93d 100644 --- a/examples/custom_eval.py +++ b/examples/custom_eval.py @@ -2,18 +2,11 @@ This example shows how a custom evaluation function can be added to handle certain SPARQL Algebra elements. -A custom function is added that adds ``rdfs:subClassOf`` "inference" when -asking for ``rdf:type`` triples. +A custom function is added that adds `rdfs:subClassOf` "inference" when +asking for `rdf:type` triples. Here the custom eval function is added manually, normally you would use -setuptools and entry_points to do it: -i.e. in your setup.py:: - - entry_points = { - 'rdf.plugins.sparqleval': [ - 'myfunc = mypackage:MyFunction', - ], - } +entry points to do it. See the [Plugins Usage Documentation](../plugins.md). 
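+
+A minimal sketch of the manual registration performed in this file (`customEval`
+is the evaluation function defined below):
+
+    rdflib.plugins.sparql.CUSTOM_EVALS["exampleEval"] = customEval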
""" from pathlib import Path diff --git a/examples/datasets.py b/examples/datasets.py index d550775a1..7dfc4e48b 100644 --- a/examples/datasets.py +++ b/examples/datasets.py @@ -1,13 +1,23 @@ """ -An RDFLib Dataset is a slight extension to ConjunctiveGraph: it uses simpler terminology -and has a few additional convenience methods, for example add() can be used to -add quads directly to a specific Graph within the Dataset. +This file contains a number of common tasks using the RDFLib Dataset class. -This example file shows how to declare a Dataset, add content to it, serialise it, query it -and remove things from it. +An RDFLib Dataset is an object that stores multiple Named Graphs - instances of RDFLib +Graph identified by IRI - within it and allows whole-of-dataset or single Graph use. + +Dataset extends Graph's Subject, Predicate, Object structure to include Graph - +archaically called Context - producing quads of s, p, o, g. + +There is an older implementation of a Dataset-like class in RDFLib < 7.x called +ConjunctiveGraph that is now deprecated. + +Sections in this file: + +1. Creating & Adding +2. Looping & Counting +3. Manipulating Graphs """ -from rdflib import Dataset, Literal, Namespace, URIRef +from rdflib import Dataset, Graph, Literal, URIRef # Note regarding `mypy: ignore_errors=true`: # @@ -19,41 +29,48 @@ # mypy: ignore_errors=true -# -# Create & Add -# +####################################################################################### +# 1. Creating & Adding +####################################################################################### # Create an empty Dataset d = Dataset() + # Add a namespace prefix to it, just like for Graph -d.bind("ex", Namespace("http://example.com/")) +d.bind("ex", "http://example.com/") -# Declare a Graph URI to be used to identify a Graph -graph_1 = URIRef("http://example.com/graph-1") +# Declare a Graph identifier to be used to identify a Graph +# A string or a URIRef may be used, but safer to always use a URIRef for usage consistency +graph_1_id = URIRef("http://example.com/graph-1") -# Add an empty Graph, identified by graph_1, to the Dataset -d.graph(identifier=graph_1) +# Add an empty Graph, identified by graph_1_id, to the Dataset +d.graph(identifier=graph_1_id) -# Add two quads to Graph graph_1 in the Dataset +# Add two quads to the Dataset which are triples + graph ID +# These insert the triple into the GRaph specified by the ID d.add( ( URIRef("http://example.com/subject-x"), URIRef("http://example.com/predicate-x"), Literal("Triple X"), - graph_1, + graph_1_id, ) ) + d.add( ( URIRef("http://example.com/subject-z"), URIRef("http://example.com/predicate-z"), Literal("Triple Z"), - graph_1, + graph_1_id, ) ) -# Add another quad to the Dataset to a non-existent Graph: -# the Graph is created automatically +# We now have 2 distinct quads in the Dataset to the Dataset has a length of 2 +assert len(d) == 2 + +# Add another quad to the Dataset specifying a non-existent Graph. +# The Graph is created automatically d.add( ( URIRef("http://example.com/subject-y"), @@ -63,8 +80,15 @@ ) ) -# printing the Dataset like this: print(d.serialize(format="trig")) -# produces a result like this: +assert len(d) == 3 + + +# You can print the Dataset like you do a Graph but you must specify a quads format like +# 'trig' or 'trix', not 'turtle', unless the default_union parameter is set to True, and +# then you can print the entire Dataset in triples. 
+# print(d.serialize(format="trig").strip())
+
+# you should see something like this:
 """
 @prefix ex: <http://example.com/> .

 ex:graph-1 {
     ex:subject-x ex:predicate-x "Triple X" .

     ex:subject-z ex:predicate-z "Triple Z" .
 }

 ex:graph-2 {
     ex:subject-y ex:predicate-y "Triple Y" .
 }
 """
-print("Printing Serialised Dataset:")
-print("---")
-print(d.serialize(format="trig"))
-print("---")
-print()
-print()
-
-#
-# Use & Query
-#
-# print the length of the Dataset, i.e. the count of all triples in all Graphs
-# we should get
+
+# Print out one graph in the Dataset, using a standard Graph serialization format - longturtle
+print(d.get_graph(URIRef("http://example.com/graph-2")).serialize(format="longturtle"))
+
+# you should see something like this:
 """
-3
+PREFIX ex: <http://example.com/>
+
+ex:subject-y
+    ex:predicate-y "Triple Y" ;
+.
 """
-print("Printing Dataset Length:")
-print("---")
-print(len(d))
-print("---")
-print()
-print()
-# Query one graph in the Dataset for all its triples
-# we should get
+
+#######################################################################################
+# 2. Looping & Counting
+#######################################################################################
+
+# Loop through all quads in the dataset
+for s, p, o, g in d.quads((None, None, None, None)):  # type: ignore[arg-type]
+    print(f"{s}, {p}, {o}, {g}")
+
+# you should see something like this:
 """
-(rdflib.term.URIRef('http://example.com/subject-z'), rdflib.term.URIRef('http://example.com/predicate-z'), rdflib.term.Literal('Triple Z'))
-(rdflib.term.URIRef('http://example.com/subject-x'), rdflib.term.URIRef('http://example.com/predicate-x'), rdflib.term.Literal('Triple X'))
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1
+http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2
 """
-print("Printing all triple from one Graph in the Dataset:")
-print("---")
-for triple in d.triples((None, None, None, graph_1)):  # type: ignore[arg-type]
-    print(triple)
-print("---")
-print()
-print()
-# Query the union of all graphs in the dataset for all triples
-# we should get nothing:
+# Loop through all the quads in one Graph - just constrain the Graph field
+for s, p, o, g in d.quads((None, None, None, graph_1_id)):  # type: ignore[arg-type]
+    print(f"{s}, {p}, {o}, {g}")
+
+# you should see something like this:
 """
+http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1
 """
-# A Dataset's default union graph does not exist by default (default_union property is False)
-print("Attempt #1 to print all triples in the Dataset:")
-print("---")
-for triple in d.triples((None, None, None, None)):
-    print(triple)
-print("---")
-print()
-print()
-# Set the Dataset's default_union property to True and re-query
+# Looping through triples in one Graph still works too
+for s, p, o in d.triples((None, None, None, graph_1_id)):  # type: ignore[arg-type]
+    print(f"{s}, {p}, {o}")
+
+# you should see something like this:
+"""
+http://example.com/subject-x, http://example.com/predicate-x, Triple X
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z
+"""
+
+# Looping through triples across the whole Dataset will produce nothing
+# unless we set the default_union parameter to True, since each triple is in a Named Graph
+
+# Setting the default_union parameter to True essentially presents all triples in all
+# Graphs as a single Graph
 d.default_union = True
-print("Attempt #2 to print all triples in the Dataset:")
-print("---")
-for triple in d.triples((None, None, None)):
-    print(triple)
-print("---")
-print()
-print()
+for s, p, o in d.triples((None, None, None)):
+    print(f"{s}, {p}, {o}")
+
+# you should see something like this:
+"""
+http://example.com/subject-x, http://example.com/predicate-x, Triple X
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y
+"""
-
-#
-# Remove
-#
+# You can still loop through all quads now with the default_union parameter set to True
+for s, p, o, g in d.quads((None, None, None)):
+    print(f"{s}, {p}, {o}, {g}")
+
+# you should see something like this:
+"""
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1
+http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2
+"""
+
+# Adding a triple that is already in graph-1 to graph-2 increases the number of distinct quads in
+# the Dataset
+d.add(
+    (
+        URIRef("http://example.com/subject-z"),
+        URIRef("http://example.com/predicate-z"),
+        Literal("Triple Z"),
+        URIRef("http://example.com/graph-2"),
+    )
+)
+
+for s, p, o, g in d.quads((None, None, None, None)):
+    print(f"{s}, {p}, {o}, {g}")
+
+# you should see something like this, with the 'Z' triple in graph-1 and graph-2:
+"""
+http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-2
+"""
+
+# but the 'length' of the Dataset is still only 3 as only distinct triples are counted
+assert len(d) == 3
+
+
+# Looping through triples sees the 'Z' triple only once
+for s, p, o in d.triples((None, None, None)):
+    print(f"{s}, {p}, {o}")
+
+# you should see something like this:
+"""
+http://example.com/subject-x, http://example.com/predicate-x, Triple X
+http://example.com/subject-z, http://example.com/predicate-z, Triple Z
+http://example.com/subject-y, http://example.com/predicate-y, Triple Y
+"""
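+
+# A small illustrative check (an addition to the original example): quads() yields
+# each distinct (s, p, o, g) combination, so there are 4 quads here even though
+# len(d) counts distinct triples and is 3
+assert len(list(d.quads((None, None, None, None)))) == 4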
+""" + +# So try this +for x in d.graphs(): + print(x.identifier) + +# you should see something like this, noting the default, currently empty, graph: +""" +urn:x-rdflib:default +http://example.com/graph-2 +http://example.com/graph-1 +""" -# Remove Graph graph_1 from the Dataset -d.remove_graph(graph_1) +# To add to the default Graph, just add a triple, not a quad, to the Dataset directly +d.add( + ( + URIRef("http://example.com/subject-n"), + URIRef("http://example.com/predicate-n"), + Literal("Triple N"), + ) +) +for s, p, o, g in d.quads((None, None, None, None)): + print(f"{s}, {p}, {o}, {g}") + +# you should see something like this, noting the triple in the default Graph: +""" +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-2 +http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 +http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2 +http://example.com/subject-n, http://example.com/predicate-n, Triple N, urn:x-rdflib:default +""" + +# Loop through triples per graph +for x in d.graphs(): + print(x.identifier) + for s, p, o in x.triples((None, None, None)): + print(f"\t{s}, {p}, {o}") -# printing the Dataset like this: print(d.serialize(format="trig")) -# now produces a result like this: +# you should see something like this: +""" +urn:x-rdflib:default + http://example.com/subject-n, http://example.com/predicate-n, Triple N +http://example.com/graph-1 + http://example.com/subject-x, http://example.com/predicate-x, Triple X + http://example.com/subject-z, http://example.com/predicate-z, Triple Z +http://example.com/graph-2 + http://example.com/subject-y, http://example.com/predicate-y, Triple Y + http://example.com/subject-z, http://example.com/predicate-z, Triple Z +""" +# The default_union parameter includes all triples in the Named Graphs and the Default Graph +for s, p, o in d.triples((None, None, None)): + print(f"{s}, {p}, {o}") + +# you should see something like this: +""" +http://example.com/subject-x, http://example.com/predicate-x, Triple X +http://example.com/subject-n, http://example.com/predicate-n, Triple N +http://example.com/subject-z, http://example.com/predicate-z, Triple Z +http://example.com/subject-y, http://example.com/predicate-y, Triple Y """ + +# To remove a graph +d.remove_graph(graph_1_id) + +# To remove the default graph +d.remove_graph(URIRef("urn:x-rdflib:default")) + +# print what's left - one graph, graph-2 +print(d.serialize(format="trig")) + +# you should see something like this: +""" +@prefix ex: . + ex:graph-2 { ex:subject-y ex:predicate-y "Triple Y" . + + ex:subject-z ex:predicate-z "Triple Z" . +} +""" + +# To add a Graph that already exists, you must give it an Identifier or else it will be assigned a Blank Node ID +g_with_id = Graph(identifier=URIRef("http://example.com/graph-3")) +g_with_id.bind("ex", "http://example.com/") + +# Add a distinct triple to the exiting Graph, using Namepspace IRI shortcuts +# g_with_id.bind("ex", "http://example.com/") +g_with_id.add( + ( + URIRef("http://example.com/subject-k"), + URIRef("http://example.com/predicate-k"), + Literal("Triple K"), + ) +) +d.add_graph(g_with_id) +print(d.serialize(format="trig")) + +# you should see something like this: +""" +@prefix ex: . + +ex:graph-3 { + ex:subject_k ex:predicate_k "Triple K" . 
+
+# If you add a Graph with no specified identifier...
+g_no_id = Graph()
+g_no_id.bind("ex", "http://example.com/")
+
+g_no_id.add(
+    (
+        URIRef("http://example.com/subject-l"),
+        URIRef("http://example.com/predicate-l"),
+        Literal("Triple L"),
+    )
+)
+d.add_graph(g_no_id)
+
+# now when we print it, we will see a Graph with a Blank Node id:
+print(d.serialize(format="trig"))
+
+# you should see something like this, but with a different Blank Node ID, as this is rebuilt on each code execution
+"""
+@prefix ex: <http://example.com/> .
+
+ex:graph-3 {
+    ex:subject-k ex:predicate-k "Triple K" .
+}
+
+ex:graph-2 {
+    ex:subject-y ex:predicate-y "Triple Y" .
+
+    ex:subject-z ex:predicate-z "Triple Z" .
+}
+
+_:N9cc8b54c91724e31896da5ce41e0c937 {
+    ex:subject-l ex:predicate-l "Triple L" .
 }
 """
-print("Printing Serialised Dataset after graph_1 removal:")
-print("---")
-print(d.serialize(format="trig").strip())
-print("---")
-print()
-print()
diff --git a/examples/foafpaths.py b/examples/foafpaths.py
index db34fb316..152b4deaa 100644
--- a/examples/foafpaths.py
+++ b/examples/foafpaths.py
@@ -5,23 +5,20 @@
 We overload some Python operators on URIRefs to allow creating path
 operators directly in Python.

-============ =========================================
-Operator     Path
-============ =========================================
-``p1 / p2``  Path sequence
-``p1 | p2``  Path alternative
-``p1 * '*'`` chain of 0 or more p's
-``p1 * '+'`` chain of 1 or more p's
-``p1 * '?'`` 0 or 1 p
-``~p1``      p1 inverted, i.e. (s p1 o) <=> (o ~p1 s)
-``-p1``      NOT p1, i.e. any property but p1
-============ =========================================
-
-
-These can then be used in property position for ``s,p,o`` triple queries
+| Operator    | Path                                               |
+|-------------|----------------------------------------------------|
+| `p1 / p2`   | Path sequence                                      |
+| `p1 \| p2`  | Path alternative                                   |
+| `p1 * '*'`  | Chain of 0 or more p's                             |
+| `p1 * '+'`  | Chain of 1 or more p's                             |
+| `p1 * '?'`  | 0 or 1 p                                           |
+| `~p1`       | p1 inverted, i.e. `(s p1 o)` ⇔ `(o ~p1 s)`         |
+| `-p1`       | NOT p1, i.e. any property but p1                   |
+
+These can then be used in property position for `s,p,o` triple queries
 for any graph method.
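+
+For instance, a minimal sketch (assuming a graph `g` of FOAF data and a `bob`
+URIRef; the full worked query is in the code below):
+
+    for name in g.objects(bob, FOAF.knows / FOAF.name):
+        print(name)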
-See the docs for :mod:`rdflib.paths` for the details.
+See the docs for [`paths`][rdflib.paths] for the details.

 This example shows how to get the name of friends (i.e values two steps away x knows y, y name z) with a single query.
 """
diff --git a/examples/prepared_query.py b/examples/prepared_query.py
index 035c6137d..a297bcbe9 100644
--- a/examples/prepared_query.py
+++ b/examples/prepared_query.py
@@ -1,11 +1,11 @@
 """
 SPARQL Queries can be prepared (i.e. parsed and translated to SPARQL algebra)
-by the :meth:`rdflib.plugins.sparql.prepareQuery` method.
+by the [`prepareQuery`][rdflib.plugins.sparql.prepareQuery] method.

-``initNs`` can be used instead of PREFIX values.
+`initNs` can be used instead of PREFIX values.

 When executing, variables can be bound with the
-``initBindings`` keyword parameter.
+`initBindings` keyword parameter.
 """

 from pathlib import Path
diff --git a/examples/resource_example.py b/examples/resource_example.py
index da93042fa..ecb7937de 100644
--- a/examples/resource_example.py
+++ b/examples/resource_example.py
@@ -1,10 +1,10 @@
 """
-RDFLib has a :class:`~rdflib.resource.Resource` class, for a resource-centric API.
-The :class:`~rdflib.Graph` class also has a ``resource`` function that can be used
+RDFLib has a [`Resource`][rdflib.resource.Resource] class, for a resource-centric API.
+The [`Graph`][rdflib.Graph] class also has a `resource` function that can be used
 to create resources and manipulate them by quickly adding or querying for triples
 where this resource is the subject.

-This example shows g.resource() in action.
+This example shows `g.resource()` in action.
 """

 from rdflib import RDF, RDFS, Graph, Literal
diff --git a/examples/secure_with_audit.py b/examples/secure_with_audit.py
index 2bd4e28fb..20a6def20 100644
--- a/examples/secure_with_audit.py
+++ b/examples/secure_with_audit.py
@@ -1,10 +1,9 @@
 """
-This example demonstrates how to use `Python audit hooks
-<https://docs.python.org/3/library/sys.html#sys.addaudithook>`_ to block access
+This example demonstrates how to use [Python audit hooks](https://docs.python.org/3/library/sys.html#sys.addaudithook) to block access
 to files and URLs.

-It installs a audit hook with `sys.addaudithook <https://docs.python.org/3/library/sys.html#sys.addaudithook>`_ that blocks access to files and
-URLs that end with ``blocked.jsonld``.
+It installs an audit hook with [sys.addaudithook](https://docs.python.org/3/library/sys.html#sys.addaudithook) that blocks access to files and
+URLs that end with `blocked.jsonld`.

 The code in the example then verifies that the audit hook is blocking access to
 URLs and files as expected.
@@ -15,23 +14,28 @@
 import logging
 import os
 import sys
-from typing import Any, Optional, Tuple
+from typing import Any

 from rdflib import Graph


-def audit_hook(name: str, args: Tuple[Any, ...]) -> None:
+def audit_hook(name: str, args: tuple[Any, ...]) -> None:
     """
     An audit hook that blocks access when an attempt is made to open a
-    file or URL that ends with ``blocked.jsonld``.
+    file or URL that ends with `blocked.jsonld`.

-    Details of the audit events can be seen in the `audit events
-    table <https://docs.python.org/3/library/audit_events.html>`_.
+    Details of the audit events can be seen in the
+    [audit events table](https://docs.python.org/3/library/audit_events.html).

-    :param name: The name of the audit event.
-    :param args: The arguments of the audit event.
-    :return: `None` if the audit hook does not block access.
-    :raises PermissionError: If the file or URL being accessed ends with ``blocked.jsonld``.
+    Args:
+        name: The name of the audit event.
+        args: The arguments of the audit event.
+
+    Returns:
+        `None` if the audit hook does not block access.
+
+    Raises:
+        PermissionError: If the file or URL being accessed ends with `blocked.jsonld`.
     """
     if name == "urllib.Request" and args[0].endswith("blocked.jsonld"):
         raise PermissionError("Permission denied for URL")
@@ -70,7 +74,7 @@ def main() -> None:

     # Attempt to parse a JSON-LD document that will result in the blocked URL
     # being accessed.
-    error: Optional[PermissionError] = None
+    error: PermissionError | None = None
     try:
         graph.parse(
             data=r"""{
diff --git a/examples/secure_with_urlopen.py b/examples/secure_with_urlopen.py
index 005504796..aadbf340a 100644
--- a/examples/secure_with_urlopen.py
+++ b/examples/secure_with_urlopen.py
@@ -8,7 +8,6 @@
 import logging
 import os
 import sys
-from typing import Optional
 from urllib.request import HTTPHandler, OpenerDirector, Request, install_opener

 from rdflib import Graph
@@ -23,9 +22,14 @@ def http_open(self, req: Request) -> http.client.HTTPResponse:
         """
         Block access to URLs that end with "blocked.jsonld".

-        :param req: The request to open.
-        :return: The response.
-        :raises PermissionError: If the URL ends with "blocked.jsonld".
+        Args:
+            req: The request to open.
+
+        Returns:
+            The response.
+ + Raises: + PermissionError: If the URL ends with "blocked.jsonld". """ if req.get_full_url().endswith("blocked.jsonld"): raise PermissionError("Permission denied for URL") @@ -61,7 +65,7 @@ def main() -> None: # Attempt to parse a JSON-LD document that will result in the blocked URL # being accessed. - error: Optional[PermissionError] = None + error: PermissionError | None = None try: graph.parse( data=r"""{ diff --git a/examples/slice.py b/examples/slice.py index 6994613e6..82474e18b 100644 --- a/examples/slice.py +++ b/examples/slice.py @@ -3,10 +3,10 @@ This is a short-hand for iterating over triples. -Combined with SPARQL paths (see ``foafpaths.py``) - quite complex queries +Combined with SPARQL paths (see `foafpaths.py`) - quite complex queries can be realised. -See :meth:`rdflib.graph.Graph.__getitem__` for details +See [`Graph.__getitem__`][rdflib.graph.Graph.__getitem__] for details """ from pathlib import Path diff --git a/examples/smushing.py b/examples/smushing.py index 88d68a520..701993abb 100644 --- a/examples/smushing.py +++ b/examples/smushing.py @@ -1,22 +1,22 @@ """ A FOAF smushing example. -Filter a graph by normalizing all ``foaf:Persons`` into URIs based on -their ``mbox_sha1sum``. +Filter a graph by normalizing all `foaf:Persons` into URIs based on +their `mbox_sha1sum`. -Suppose I get two `FOAF `_ documents each -talking about the same person (according to ``mbox_sha1sum``) but they -each used a :class:`rdflib.term.BNode` for the subject. For this demo +Suppose I get two [FOAF](http://xmlns.com/foaf/0.1) documents each +talking about the same person (according to `mbox_sha1sum`) but they +each used a [`BNode`][rdflib.term.BNode] for the subject. For this demo I've combined those two documents into one file: This filters a graph by changing every subject with a -``foaf:mbox_sha1sum`` into a new subject whose URI is based on the -``sha1sum``. This new graph might be easier to do some operations on. +`foaf:mbox_sha1sum` into a new subject whose URI is based on the +`sha1sum`. This new graph might be easier to do some operations on. An advantage of this approach over other methods for collapsing BNodes is that I can incrementally process new FOAF documents as they come in without having to access my ever-growing archive. Even if another -``65b983bb397fb71849da910996741752ace8369b`` document comes in next +`65b983bb397fb71849da910996741752ace8369b` document comes in next year, I would still give it the same stable subject URI that merges with my existing data. """ diff --git a/examples/sparql_query_example.py b/examples/sparql_query_example.py index 0e9fc225c..29fef43c7 100644 --- a/examples/sparql_query_example.py +++ b/examples/sparql_query_example.py @@ -1,14 +1,14 @@ """ -SPARQL Query using :meth:`rdflib.graph.Graph.query` +SPARQL Query using [`Graph.query`][rdflib.graph.Graph.query] -The method returns a :class:`~rdflib.query.Result`, iterating over -this yields :class:`~rdflib.query.ResultRow` objects +The method returns a [`Result`][rdflib.query.Result], iterating over +this yields [`ResultRow`][rdflib.query.ResultRow] objects The variable bindings can be accessed as attributes of the row objects For variable names that are not valid python identifiers, dict access -(i.e. with ``row[var] / __getitem__``) is also possible. +(i.e. with `row[var] / __getitem__`) is also possible. 
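+
+A minimal sketch (assuming a graph `g` holding FOAF data, as in this example):
+
+    for row in g.query("SELECT ?name WHERE { ?p <http://xmlns.com/foaf/0.1/name> ?name }"):
+        print(row.name)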
-:attr:`~rdflib.query.Result.vars` contains the variables +[`Result.vars`][rdflib.query.Result.vars] contains the variables """ import logging diff --git a/examples/sparql_update_example.py b/examples/sparql_update_example.py index a99749962..f5c02b335 100644 --- a/examples/sparql_update_example.py +++ b/examples/sparql_update_example.py @@ -1,5 +1,5 @@ """ -SPARQL Update statements can be applied with :meth:`rdflib.graph.Graph.update` +SPARQL Update statements can be applied with [`Graph.update`][rdflib.graph.Graph.update] """ from pathlib import Path diff --git a/examples/transitive.py b/examples/transitive.py index 800cbc80c..9c4708992 100644 --- a/examples/transitive.py +++ b/examples/transitive.py @@ -1,45 +1,45 @@ """ An example illustrating how to use the -:meth:`~rdflib.graph.Graph.transitive_subjects` and -:meth:`~rdflib.graph.Graph.transitive_objects` graph methods +[`Graph.transitive_subjects`][rdflib.graph.Graph.transitive_subjects] and +[`Graph.transitive_objects`][rdflib.graph.Graph.transitive_objects] graph methods -Formal definition -^^^^^^^^^^^^^^^^^^ +## Formal definition -The :meth:`~rdflib.graph.Graph.transitive_objects` method finds all + +The [`Graph.transitive_objects`][rdflib.graph.Graph.transitive_objects] method finds all nodes such that there is a path from subject to one of those nodes using only the predicate property in the triples. The -:meth:`~rdflib.graph.Graph.transitive_subjects` method is similar; it +[`Graph.transitive_subjects`][rdflib.graph.Graph.transitive_subjects] method is similar; it finds all nodes such that there is a path from the node to the object using only the predicate property. -Informal description, with an example -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +## Informal description, with an example -In brief, :meth:`~rdflib.graph.Graph.transitive_objects` walks forward +In brief, [`Graph.transitive_objects`][rdflib.graph.Graph.transitive_objects] walks forward in a graph using a particular property, and -:meth:`~rdflib.graph.Graph.transitive_subjects` walks backward. A good -example uses a property ``ex:parent``, the semantics of which are +[`Graph.transitive_subjects`][rdflib.graph.Graph.transitive_subjects] walks backward. A good +example uses a property `ex:parent`, the semantics of which are biological parentage. The -:meth:`~rdflib.graph.Graph.transitive_objects` method would get all +[`Graph.transitive_objects`][rdflib.graph.Graph.transitive_objects] method would get all the ancestors of a particular person (all nodes such that there is a parent path between the person and the object). The -:meth:`~rdflib.graph.Graph.transitive_subjects` method would get all +[`Graph.transitive_subjects`][rdflib.graph.Graph.transitive_subjects] method would get all the descendants of a particular person (all nodes such that there is a parent path between the node and the person). So, say that your URI is -``ex:person``. +`ex:person`. This example would get all of your (known) ancestors, and then get all the (known) descendants of your maternal grandmother. -.. warning:: The :meth:`~rdflib.graph.Graph.transitive_objects` method has the start node - as the *first* argument, but the :meth:`~rdflib.graph.Graph.transitive_subjects` +!!! warning "Important note on arguments" + + The [`Graph.transitive_objects`][rdflib.graph.Graph.transitive_objects] method has the start node + as the *first* argument, but the [`Graph.transitive_subjects`][rdflib.graph.Graph.transitive_subjects] method has the start node as the *second* argument. 
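+
+A minimal sketch of the ancestors case (assuming a graph `g` that uses an
+`EX.parent` property and a `person` URIRef, as described above):
+
+    for ancestor in g.transitive_objects(person, EX.parent):
+        print(ancestor)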
-User-defined transitive closures -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +## User-defined transitive closures -The method :meth:`~rdflib.graph.Graph.transitiveClosure` returns +The method [`Graph.transitiveClosure`][rdflib.graph.Graph.transitiveClosure] returns transtive closures of user-defined functions. """ diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 000000000..ec321cfed --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,180 @@ +site_name: RDFLib +site_description: Python library for working with RDF, a simple yet powerful language for representing information. +site_author: RDFLib Team +site_url: https://rdflib.readthedocs.org +repo_name: RDFLib/rdflib +repo_url: https://github.com/RDFLib/rdflib +edit_uri: "edit/main/docs/" +copyright: Copyright © 2002 - 2025, RDFLib Team. + +# poetry run mkdocs serve -a localhost:8000 + +nav: + - Usage: + - Overview: index.md + - Getting started with RDFLib: gettingstarted.md + - Loading and saving RDF: intro_to_parsing.md + - Creating RDF triples: intro_to_creating_rdf.md + - Navigating Graphs: intro_to_graphs.md + - Querying with SPARQL: intro_to_sparql.md + - Utilities functions: utilities.md + + - In depth: + - Plugins: plugins.md + - RDF terms: rdf_terms.md + - Namespaces and Bindings: namespaces_and_bindings.md + - Persistence: persistence.md + - Merging graphs: merging.md + - Security considerations: security_considerations.md + + - Changes: + - Changelog: changelog.md + - Upgrading v7 to 8: upgrade7to8.md + - Upgrading v6 to 7: upgrade6to7.md + - Upgrading v5 to 6: upgrade5to6.md + - Upgrading v4 to 5: upgrade4to5.md + + - API Reference: + - Examples: apidocs/examples.md + - Graph: apidocs/rdflib.graph.md + - Term: apidocs/rdflib.term.md + - Namespace: apidocs/rdflib.namespace.md + - Tools: apidocs/rdflib.tools.md + - Extras: apidocs/rdflib.extras.md + - Container: apidocs/rdflib.container.md + - Collection: apidocs/rdflib.collection.md + - Paths: apidocs/rdflib.paths.md + - Plugin: apidocs/rdflib.plugin.md + - Util: apidocs/rdflib.util.md + - Plugins: + - Parsers: apidocs/rdflib.plugins.parsers.md + - Serializers: apidocs/rdflib.plugins.serializers.md + - Stores: apidocs/rdflib.plugins.stores.md + - SPARQL: apidocs/rdflib.plugins.sparql.md + + - Development: + - Contributing guide: CONTRIBUTING.md + - Developers guide: developers.md + - Documentation guide: docs.md + - Type Hints: type_hints.md + - Persisting Notation 3 Terms: persisting_n3_terms.md + - Code of Conduct: CODE_OF_CONDUCT.md + - Decision Records: decisions.md + + +theme: + name: "material" + favicon: _static/RDFlib.png + logo: _static/RDFlib.png + language: en + # Choose color: https://squidfunk.github.io/mkdocs-material/setup/changing-the-colors/#primary-color + palette: + - media: "(prefers-color-scheme: light)" + primary: blue grey + scheme: default + toggle: + icon: material/weather-night + name: Switch to dark mode + - media: "(prefers-color-scheme: dark)" + primary: indigo + scheme: slate + toggle: + icon: material/weather-sunny + name: Switch to light mode + features: + - navigation.indexes + - navigation.sections + - navigation.tabs + - navigation.top + - navigation.tracking + - navigation.footer + - content.code.copy + - content.code.annotate + - content.code.select + - content.tabs.link # Group tabs switch + - content.action.edit + - content.action.view + - search.highlight + - search.share + - search.suggest + - toc.follow + - content.tooltips + # - header.autohide + # - navigation.tabs.sticky + # - navigation.expand + # - navigation.instant + + +plugins: +- 
search +- autorefs +- include-markdown +- gen-files: + scripts: + - docs/gen_ref_pages.py +- mkdocstrings: + default_handler: python + handlers: + python: + # https://mkdocstrings.github.io/python/reference/api/#mkdocstrings_handlers.python.PythonInputOptions + options: + docstring_style: google + docstring_options: + ignore_init_summary: true + docstring_section_style: list + filters: ["!^_[^_]"] # Exclude names starting with a single underscore + heading_level: 1 + inherited_members: false # Disable inherited members to avoid duplicates + merge_init_into_class: true + parameter_headings: true + separate_signature: true + signature_crossrefs: true + summary: true + show_bases: true + show_root_heading: true + show_root_full_path: false + show_signature_annotations: true + show_source: true + show_symbol_type_heading: true + show_symbol_type_toc: true + show_overloads: false + show_if_no_docstring: true # Showing when no docstring increases build time + +watch: + - rdflib + - docs + + +# Supported admonititions: https://squidfunk.github.io/mkdocs-material/reference/admonitions/#supported-types +markdown_extensions: + - admonition + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.inlinehilite + - pymdownx.snippets + - pymdownx.superfences + - pymdownx.details + - pymdownx.extra + - pymdownx.tabbed: + alternate_style: true + - pymdownx.tasklist: + custom_checkbox: true + - attr_list + - smarty + - abbr + - pymdownx.snippets: + auto_append: + - docs/includes/abbreviations.md + + +# extra_css: +# - _static/custom.css +# extra_javascript: +# - _static/fontawesome.min.js + +extra: + social: + - icon: fontawesome/brands/python + link: https://pypi.org/project/rdflib + - icon: fontawesome/brands/github + link: https://github.com/RDFLib diff --git a/poetry.lock b/poetry.lock index c2d3eb897..39edeb198 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,69 +1,81 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. [[package]] -name = "alabaster" -version = "0.7.13" -description = "A configurable sidebar-enabled Sphinx theme" +name = "babel" +version = "2.17.0" +description = "Internationalization utilities" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" +groups = ["docs"] files = [ - {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"}, - {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, + {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"}, + {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"}, ] +[package.extras] +dev = ["backports.zoneinfo", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata"] + [[package]] -name = "babel" -version = "2.12.1" -description = "Internationalization utilities" +name = "backrefs" +version = "5.8" +description = "A wrapper around re and regex that adds additional back references." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" +groups = ["docs"] files = [ - {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"}, - {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"}, + {file = "backrefs-5.8-py310-none-any.whl", hash = "sha256:c67f6638a34a5b8730812f5101376f9d41dc38c43f1fdc35cb54700f6ed4465d"}, + {file = "backrefs-5.8-py311-none-any.whl", hash = "sha256:2e1c15e4af0e12e45c8701bd5da0902d326b2e200cafcd25e49d9f06d44bb61b"}, + {file = "backrefs-5.8-py312-none-any.whl", hash = "sha256:bbef7169a33811080d67cdf1538c8289f76f0942ff971222a16034da88a73486"}, + {file = "backrefs-5.8-py313-none-any.whl", hash = "sha256:e3a63b073867dbefd0536425f43db618578528e3896fb77be7141328642a1585"}, + {file = "backrefs-5.8-py39-none-any.whl", hash = "sha256:a66851e4533fb5b371aa0628e1fee1af05135616b86140c9d787a2ffdf4b8fdc"}, + {file = "backrefs-5.8.tar.gz", hash = "sha256:2cab642a205ce966af3dd4b38ee36009b31fa9502a35fd61d59ccc116e40a6bd"}, ] -[package.dependencies] -pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} +[package.extras] +extras = ["regex"] [[package]] name = "berkeleydb" -version = "18.1.10" +version = "18.1.14" description = "Python bindings for Oracle Berkeley DB" optional = true python-versions = "*" +groups = ["main"] +markers = "extra == \"berkeleydb\"" files = [ - {file = "berkeleydb-18.1.10.tar.gz", hash = "sha256:426341a16007a9002d987a6f4d97226f8eafffcb1a0488488053d38a3127c81a"}, + {file = "berkeleydb-18.1.14.tar.gz", hash = "sha256:8c260282f57ebd5b9c3ce53da0eb75be5957addb303e3190935b716448f32f7d"}, ] [[package]] name = "black" -version = "24.4.2" +version = "24.10.0" description = "The uncompromising code formatter." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"}, - {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"}, - {file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"}, - {file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"}, - {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"}, - {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"}, - {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"}, - {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"}, - {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"}, - {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"}, - {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"}, - {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"}, - {file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"}, - {file = "black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"}, - {file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"}, - {file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"}, - {file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"}, - {file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"}, - {file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"}, - {file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"}, - {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"}, - {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"}, + {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, + {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, + {file = "black-24.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f"}, + {file = "black-24.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e"}, + {file = "black-24.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad"}, + {file = "black-24.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50"}, + {file = "black-24.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392"}, + {file = "black-24.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175"}, + {file = "black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3"}, + {file = "black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65"}, + {file = "black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f"}, + {file = "black-24.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8"}, + {file = "black-24.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981"}, + {file = "black-24.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b"}, + {file = "black-24.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2"}, + {file = "black-24.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b"}, + {file = "black-24.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd"}, + {file = "black-24.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f"}, + {file = "black-24.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800"}, + {file = "black-24.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7"}, + {file = "black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d"}, + {file = "black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875"}, ] [package.dependencies] @@ -77,16 +89,29 @@ typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} [package.extras] colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +d = ["aiohttp (>=3.10)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] +[[package]] +name = "bracex" +version = "2.5.post1" +description = "Bash style brace expander." 
+optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "bracex-2.5.post1-py3-none-any.whl", hash = "sha256:13e5732fec27828d6af308628285ad358047cec36801598368cb28bc631dbaf6"}, + {file = "bracex-2.5.post1.tar.gz", hash = "sha256:12c50952415bfa773d2d9ccb8e79651b8cdb1f31a42f6091b804f6ba2b4a66b6"}, +] + [[package]] name = "build" version = "1.2.2.post1" description = "A simple, correct Python build frontend" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5"}, {file = "build-1.2.2.post1.tar.gz", hash = "sha256:b36993e92ca9375a219c99e606a122ff365a760a2d4bba0caa09bd5278b608b7"}, @@ -108,108 +133,128 @@ virtualenv = ["virtualenv (>=20.0.35)"] [[package]] name = "certifi" -version = "2023.7.22" +version = "2025.4.26" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["docs"] files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, + {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"}, + {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, ] [[package]] name = "charset-normalizer" -version = "3.2.0" +version = "3.4.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, - {file = 
"charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"}, - 
{file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"}, - {file = 
"charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"}, - {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, +python-versions = ">=3.7" +groups = ["docs"] +files = [ + {file 
= "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, + {file = 
"charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", 
hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, + {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, + {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, ] [[package]] name = "click" -version = "8.1.7" +version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" +groups = ["dev", "docs"] files = [ - {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, - {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, ] [package.dependencies] @@ -221,90 +266,88 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+groups = ["dev", "docs", "tests"]
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
+markers = {dev = "platform_system == \"Windows\" or os_name == \"nt\"", tests = "sys_platform == \"win32\""}

[[package]]
name = "coverage"
-version = "7.6.1"
+version = "7.8.2"
description = "Code coverage measurement for Python"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
+groups = ["tests"]
files = [
- {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"},
- {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"},
- {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"},
- {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"},
- {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"},
- {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"},
- {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"},
- {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"},
- {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"},
- {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"},
- {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"},
- {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"},
- {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"},
- {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"},
- {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"},
- {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"},
- {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"},
- {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"},
- {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"},
- {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"},
- {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"},
- {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"},
- {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"},
- {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"},
- {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"},
- {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"},
- {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"},
- {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"},
- {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"},
- {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"},
- {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"},
- {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"},
- {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"},
- {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"},
- {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"},
- {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"},
- {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"},
- {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"},
- {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"},
- {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"},
- {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"},
- {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"},
- {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"},
- {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"},
- {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"},
- {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"},
- {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"},
- {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"},
- {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"},
- {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"},
- {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"},
- {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"},
- {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"},
- {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"},
- {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"},
- {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"},
- {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"},
- {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"},
- {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"},
- {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"},
- {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"},
- {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"},
- {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"},
- {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"},
- {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"},
- {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"},
- {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"},
- {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"},
- {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"},
- {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"},
- {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"},
- {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"},
+ {file = "coverage-7.8.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bd8ec21e1443fd7a447881332f7ce9d35b8fbd2849e761bb290b584535636b0a"},
+ {file = "coverage-7.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4c26c2396674816deaeae7ded0e2b42c26537280f8fe313335858ffff35019be"},
+ {file = "coverage-7.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1aec326ed237e5880bfe69ad41616d333712c7937bcefc1343145e972938f9b3"},
+ {file = "coverage-7.8.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5e818796f71702d7a13e50c70de2a1924f729228580bcba1607cccf32eea46e6"},
+ {file = "coverage-7.8.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:546e537d9e24efc765c9c891328f30f826e3e4808e31f5d0f87c4ba12bbd1622"},
+ {file = "coverage-7.8.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ab9b09a2349f58e73f8ebc06fac546dd623e23b063e5398343c5270072e3201c"},
+ {file = "coverage-7.8.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fd51355ab8a372d89fb0e6a31719e825cf8df8b6724bee942fb5b92c3f016ba3"},
+ {file = "coverage-7.8.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0774df1e093acb6c9e4d58bce7f86656aeed6c132a16e2337692c12786b32404"},
+ {file = "coverage-7.8.2-cp310-cp310-win32.whl", hash = "sha256:00f2e2f2e37f47e5f54423aeefd6c32a7dbcedc033fcd3928a4f4948e8b96af7"},
+ {file = "coverage-7.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:145b07bea229821d51811bf15eeab346c236d523838eda395ea969d120d13347"},
+ {file = "coverage-7.8.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b99058eef42e6a8dcd135afb068b3d53aff3921ce699e127602efff9956457a9"},
+ {file = "coverage-7.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5feb7f2c3e6ea94d3b877def0270dff0947b8d8c04cfa34a17be0a4dc1836879"},
+ {file = "coverage-7.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:670a13249b957bb9050fab12d86acef7bf8f6a879b9d1a883799276e0d4c674a"},
+ {file = "coverage-7.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0bdc8bf760459a4a4187b452213e04d039990211f98644c7292adf1e471162b5"},
+ {file = "coverage-7.8.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07a989c867986c2a75f158f03fdb413128aad29aca9d4dbce5fc755672d96f11"},
+ {file = "coverage-7.8.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2db10dedeb619a771ef0e2949ccba7b75e33905de959c2643a4607bef2f3fb3a"},
+ {file = "coverage-7.8.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e6ea7dba4e92926b7b5f0990634b78ea02f208d04af520c73a7c876d5a8d36cb"},
+ {file = "coverage-7.8.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ef2f22795a7aca99fc3c84393a55a53dd18ab8c93fb431004e4d8f0774150f54"},
+ {file = "coverage-7.8.2-cp311-cp311-win32.whl", hash = "sha256:641988828bc18a6368fe72355df5f1703e44411adbe49bba5644b941ce6f2e3a"},
+ {file = "coverage-7.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:8ab4a51cb39dc1933ba627e0875046d150e88478dbe22ce145a68393e9652975"},
+ {file = "coverage-7.8.2-cp311-cp311-win_arm64.whl", hash = "sha256:8966a821e2083c74d88cca5b7dcccc0a3a888a596a04c0b9668a891de3a0cc53"},
+ {file = "coverage-7.8.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e2f6fe3654468d061942591aef56686131335b7a8325684eda85dacdf311356c"},
+ {file = "coverage-7.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76090fab50610798cc05241bf83b603477c40ee87acd358b66196ab0ca44ffa1"},
+ {file = "coverage-7.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bd0a0a5054be160777a7920b731a0570284db5142abaaf81bcbb282b8d99279"},
+ {file = "coverage-7.8.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da23ce9a3d356d0affe9c7036030b5c8f14556bd970c9b224f9c8205505e3b99"},
+ {file = "coverage-7.8.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9392773cffeb8d7e042a7b15b82a414011e9d2b5fdbbd3f7e6a6b17d5e21b20"},
+ {file = "coverage-7.8.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:876cbfd0b09ce09d81585d266c07a32657beb3eaec896f39484b631555be0fe2"},
+ {file = "coverage-7.8.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3da9b771c98977a13fbc3830f6caa85cae6c9c83911d24cb2d218e9394259c57"},
+ {file = "coverage-7.8.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9a990f6510b3292686713bfef26d0049cd63b9c7bb17e0864f133cbfd2e6167f"},
+ {file = "coverage-7.8.2-cp312-cp312-win32.whl", hash = "sha256:bf8111cddd0f2b54d34e96613e7fbdd59a673f0cf5574b61134ae75b6f5a33b8"},
+ {file = "coverage-7.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:86a323a275e9e44cdf228af9b71c5030861d4d2610886ab920d9945672a81223"},
+ {file = "coverage-7.8.2-cp312-cp312-win_arm64.whl", hash = "sha256:820157de3a589e992689ffcda8639fbabb313b323d26388d02e154164c57b07f"},
+ {file = "coverage-7.8.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ea561010914ec1c26ab4188aef8b1567272ef6de096312716f90e5baa79ef8ca"},
+ {file = "coverage-7.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cb86337a4fcdd0e598ff2caeb513ac604d2f3da6d53df2c8e368e07ee38e277d"},
+ {file = "coverage-7.8.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26a4636ddb666971345541b59899e969f3b301143dd86b0ddbb570bd591f1e85"},
+ {file = "coverage-7.8.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5040536cf9b13fb033f76bcb5e1e5cb3b57c4807fef37db9e0ed129c6a094257"},
+ {file = "coverage-7.8.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc67994df9bcd7e0150a47ef41278b9e0a0ea187caba72414b71dc590b99a108"},
+ {file = "coverage-7.8.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e6c86888fd076d9e0fe848af0a2142bf606044dc5ceee0aa9eddb56e26895a0"},
+ {file = "coverage-7.8.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:684ca9f58119b8e26bef860db33524ae0365601492e86ba0b71d513f525e7050"},
+ {file = "coverage-7.8.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8165584ddedb49204c4e18da083913bdf6a982bfb558632a79bdaadcdafd0d48"},
+ {file = "coverage-7.8.2-cp313-cp313-win32.whl", hash = "sha256:34759ee2c65362163699cc917bdb2a54114dd06d19bab860725f94ef45a3d9b7"},
+ {file = "coverage-7.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:2f9bc608fbafaee40eb60a9a53dbfb90f53cc66d3d32c2849dc27cf5638a21e3"},
+ {file = "coverage-7.8.2-cp313-cp313-win_arm64.whl", hash = "sha256:9fe449ee461a3b0c7105690419d0b0aba1232f4ff6d120a9e241e58a556733f7"},
+ {file = "coverage-7.8.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8369a7c8ef66bded2b6484053749ff220dbf83cba84f3398c84c51a6f748a008"},
+ {file = "coverage-7.8.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:159b81df53a5fcbc7d45dae3adad554fdbde9829a994e15227b3f9d816d00b36"},
+ {file = "coverage-7.8.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6fcbbd35a96192d042c691c9e0c49ef54bd7ed865846a3c9d624c30bb67ce46"},
+ {file = "coverage-7.8.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05364b9cc82f138cc86128dc4e2e1251c2981a2218bfcd556fe6b0fbaa3501be"},
+ {file = "coverage-7.8.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46d532db4e5ff3979ce47d18e2fe8ecad283eeb7367726da0e5ef88e4fe64740"},
+ {file = "coverage-7.8.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4000a31c34932e7e4fa0381a3d6deb43dc0c8f458e3e7ea6502e6238e10be625"},
+ {file = "coverage-7.8.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:43ff5033d657cd51f83015c3b7a443287250dc14e69910577c3e03bd2e06f27b"},
+ {file = "coverage-7.8.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:94316e13f0981cbbba132c1f9f365cac1d26716aaac130866ca812006f662199"},
+ {file = "coverage-7.8.2-cp313-cp313t-win32.whl", hash = "sha256:3f5673888d3676d0a745c3d0e16da338c5eea300cb1f4ada9c872981265e76d8"},
+ {file = "coverage-7.8.2-cp313-cp313t-win_amd64.whl", hash = "sha256:2c08b05ee8d7861e45dc5a2cc4195c8c66dca5ac613144eb6ebeaff2d502e73d"},
+ {file = "coverage-7.8.2-cp313-cp313t-win_arm64.whl", hash = "sha256:1e1448bb72b387755e1ff3ef1268a06617afd94188164960dba8d0245a46004b"},
+ {file = "coverage-7.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:496948261eaac5ac9cf43f5d0a9f6eb7a6d4cb3bedb2c5d294138142f5c18f2a"},
+ {file = "coverage-7.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eacd2de0d30871eff893bab0b67840a96445edcb3c8fd915e6b11ac4b2f3fa6d"},
+ {file = "coverage-7.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b039ffddc99ad65d5078ef300e0c7eed08c270dc26570440e3ef18beb816c1ca"},
+ {file = "coverage-7.8.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e49824808d4375ede9dd84e9961a59c47f9113039f1a525e6be170aa4f5c34d"},
+ {file = "coverage-7.8.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b069938961dfad881dc2f8d02b47645cd2f455d3809ba92a8a687bf513839787"},
+ {file = "coverage-7.8.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:de77c3ba8bb686d1c411e78ee1b97e6e0b963fb98b1637658dd9ad2c875cf9d7"},
+ {file = "coverage-7.8.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1676628065a498943bd3f64f099bb573e08cf1bc6088bbe33cf4424e0876f4b3"},
+ {file = "coverage-7.8.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8e1a26e7e50076e35f7afafde570ca2b4d7900a491174ca357d29dece5aacee7"},
+ {file = "coverage-7.8.2-cp39-cp39-win32.whl", hash = "sha256:6782a12bf76fa61ad9350d5a6ef5f3f020b57f5e6305cbc663803f2ebd0f270a"},
+ {file = "coverage-7.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:1efa4166ba75ccefd647f2d78b64f53f14fb82622bc94c5a5cb0a622f50f1c9e"},
+ {file = "coverage-7.8.2-pp39.pp310.pp311-none-any.whl", hash = "sha256:ec455eedf3ba0bbdf8f5a570012617eb305c63cb9f03428d39bf544cb2b94837"},
+ {file = "coverage-7.8.2-py3-none-any.whl", hash = "sha256:726f32ee3713f7359696331a18daf0c3b3a70bb0ae71141b9d3c52be7c595e32"},
+ {file = "coverage-7.8.2.tar.gz", hash = "sha256:a886d531373a1f6ff9fad2a2ba4a045b68467b779ae729ee0b3b10ac20033b27"},
]

[package.dependencies]
@@ -314,97 +357,120 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1
toml = ["tomli"]

[[package]]
-name = "docutils"
-version = "0.20.1"
-description = "Docutils -- Python Documentation Utilities"
+name = "exceptiongroup"
+version = "1.3.0"
+description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
+groups = ["tests"]
+markers = "python_version < \"3.11\""
files = [
- {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"},
- {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"},
+ {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"},
+ {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"},
]

+[package.dependencies]
+typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""}
+
+[package.extras]
+test = ["pytest (>=6)"]
+
[[package]]
-name = "exceptiongroup"
-version = "1.1.3"
-description = "Backport of PEP 654 (exception groups)"
+name = "ghp-import"
+version = "2.1.0"
+description = "Copy your docs directly to the gh-pages branch."
optional = false
-python-versions = ">=3.7"
+python-versions = "*"
+groups = ["docs"]
files = [
- {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"},
- {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"},
+ {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"},
+ {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"},
]

+[package.dependencies]
+python-dateutil = ">=2.8.1"
+
[package.extras]
-test = ["pytest (>=6)"]
+dev = ["flake8", "markdown", "twine", "wheel"]
+
+[[package]]
+name = "griffe"
+version = "1.7.3"
+description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API."
+optional = false
+python-versions = ">=3.9"
+groups = ["docs"]
+files = [
+ {file = "griffe-1.7.3-py3-none-any.whl", hash = "sha256:c6b3ee30c2f0f17f30bcdef5068d6ab7a2a4f1b8bf1a3e74b56fffd21e1c5f75"},
+ {file = "griffe-1.7.3.tar.gz", hash = "sha256:52ee893c6a3a968b639ace8015bec9d36594961e156e23315c8e8e51401fa50b"},
+]
+
+[package.dependencies]
+colorama = ">=0.4"

[[package]]
name = "html5rdf"
-version = "1.2"
+version = "1.2.1"
description = "HTML parser based on the WHATWG HTML specification"
optional = true
python-versions = ">=3.8"
+groups = ["main"]
+markers = "extra == \"html\""
files = [
- {file = "html5rdf-1.2-py2.py3-none-any.whl", hash = "sha256:08169aa52a98ee3a6d3456d83feb36211fb5edcbcf3e05f6d19e0136f581638c"},
- {file = "html5rdf-1.2.tar.gz", hash = "sha256:08378cbbbb63993ba7bb5eb1eac44bf9ca7b1a23dbee3d2afef5376597fb00a5"},
+ {file = "html5rdf-1.2.1-py2.py3-none-any.whl", hash = "sha256:1f519121bc366af3e485310dc8041d2e86e5173c1a320fac3dc9d2604069b83e"},
+ {file = "html5rdf-1.2.1.tar.gz", hash = "sha256:ace9b420ce52995bb4f05e7425eedf19e433c981dfe7a831ab391e2fa2e1a195"},
]

-[package.extras]
-all = ["chardet (>=2.2.1)", "genshi (>=0.7.1)", "lxml (>=3.4.0)"]
-chardet = ["chardet (>=2.2.1)"]
-genshi = ["genshi (>=0.7.1)"]
-lxml = ["lxml (>=3.4.0)"]
-
[[package]]
name = "idna"
-version = "3.4"
+version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
-python-versions = ">=3.5"
+python-versions = ">=3.6"
+groups = ["docs"]
files = [
- {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
- {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
+ {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]

-[[package]]
-name = "imagesize"
-version = "1.4.1"
-description = "Getting image size from png/jpeg/jpeg2000/gif file"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-files = [
- {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"},
- {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"},
-]
+[package.extras]
+all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]

[[package]]
name = "importlib-metadata"
-version = "6.8.0"
+version = "8.7.0"
description = "Read metadata from Python packages"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
+groups = ["dev", "docs"]
files = [
- {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"},
- {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"},
+ {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"},
+ {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"},
]
+markers = {dev = "python_full_version < \"3.10.2\"", docs = "python_version < \"3.10\""}

[package.dependencies]
-zipp = ">=0.5"
+zipp = ">=3.20"

[package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
-testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
+test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
+type = ["pytest-mypy"]

[[package]]
name = "iniconfig"
-version = "2.0.0"
+version = "2.1.0"
description = "brain-dead simple config-ini parsing"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
+groups = ["tests"]
files = [
- {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
- {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"},
+ {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
]

[[package]]
@@ -413,6 +479,8 @@ version = "0.7.2"
description = "An ISO 8601 date/time/duration parser and formatter"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
+markers = "python_version < \"3.11\""
files = [
{file = "isodate-0.7.2-py3-none-any.whl", hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15"},
{file = "isodate-0.7.2.tar.gz", hash = "sha256:4cd1aa0f43ca76f4a6c6c0292a85f40b35ec2e43e315b59f06e6d32171a953e6"},
@@ -420,13 +488,14 @@ files = [

[[package]]
name = "jinja2"
-version = "3.1.2"
+version = "3.1.6"
description = "A very fast and expressive template engine."
optional = false
python-versions = ">=3.7"
+groups = ["docs"]
files = [
- {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
- {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
+ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"},
+ {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"},
]

[package.dependencies]
@@ -437,157 +506,153 @@ i18n = ["Babel (>=2.7)"]

[[package]]
name = "lxml"
-version = "5.3.0"
+version = "5.4.0"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
optional = true
python-versions = ">=3.6"
+groups = ["main"]
+markers = "extra == \"lxml\""
files = [
- {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"},
- {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:501d0d7e26b4d261fca8132854d845e4988097611ba2531408ec91cf3fd9d20a"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66442c2546446944437df74379e9cf9e9db353e61301d1a0e26482f43f0dd8"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e41506fec7a7f9405b14aa2d5c8abbb4dbbd09d88f9496958b6d00cb4d45330"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f7d4a670107d75dfe5ad080bed6c341d18c4442f9378c9f58e5851e86eb79965"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41ce1f1e2c7755abfc7e759dc34d7d05fd221723ff822947132dc934d122fe22"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:44264ecae91b30e5633013fb66f6ddd05c006d3e0e884f75ce0b4755b3e3847b"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:3c174dc350d3ec52deb77f2faf05c439331d6ed5e702fc247ccb4e6b62d884b7"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:2dfab5fa6a28a0b60a20638dc48e6343c02ea9933e3279ccb132f555a62323d8"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b1c8c20847b9f34e98080da785bb2336ea982e7f913eed5809e5a3c872900f32"},
- {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2c86bf781b12ba417f64f3422cfc302523ac9cd1d8ae8c0f92a1c66e56ef2e86"},
- {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c162b216070f280fa7da844531169be0baf9ccb17263cf5a8bf876fcd3117fa5"},
- {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:36aef61a1678cb778097b4a6eeae96a69875d51d1e8f4d4b491ab3cfb54b5a03"},
- {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f65e5120863c2b266dbcc927b306c5b78e502c71edf3295dfcb9501ec96e5fc7"},
- {file = "lxml-5.3.0-cp310-cp310-win32.whl", hash = "sha256:ef0c1fe22171dd7c7c27147f2e9c3e86f8bdf473fed75f16b0c2e84a5030ce80"},
- {file = "lxml-5.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:052d99051e77a4f3e8482c65014cf6372e61b0a6f4fe9edb98503bb5364cfee3"},
- {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b"},
- {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654"},
- {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d"},
- {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763"},
- {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec"},
- {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be"},
- {file = "lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9"},
- {file = "lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1"},
- {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859"},
- {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99"},
- {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff"},
- {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a"},
- {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8"},
- {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d"},
- {file = "lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30"},
- {file = "lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f"},
- {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a"},
- {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832"},
- {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff"},
- {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd"},
- {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb"},
- {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b"},
- {file = "lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957"},
- {file = "lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d"},
- {file = "lxml-5.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8f0de2d390af441fe8b2c12626d103540b5d850d585b18fcada58d972b74a74e"},
- {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1afe0a8c353746e610bd9031a630a95bcfb1a720684c3f2b36c4710a0a96528f"},
- {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56b9861a71575f5795bde89256e7467ece3d339c9b43141dbdd54544566b3b94"},
- {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:9fb81d2824dff4f2e297a276297e9031f46d2682cafc484f49de182aa5e5df99"},
- {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2c226a06ecb8cdef28845ae976da407917542c5e6e75dcac7cc33eb04aaeb237"},
- {file = "lxml-5.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7d3d1ca42870cdb6d0d29939630dbe48fa511c203724820fc0fd507b2fb46577"},
- {file = "lxml-5.3.0-cp36-cp36m-win32.whl", hash = "sha256:094cb601ba9f55296774c2d57ad68730daa0b13dc260e1f941b4d13678239e70"},
- {file = "lxml-5.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:eafa2c8658f4e560b098fe9fc54539f86528651f61849b22111a9b107d18910c"},
- {file = "lxml-5.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb83f8a875b3d9b458cada4f880fa498646874ba4011dc974e071a0a84a1b033"},
- {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25f1b69d41656b05885aa185f5fdf822cb01a586d1b32739633679699f220391"},
- {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23e0553b8055600b3bf4a00b255ec5c92e1e4aebf8c2c09334f8368e8bd174d6"},
- {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ada35dd21dc6c039259596b358caab6b13f4db4d4a7f8665764d616daf9cc1d"},
- {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:81b4e48da4c69313192d8c8d4311e5d818b8be1afe68ee20f6385d0e96fc9512"},
- {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:2bc9fd5ca4729af796f9f59cd8ff160fe06a474da40aca03fcc79655ddee1a8b"},
- {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07da23d7ee08577760f0a71d67a861019103e4812c87e2fab26b039054594cc5"},
- {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ea2e2f6f801696ad7de8aec061044d6c8c0dd4037608c7cab38a9a4d316bfb11"},
- {file = "lxml-5.3.0-cp37-cp37m-win32.whl", hash = "sha256:5c54afdcbb0182d06836cc3d1be921e540be3ebdf8b8a51ee3ef987537455f84"},
- {file = "lxml-5.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2901429da1e645ce548bf9171784c0f74f0718c3f6150ce166be39e4dd66c3e"},
- {file = "lxml-5.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c56a1d43b2f9ee4786e4658c7903f05da35b923fb53c11025712562d5cc02753"},
- {file = 
"lxml-5.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ee8c39582d2652dcd516d1b879451500f8db3fe3607ce45d7c5957ab2596040"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdf3a3059611f7585a78ee10399a15566356116a4288380921a4b598d807a22"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:146173654d79eb1fc97498b4280c1d3e1e5d58c398fa530905c9ea50ea849b22"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0a7056921edbdd7560746f4221dca89bb7a3fe457d3d74267995253f46343f15"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:9e4b47ac0f5e749cfc618efdf4726269441014ae1d5583e047b452a32e221920"}, - {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f914c03e6a31deb632e2daa881fe198461f4d06e57ac3d0e05bbcab8eae01945"}, - {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:213261f168c5e1d9b7535a67e68b1f59f92398dd17a56d934550837143f79c42"}, - {file = "lxml-5.3.0-cp38-cp38-win32.whl", hash = "sha256:218c1b2e17a710e363855594230f44060e2025b05c80d1f0661258142b2add2e"}, - {file = "lxml-5.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:315f9542011b2c4e1d280e4a20ddcca1761993dda3afc7a73b01235f8641e903"}, - {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ffc23010330c2ab67fac02781df60998ca8fe759e8efde6f8b756a20599c5de"}, - {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2b3778cb38212f52fac9fe913017deea2fdf4eb1a4f8e4cfc6b009a13a6d3fcc"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b0c7a688944891086ba192e21c5229dea54382f4836a209ff8d0a660fac06be"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:747a3d3e98e24597981ca0be0fd922aebd471fa99d0043a3842d00cdcad7ad6a"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86a6b24b19eaebc448dc56b87c4865527855145d851f9fc3891673ff97950540"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b11a5d918a6216e521c715b02749240fb07ae5a1fefd4b7bf12f833bc8b4fe70"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b87753c784d6acb8a25b05cb526c3406913c9d988d51f80adecc2b0775d6aa"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:109fa6fede314cc50eed29e6e56c540075e63d922455346f11e4d7a036d2b8cf"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:02ced472497b8362c8e902ade23e3300479f4f43e45f4105c85ef43b8db85229"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:6b038cc86b285e4f9fea2ba5ee76e89f21ed1ea898e287dc277a25884f3a7dfe"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7437237c6a66b7ca341e868cda48be24b8701862757426852c9b3186de1da8a2"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7f41026c1d64043a36fda21d64c5026762d53a77043e73e94b71f0521939cc71"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:482c2f67761868f0108b1743098640fbb2a28a8e15bf3f47ada9fa59d9fe08c3"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1483fd3358963cc5c1c9b122c80606a3a79ee0875bcac0204149fa09d6ff2727"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash 
= "sha256:2dec2d1130a9cda5b904696cec33b2cfb451304ba9081eeda7f90f724097300a"}, - {file = "lxml-5.3.0-cp39-cp39-win32.whl", hash = "sha256:a0eabd0a81625049c5df745209dc7fcef6e2aea7793e5f003ba363610aa0a3ff"}, - {file = "lxml-5.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:89e043f1d9d341c52bf2af6d02e6adde62e0a46e6755d5eb60dc6e4f0b8aeca2"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:8dc2c0395bea8254d8daebc76dcf8eb3a95ec2a46fa6fae5eaccee366bfe02ce"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6ba0d3dcac281aad8a0e5b14c7ed6f9fa89c8612b47939fc94f80b16e2e9bc83"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:94d6c3782907b5e40e21cadf94b13b0842ac421192f26b84c45f13f3c9d5dc27"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c300306673aa0f3ed5ed9372b21867690a17dba38c68c44b287437c362ce486b"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d9b952e07aed35fe2e1a7ad26e929595412db48535921c5013edc8aa4a35ce"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:01220dca0d066d1349bd6a1726856a78f7929f3878f7e2ee83c296c69495309e"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2d9b8d9177afaef80c53c0a9e30fa252ff3036fb1c6494d427c066a4ce6a282f"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:20094fc3f21ea0a8669dc4c61ed7fa8263bd37d97d93b90f28fc613371e7a875"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ace2c2326a319a0bb8a8b0e5b570c764962e95818de9f259ce814ee666603f19"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92e67a0be1639c251d21e35fe74df6bcc40cba445c2cda7c4a967656733249e2"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5350b55f9fecddc51385463a4f67a5da829bc741e38cf689f38ec9023f54ab"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c1fefd7e3d00921c44dc9ca80a775af49698bbfd92ea84498e56acffd4c5469"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:71a8dd38fbd2f2319136d4ae855a7078c69c9a38ae06e0c17c73fd70fc6caad8"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:97acf1e1fd66ab53dacd2c35b319d7e548380c2e9e8c54525c6e76d21b1ae3b1"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:68934b242c51eb02907c5b81d138cb977b2129a0a75a8f8b60b01cb8586c7b21"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b710bc2b8292966b23a6a0121f7a6c51d45d2347edcc75f016ac123b8054d3f2"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:18feb4b93302091b1541221196a2155aa296c363fd233814fa11e181adebc52f"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3eb44520c4724c2e1a57c0af33a379eee41792595023f367ba3952a2d96c2aab"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:609251a0ca4770e5a8768ff902aa02bf636339c5a93f9349b48eb1f606f7f3e9"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:516f491c834eb320d6c843156440fe7fc0d50b33e44387fcec5b02f0bc118a4c"}, - {file = "lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f"}, + {file = "lxml-5.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e7bc6df34d42322c5289e37e9971d6ed114e3776b45fa879f734bded9d1fea9c"}, + {file = "lxml-5.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6854f8bd8a1536f8a1d9a3655e6354faa6406621cf857dc27b681b69860645c7"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:696ea9e87442467819ac22394ca36cb3d01848dad1be6fac3fb612d3bd5a12cf"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ef80aeac414f33c24b3815ecd560cee272786c3adfa5f31316d8b349bfade28"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b9c2754cef6963f3408ab381ea55f47dabc6f78f4b8ebb0f0b25cf1ac1f7609"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7a62cc23d754bb449d63ff35334acc9f5c02e6dae830d78dab4dd12b78a524f4"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f82125bc7203c5ae8633a7d5d20bcfdff0ba33e436e4ab0abc026a53a8960b7"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:b67319b4aef1a6c56576ff544b67a2a6fbd7eaee485b241cabf53115e8908b8f"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:a8ef956fce64c8551221f395ba21d0724fed6b9b6242ca4f2f7beb4ce2f41997"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:0a01ce7d8479dce84fc03324e3b0c9c90b1ece9a9bb6a1b6c9025e7e4520e78c"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:91505d3ddebf268bb1588eb0f63821f738d20e1e7f05d3c647a5ca900288760b"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a3bcdde35d82ff385f4ede021df801b5c4a5bcdfb61ea87caabcebfc4945dc1b"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aea7c06667b987787c7d1f5e1dfcd70419b711cdb47d6b4bb4ad4b76777a0563"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:a7fb111eef4d05909b82152721a59c1b14d0f365e2be4c742a473c5d7372f4f5"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:43d549b876ce64aa18b2328faff70f5877f8c6dede415f80a2f799d31644d776"}, + {file = "lxml-5.4.0-cp310-cp310-win32.whl", hash = "sha256:75133890e40d229d6c5837b0312abbe5bac1c342452cf0e12523477cd3aa21e7"}, + {file = "lxml-5.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:de5b4e1088523e2b6f730d0509a9a813355b7f5659d70eb4f319c76beea2e250"}, + {file = "lxml-5.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:98a3912194c079ef37e716ed228ae0dcb960992100461b704aea4e93af6b0bb9"}, + {file = "lxml-5.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ea0252b51d296a75f6118ed0d8696888e7403408ad42345d7dfd0d1e93309a7"}, + {file = 
"lxml-5.4.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b92b69441d1bd39f4940f9eadfa417a25862242ca2c396b406f9272ef09cdcaa"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20e16c08254b9b6466526bc1828d9370ee6c0d60a4b64836bc3ac2917d1e16df"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7605c1c32c3d6e8c990dd28a0970a3cbbf1429d5b92279e37fda05fb0c92190e"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ecf4c4b83f1ab3d5a7ace10bafcb6f11df6156857a3c418244cef41ca9fa3e44"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cef4feae82709eed352cd7e97ae062ef6ae9c7b5dbe3663f104cd2c0e8d94ba"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:df53330a3bff250f10472ce96a9af28628ff1f4efc51ccba351a8820bca2a8ba"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:aefe1a7cb852fa61150fcb21a8c8fcea7b58c4cb11fbe59c97a0a4b31cae3c8c"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ef5a7178fcc73b7d8c07229e89f8eb45b2908a9238eb90dcfc46571ccf0383b8"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d2ed1b3cb9ff1c10e6e8b00941bb2e5bb568b307bfc6b17dffbbe8be5eecba86"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:72ac9762a9f8ce74c9eed4a4e74306f2f18613a6b71fa065495a67ac227b3056"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f5cb182f6396706dc6cc1896dd02b1c889d644c081b0cdec38747573db88a7d7"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3a3178b4873df8ef9457a4875703488eb1622632a9cee6d76464b60e90adbfcd"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e094ec83694b59d263802ed03a8384594fcce477ce484b0cbcd0008a211ca751"}, + {file = "lxml-5.4.0-cp311-cp311-win32.whl", hash = "sha256:4329422de653cdb2b72afa39b0aa04252fca9071550044904b2e7036d9d97fe4"}, + {file = "lxml-5.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd3be6481ef54b8cfd0e1e953323b7aa9d9789b94842d0e5b142ef4bb7999539"}, + {file = "lxml-5.4.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b5aff6f3e818e6bdbbb38e5967520f174b18f539c2b9de867b1e7fde6f8d95a4"}, + {file = "lxml-5.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942a5d73f739ad7c452bf739a62a0f83e2578afd6b8e5406308731f4ce78b16d"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:460508a4b07364d6abf53acaa0a90b6d370fafde5693ef37602566613a9b0779"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:529024ab3a505fed78fe3cc5ddc079464e709f6c892733e3f5842007cec8ac6e"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ca56ebc2c474e8f3d5761debfd9283b8b18c76c4fc0967b74aeafba1f5647f9"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a81e1196f0a5b4167a8dafe3a66aa67c4addac1b22dc47947abd5d5c7a3f24b5"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00b8686694423ddae324cf614e1b9659c2edb754de617703c3d29ff568448df5"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = 
"sha256:c5681160758d3f6ac5b4fea370495c48aac0989d6a0f01bb9a72ad8ef5ab75c4"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:2dc191e60425ad70e75a68c9fd90ab284df64d9cd410ba8d2b641c0c45bc006e"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:67f779374c6b9753ae0a0195a892a1c234ce8416e4448fe1e9f34746482070a7"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:79d5bfa9c1b455336f52343130b2067164040604e41f6dc4d8313867ed540079"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3d3c30ba1c9b48c68489dc1829a6eede9873f52edca1dda900066542528d6b20"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1af80c6316ae68aded77e91cd9d80648f7dd40406cef73df841aa3c36f6907c8"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4d885698f5019abe0de3d352caf9466d5de2baded00a06ef3f1216c1a58ae78f"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea53d51859b6c64e7c51d522c03cc2c48b9b5d6172126854cc7f01aa11f52bc"}, + {file = "lxml-5.4.0-cp312-cp312-win32.whl", hash = "sha256:d90b729fd2732df28130c064aac9bb8aff14ba20baa4aee7bd0795ff1187545f"}, + {file = "lxml-5.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1dc4ca99e89c335a7ed47d38964abcb36c5910790f9bd106f2a8fa2ee0b909d2"}, + {file = "lxml-5.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:773e27b62920199c6197130632c18fb7ead3257fce1ffb7d286912e56ddb79e0"}, + {file = "lxml-5.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ce9c671845de9699904b1e9df95acfe8dfc183f2310f163cdaa91a3535af95de"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9454b8d8200ec99a224df8854786262b1bd6461f4280064c807303c642c05e76"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cccd007d5c95279e529c146d095f1d39ac05139de26c098166c4beb9374b0f4d"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0fce1294a0497edb034cb416ad3e77ecc89b313cff7adbee5334e4dc0d11f422"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24974f774f3a78ac12b95e3a20ef0931795ff04dbb16db81a90c37f589819551"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:497cab4d8254c2a90bf988f162ace2ddbfdd806fce3bda3f581b9d24c852e03c"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:e794f698ae4c5084414efea0f5cc9f4ac562ec02d66e1484ff822ef97c2cadff"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:2c62891b1ea3094bb12097822b3d44b93fc6c325f2043c4d2736a8ff09e65f60"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:142accb3e4d1edae4b392bd165a9abdee8a3c432a2cca193df995bc3886249c8"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1a42b3a19346e5601d1b8296ff6ef3d76038058f311902edd574461e9c036982"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4291d3c409a17febf817259cb37bc62cb7eb398bcc95c1356947e2871911ae61"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4f5322cf38fe0e21c2d73901abf68e6329dc02a4994e483adbcf92b568a09a54"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0be91891bdb06ebe65122aa6bf3fc94489960cf7e03033c6f83a90863b23c58b"}, + {file = 
"lxml-5.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:15a665ad90054a3d4f397bc40f73948d48e36e4c09f9bcffc7d90c87410e478a"}, + {file = "lxml-5.4.0-cp313-cp313-win32.whl", hash = "sha256:d5663bc1b471c79f5c833cffbc9b87d7bf13f87e055a5c86c363ccd2348d7e82"}, + {file = "lxml-5.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:bcb7a1096b4b6b24ce1ac24d4942ad98f983cd3810f9711bcd0293f43a9d8b9f"}, + {file = "lxml-5.4.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7be701c24e7f843e6788353c055d806e8bd8466b52907bafe5d13ec6a6dbaecd"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb54f7c6bafaa808f27166569b1511fc42701a7713858dddc08afdde9746849e"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97dac543661e84a284502e0cf8a67b5c711b0ad5fb661d1bd505c02f8cf716d7"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:c70e93fba207106cb16bf852e421c37bbded92acd5964390aad07cb50d60f5cf"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9c886b481aefdf818ad44846145f6eaf373a20d200b5ce1a5c8e1bc2d8745410"}, + {file = "lxml-5.4.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:fa0e294046de09acd6146be0ed6727d1f42ded4ce3ea1e9a19c11b6774eea27c"}, + {file = "lxml-5.4.0-cp36-cp36m-win32.whl", hash = "sha256:61c7bbf432f09ee44b1ccaa24896d21075e533cd01477966a5ff5a71d88b2f56"}, + {file = "lxml-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7ce1a171ec325192c6a636b64c94418e71a1964f56d002cc28122fceff0b6121"}, + {file = "lxml-5.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:795f61bcaf8770e1b37eec24edf9771b307df3af74d1d6f27d812e15a9ff3872"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29f451a4b614a7b5b6c2e043d7b64a15bd8304d7e767055e8ab68387a8cacf4e"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:891f7f991a68d20c75cb13c5c9142b2a3f9eb161f1f12a9489c82172d1f133c0"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4aa412a82e460571fad592d0f93ce9935a20090029ba08eca05c614f99b0cc92"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:ac7ba71f9561cd7d7b55e1ea5511543c0282e2b6450f122672a2694621d63b7e"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:c5d32f5284012deaccd37da1e2cd42f081feaa76981f0eaa474351b68df813c5"}, + {file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:ce31158630a6ac85bddd6b830cffd46085ff90498b397bd0a259f59d27a12188"}, + {file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:31e63621e073e04697c1b2d23fcb89991790eef370ec37ce4d5d469f40924ed6"}, + {file = "lxml-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:be2ba4c3c5b7900246a8f866580700ef0d538f2ca32535e991027bdaba944063"}, + {file = "lxml-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:09846782b1ef650b321484ad429217f5154da4d6e786636c38e434fa32e94e49"}, + {file = "lxml-5.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eaf24066ad0b30917186420d51e2e3edf4b0e2ea68d8cd885b14dc8afdcf6556"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b31a3a77501d86d8ade128abb01082724c0dfd9524f542f2f07d693c9f1175f"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0e108352e203c7afd0eb91d782582f00a0b16a948d204d4dec8565024fafeea5"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11a96c3b3f7551c8a8109aa65e8594e551d5a84c76bf950da33d0fb6dfafab7"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:ca755eebf0d9e62d6cb013f1261e510317a41bf4650f22963474a663fdfe02aa"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:4cd915c0fb1bed47b5e6d6edd424ac25856252f09120e3e8ba5154b6b921860e"}, + {file = "lxml-5.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:226046e386556a45ebc787871d6d2467b32c37ce76c2680f5c608e25823ffc84"}, + {file = "lxml-5.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b108134b9667bcd71236c5a02aad5ddd073e372fb5d48ea74853e009fe38acb6"}, + {file = "lxml-5.4.0-cp38-cp38-win32.whl", hash = "sha256:1320091caa89805df7dcb9e908add28166113dcd062590668514dbd510798c88"}, + {file = "lxml-5.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:073eb6dcdf1f587d9b88c8c93528b57eccda40209cf9be549d469b942b41d70b"}, + {file = "lxml-5.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bda3ea44c39eb74e2488297bb39d47186ed01342f0022c8ff407c250ac3f498e"}, + {file = "lxml-5.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9ceaf423b50ecfc23ca00b7f50b64baba85fb3fb91c53e2c9d00bc86150c7e40"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:664cdc733bc87449fe781dbb1f309090966c11cc0c0cd7b84af956a02a8a4729"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67ed8a40665b84d161bae3181aa2763beea3747f748bca5874b4af4d75998f87"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b4a3bd174cc9cdaa1afbc4620c049038b441d6ba07629d89a83b408e54c35cd"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:b0989737a3ba6cf2a16efb857fb0dfa20bc5c542737fddb6d893fde48be45433"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:dc0af80267edc68adf85f2a5d9be1cdf062f973db6790c1d065e45025fa26140"}, + {file = "lxml-5.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:639978bccb04c42677db43c79bdaa23785dc7f9b83bfd87570da8207872f1ce5"}, + {file = "lxml-5.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5a99d86351f9c15e4a901fc56404b485b1462039db59288b203f8c629260a142"}, + {file = "lxml-5.4.0-cp39-cp39-win32.whl", hash = "sha256:3e6d5557989cdc3ebb5302bbdc42b439733a841891762ded9514e74f60319ad6"}, + {file = "lxml-5.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:a8c9b7f16b63e65bbba889acb436a1034a82d34fa09752d754f88d708eca80e1"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1b717b00a71b901b4667226bba282dd462c42ccf618ade12f9ba3674e1fabc55"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27a9ded0f0b52098ff89dd4c418325b987feed2ea5cc86e8860b0f844285d740"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b7ce10634113651d6f383aa712a194179dcd496bd8c41e191cec2099fa09de5"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53370c26500d22b45182f98847243efb518d268374a9570409d2e2276232fd37"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c6364038c519dffdbe07e3cf42e6a7f8b90c275d4d1617a69bb59734c1a2d571"}, + {file = 
"lxml-5.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b12cb6527599808ada9eb2cd6e0e7d3d8f13fe7bbb01c6311255a15ded4c7ab4"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5f11a1526ebd0dee85e7b1e39e39a0cc0d9d03fb527f56d8457f6df48a10dc0c"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48b4afaf38bf79109bb060d9016fad014a9a48fb244e11b94f74ae366a64d252"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de6f6bb8a7840c7bf216fb83eec4e2f79f7325eca8858167b68708b929ab2172"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5cca36a194a4eb4e2ed6be36923d3cffd03dcdf477515dea687185506583d4c9"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b7c86884ad23d61b025989d99bfdd92a7351de956e01c61307cb87035960bcb1"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:53d9469ab5460402c19553b56c3648746774ecd0681b1b27ea74d5d8a3ef5590"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:56dbdbab0551532bb26c19c914848d7251d73edb507c3079d6805fa8bba5b706"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14479c2ad1cb08b62bb941ba8e0e05938524ee3c3114644df905d2331c76cd57"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32697d2ea994e0db19c1df9e40275ffe84973e4232b5c274f47e7c1ec9763cdd"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:24f6df5f24fc3385f622c0c9d63fe34604893bc1a5bdbb2dbf5870f85f9a404a"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:151d6c40bc9db11e960619d2bf2ec5829f0aaffb10b41dcf6ad2ce0f3c0b2325"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4025bf2884ac4370a3243c5aa8d66d3cb9e15d3ddd0af2d796eccc5f0244390e"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9459e6892f59ecea2e2584ee1058f5d8f629446eab52ba2305ae13a32a059530"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47fb24cc0f052f0576ea382872b3fc7e1f7e3028e53299ea751839418ade92a6"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50441c9de951a153c698b9b99992e806b71c1f36d14b154592580ff4a9d0d877"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ab339536aa798b1e17750733663d272038bf28069761d5be57cb4a9b0137b4f8"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9776af1aad5a4b4a1317242ee2bea51da54b2a7b7b48674be736d463c999f37d"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:63e7968ff83da2eb6fdda967483a7a023aa497d85ad8f05c3ad9b1f2e8c84987"}, + {file = "lxml-5.4.0.tar.gz", hash = "sha256:d12832e1dbea4be280b22fd0ea7c9b87f0d8fc51ba06e92dc62d52f804f78ebd"}, ] [package.extras] cssselect = ["cssselect (>=0.7)"] -html-clean = ["lxml-html-clean"] +html-clean = ["lxml_html_clean"] html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=3.0.11)"] +source = ["Cython (>=3.0.11,<3.1.0)"] [[package]] name = "lxml-stubs" @@ -595,6 +660,7 @@ version = "0.5.1" description = "Type annotations for the lxml package" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "lxml-stubs-0.5.1.tar.gz", hash = 
"sha256:e0ec2aa1ce92d91278b719091ce4515c12adc1d564359dfaf81efa7d4feab79d"}, {file = "lxml_stubs-0.5.1-py3-none-any.whl", hash = "sha256:1f689e5dbc4b9247cb09ae820c7d34daeb1fdbd1db06123814b856dae7787272"}, @@ -604,339 +670,515 @@ files = [ test = ["coverage[toml] (>=7.2.5)", "mypy (>=1.2.0)", "pytest (>=7.3.0)", "pytest-mypy-plugins (>=1.10.1)"] [[package]] -name = "markdown-it-py" -version = "3.0.0" -description = "Python port of markdown-it. Markdown parsing, done right!" +name = "markdown" +version = "3.8" +description = "Python implementation of John Gruber's Markdown." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["docs"] files = [ - {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, - {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, + {file = "markdown-3.8-py3-none-any.whl", hash = "sha256:794a929b79c5af141ef5ab0f2f642d0f7b1872981250230e72682346f7cc90dc"}, + {file = "markdown-3.8.tar.gz", hash = "sha256:7df81e63f0df5c4b24b7d156eb81e4690595239b7d70937d0409f1b0de319c6f"}, ] [package.dependencies] -mdurl = ">=0.1,<1.0" +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} [package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] +docs = ["mdx_gh_links (>=0.2)", "mkdocs (>=1.6)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] [[package]] name = "markupsafe" -version = "2.1.3" +version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 🐍." 
+optional = false +python-versions = ">=3.6" +groups = ["docs"] files = [ - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, - {file = 
"MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, - {file = 
"MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, - {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, -] - -[[package]] -name = "mdit-py-plugins" -version = "0.4.0" -description = "Collection of plugins for markdown-it-py" + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + +[[package]] +name = "mkdocs" +version = "1.6.1" +description = "Project documentation with Markdown." 
optional = false python-versions = ">=3.8" +groups = ["docs"] files = [ - {file = "mdit_py_plugins-0.4.0-py3-none-any.whl", hash = "sha256:b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9"}, - {file = "mdit_py_plugins-0.4.0.tar.gz", hash = "sha256:d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b"}, + {file = "mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e"}, + {file = "mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2"}, ] [package.dependencies] -markdown-it-py = ">=1.0.0,<4.0.0" +click = ">=7.0" +colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} +ghp-import = ">=1.0" +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} +jinja2 = ">=2.11.1" +markdown = ">=3.3.6" +markupsafe = ">=2.0.1" +mergedeep = ">=1.3.4" +mkdocs-get-deps = ">=0.2.0" +packaging = ">=20.5" +pathspec = ">=0.11.1" +pyyaml = ">=5.1" +pyyaml-env-tag = ">=0.1" +watchdog = ">=2.0" [package.extras] -code-style = ["pre-commit"] -rtd = ["myst-parser", "sphinx-book-theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] +i18n = ["babel (>=2.9.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.4)", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] + +[[package]] +name = "mkdocs-autorefs" +version = "1.4.2" +description = "Automatically link across pages in MkDocs." +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "mkdocs_autorefs-1.4.2-py3-none-any.whl", hash = "sha256:83d6d777b66ec3c372a1aad4ae0cf77c243ba5bcda5bf0c6b8a2c5e7a3d89f13"}, + {file = "mkdocs_autorefs-1.4.2.tar.gz", hash = "sha256:e2ebe1abd2b67d597ed19378c0fff84d73d1dbce411fce7a7cc6f161888b6749"}, +] + +[package.dependencies] +Markdown = ">=3.3" +markupsafe = ">=2.0.1" +mkdocs = ">=1.1" [[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" +name = "mkdocs-gen-files" +version = "0.5.0" +description = "MkDocs plugin to programmatically generate documentation pages during the build" optional = false python-versions = ">=3.7" +groups = ["docs"] files = [ - {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, - {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, + {file = "mkdocs_gen_files-0.5.0-py3-none-any.whl", hash = "sha256:7ac060096f3f40bd19039e7277dd3050be9a453c8ac578645844d4d91d7978ea"}, + {file = "mkdocs_gen_files-0.5.0.tar.gz", hash = "sha256:4c7cf256b5d67062a788f6b1d035e157fc1a9498c2399be9af5257d4ff4d19bc"}, ] +[package.dependencies] +mkdocs = ">=1.0.3" + [[package]] -name = "mypy" -version = "1.11.2" -description = "Optional static typing for Python" +name = "mkdocs-get-deps" +version = "0.2.0" +description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" optional = false python-versions = ">=3.8" +groups = ["docs"] files = [ - {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, - {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, - {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, - {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, - {file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, - {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, - {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, - {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, - {file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, - {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, - {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, - {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, - {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, - {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, - {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, - {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, - {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, - {file = 
"mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, - {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, + {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, + {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, ] [package.dependencies] -mypy-extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.6.0" +importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} +mergedeep = ">=1.3.4" +platformdirs = ">=2.2.0" +pyyaml = ">=5.1" + +[[package]] +name = "mkdocs-include-markdown-plugin" +version = "7.1.5" +description = "Mkdocs Markdown includer plugin." +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "mkdocs_include_markdown_plugin-7.1.5-py3-none-any.whl", hash = "sha256:d0b96edee45e7fda5eb189e63331cfaf1bf1fbdbebbd08371f1daa77045d3ae9"}, + {file = "mkdocs_include_markdown_plugin-7.1.5.tar.gz", hash = "sha256:a986967594da6789226798e3c41c70bc17130fadb92b4313f42bd3defdac0adc"}, +] + +[package.dependencies] +mkdocs = ">=1.4" +wcmatch = "*" [package.extras] -dmypy = ["psutil (>=4.0)"] -install-types = ["pip"] -mypyc = ["setuptools (>=50)"] -reports = ["lxml"] +cache = ["platformdirs"] [[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." +name = "mkdocs-material" +version = "9.6.14" +description = "Documentation that simply works" optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" +groups = ["docs"] files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, + {file = "mkdocs_material-9.6.14-py3-none-any.whl", hash = "sha256:3b9cee6d3688551bf7a8e8f41afda97a3c39a12f0325436d76c86706114b721b"}, + {file = "mkdocs_material-9.6.14.tar.gz", hash = "sha256:39d795e90dce6b531387c255bd07e866e027828b7346d3eba5ac3de265053754"}, ] +[package.dependencies] +babel = ">=2.10,<3.0" +backrefs = ">=5.7.post1,<6.0" +colorama = ">=0.4,<1.0" +jinja2 = ">=3.1,<4.0" +markdown = ">=3.2,<4.0" +mkdocs = ">=1.6,<2.0" +mkdocs-material-extensions = ">=1.3,<2.0" +paginate = ">=0.5,<1.0" +pygments = ">=2.16,<3.0" +pymdown-extensions = ">=10.2,<11.0" +requests = ">=2.26,<3.0" + +[package.extras] +git = ["mkdocs-git-committers-plugin-2 (>=1.1,<3)", "mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] +imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] +recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] + [[package]] -name = "myst-parser" -version = "3.0.1" -description = "An extended [CommonMark](https://spec.commonmark.org/) compliant parser," +name = "mkdocs-material-extensions" +version = "1.3.1" +description = "Extension pack for Python Markdown and MkDocs Material." 
optional = false python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, + {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, +] + +[[package]] +name = "mkdocstrings" +version = "0.29.1" +description = "Automatic documentation from sources, for MkDocs." +optional = false +python-versions = ">=3.9" +groups = ["docs"] files = [ - {file = "myst_parser-3.0.1-py3-none-any.whl", hash = "sha256:6457aaa33a5d474aca678b8ead9b3dc298e89c68e67012e73146ea6fd54babf1"}, - {file = "myst_parser-3.0.1.tar.gz", hash = "sha256:88f0cb406cb363b077d176b51c476f62d60604d68a8dcdf4832e080441301a87"}, + {file = "mkdocstrings-0.29.1-py3-none-any.whl", hash = "sha256:37a9736134934eea89cbd055a513d40a020d87dfcae9e3052c2a6b8cd4af09b6"}, + {file = "mkdocstrings-0.29.1.tar.gz", hash = "sha256:8722f8f8c5cd75da56671e0a0c1bbed1df9946c0cef74794d6141b34011abd42"}, ] [package.dependencies] -docutils = ">=0.18,<0.22" -jinja2 = "*" -markdown-it-py = ">=3.0,<4.0" -mdit-py-plugins = ">=0.4,<1.0" -pyyaml = "*" -sphinx = ">=6,<8" +importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} +Jinja2 = ">=2.11.1" +Markdown = ">=3.6" +MarkupSafe = ">=1.1" +mkdocs = ">=1.6" +mkdocs-autorefs = ">=1.4" +mkdocstrings-python = {version = ">=1.16.2", optional = true, markers = "extra == \"python\""} +pymdown-extensions = ">=6.3" [package.extras] -code-style = ["pre-commit (>=3.0,<4.0)"] -linkify = ["linkify-it-py (>=2.0,<3.0)"] -rtd = ["ipython", "sphinx (>=7)", "sphinx-autodoc2 (>=0.5.0,<0.6.0)", "sphinx-book-theme (>=1.1,<2.0)", "sphinx-copybutton", "sphinx-design", "sphinx-pyscript", "sphinx-tippy (>=0.4.3)", "sphinx-togglebutton", "sphinxext-opengraph (>=0.9.0,<0.10.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"] -testing = ["beautifulsoup4", "coverage[toml]", "defusedxml", "pytest (>=8,<9)", "pytest-cov", "pytest-param-files (>=0.6.0,<0.7.0)", "pytest-regressions", "sphinx-pytest"] -testing-docutils = ["pygments", "pytest (>=8,<9)", "pytest-param-files (>=0.6.0,<0.7.0)"] +crystal = ["mkdocstrings-crystal (>=0.3.4)"] +python = ["mkdocstrings-python (>=1.16.2)"] +python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] + +[[package]] +name = "mkdocstrings-python" +version = "1.16.11" +description = "A Python handler for mkdocstrings." 
+optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "mkdocstrings_python-1.16.11-py3-none-any.whl", hash = "sha256:25d96cc9c1f9c272ea1bd8222c900b5f852bf46c984003e9c7c56eaa4696190f"}, + {file = "mkdocstrings_python-1.16.11.tar.gz", hash = "sha256:935f95efa887f99178e4a7becaaa1286fb35adafffd669b04fd611d97c00e5ce"}, +] + +[package.dependencies] +griffe = ">=1.6.2" +mkdocs-autorefs = ">=1.4" +mkdocstrings = ">=0.28.3" +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "mypy" +version = "1.16.0" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "mypy-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7909541fef256527e5ee9c0a7e2aeed78b6cda72ba44298d1334fe7881b05c5c"}, + {file = "mypy-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e71d6f0090c2256c713ed3d52711d01859c82608b5d68d4fa01a3fe30df95571"}, + {file = "mypy-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:936ccfdd749af4766be824268bfe22d1db9eb2f34a3ea1d00ffbe5b5265f5491"}, + {file = "mypy-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4086883a73166631307fdd330c4a9080ce24913d4f4c5ec596c601b3a4bdd777"}, + {file = "mypy-1.16.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:feec38097f71797da0231997e0de3a58108c51845399669ebc532c815f93866b"}, + {file = "mypy-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:09a8da6a0ee9a9770b8ff61b39c0bb07971cda90e7297f4213741b48a0cc8d93"}, + {file = "mypy-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9f826aaa7ff8443bac6a494cf743f591488ea940dd360e7dd330e30dd772a5ab"}, + {file = "mypy-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:82d056e6faa508501af333a6af192c700b33e15865bda49611e3d7d8358ebea2"}, + {file = "mypy-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:089bedc02307c2548eb51f426e085546db1fa7dd87fbb7c9fa561575cf6eb1ff"}, + {file = "mypy-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6a2322896003ba66bbd1318c10d3afdfe24e78ef12ea10e2acd985e9d684a666"}, + {file = "mypy-1.16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:021a68568082c5b36e977d54e8f1de978baf401a33884ffcea09bd8e88a98f4c"}, + {file = "mypy-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:54066fed302d83bf5128632d05b4ec68412e1f03ef2c300434057d66866cea4b"}, + {file = "mypy-1.16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c5436d11e89a3ad16ce8afe752f0f373ae9620841c50883dc96f8b8805620b13"}, + {file = "mypy-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f2622af30bf01d8fc36466231bdd203d120d7a599a6d88fb22bdcb9dbff84090"}, + {file = "mypy-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d045d33c284e10a038f5e29faca055b90eee87da3fc63b8889085744ebabb5a1"}, + {file = "mypy-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b4968f14f44c62e2ec4a038c8797a87315be8df7740dc3ee8d3bfe1c6bf5dba8"}, + {file = "mypy-1.16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eb14a4a871bb8efb1e4a50360d4e3c8d6c601e7a31028a2c79f9bb659b63d730"}, + {file = "mypy-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:bd4e1ebe126152a7bbaa4daedd781c90c8f9643c79b9748caa270ad542f12bec"}, + {file = 
"mypy-1.16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a9e056237c89f1587a3be1a3a70a06a698d25e2479b9a2f57325ddaaffc3567b"}, + {file = "mypy-1.16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b07e107affb9ee6ce1f342c07f51552d126c32cd62955f59a7db94a51ad12c0"}, + {file = "mypy-1.16.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c6fb60cbd85dc65d4d63d37cb5c86f4e3a301ec605f606ae3a9173e5cf34997b"}, + {file = "mypy-1.16.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a7e32297a437cc915599e0578fa6bc68ae6a8dc059c9e009c628e1c47f91495d"}, + {file = "mypy-1.16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:afe420c9380ccec31e744e8baff0d406c846683681025db3531b32db56962d52"}, + {file = "mypy-1.16.0-cp313-cp313-win_amd64.whl", hash = "sha256:55f9076c6ce55dd3f8cd0c6fff26a008ca8e5131b89d5ba6d86bd3f47e736eeb"}, + {file = "mypy-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f56236114c425620875c7cf71700e3d60004858da856c6fc78998ffe767b73d3"}, + {file = "mypy-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:15486beea80be24ff067d7d0ede673b001d0d684d0095803b3e6e17a886a2a92"}, + {file = "mypy-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f2ed0e0847a80655afa2c121835b848ed101cc7b8d8d6ecc5205aedc732b1436"}, + {file = "mypy-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eb5fbc8063cb4fde7787e4c0406aa63094a34a2daf4673f359a1fb64050e9cb2"}, + {file = "mypy-1.16.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a5fcfdb7318c6a8dd127b14b1052743b83e97a970f0edb6c913211507a255e20"}, + {file = "mypy-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:2e7e0ad35275e02797323a5aa1be0b14a4d03ffdb2e5f2b0489fa07b89c67b21"}, + {file = "mypy-1.16.0-py3-none-any.whl", hash = "sha256:29e1499864a3888bca5c1542f2d7232c6e586295183320caa95758fc84034031"}, + {file = "mypy-1.16.0.tar.gz", hash = "sha256:84b94283f817e2aa6350a14b4a8fb2a35a53c286f97c9d30f53b63620e7af8ab"}, +] + +[package.dependencies] +mypy_extensions = ">=1.0.0" +pathspec = ">=0.9.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing_extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, + {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, +] [[package]] name = "networkx" -version = "3.1" +version = "3.2.1" description = "Python package for creating and manipulating graphs and networks" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["main"] +markers = "extra == \"networkx\"" files = [ - {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, - {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, + {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, + {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"}, ] [package.extras] -default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] -developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] -doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] -test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] +default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "orjson" -version = "3.10.10" +version = "3.10.18" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["main"] +markers = "extra == \"orjson\"" files = [ - {file = "orjson-3.10.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b788a579b113acf1c57e0a68e558be71d5d09aa67f62ca1f68e01117e550a998"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:804b18e2b88022c8905bb79bd2cbe59c0cd014b9328f43da8d3b28441995cda4"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9972572a1d042ec9ee421b6da69f7cc823da5962237563fa548ab17f152f0b9b"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc6993ab1c2ae7dd0711161e303f1db69062955ac2668181bfdf2dd410e65258"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d78e4cacced5781b01d9bc0f0cd8b70b906a0e109825cb41c1b03f9c41e4ce86"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6eb2598df518281ba0cbc30d24c5b06124ccf7e19169e883c14e0831217a0bc"}, - {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:23776265c5215ec532de6238a52707048401a568f0fa0d938008e92a147fe2c7"}, - {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8cc2a654c08755cef90b468ff17c102e2def0edd62898b2486767204a7f5cc9c"}, - {file = "orjson-3.10.10-cp310-none-win32.whl", hash = "sha256:081b3fc6a86d72efeb67c13d0ea7c030017bd95f9868b1e329a376edc456153b"}, - {file = "orjson-3.10.10-cp310-none-win_amd64.whl", hash = "sha256:ff38c5fb749347768a603be1fb8a31856458af839f31f064c5aa74aca5be9efe"}, - {file = "orjson-3.10.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:879e99486c0fbb256266c7c6a67ff84f46035e4f8749ac6317cc83dacd7f993a"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019481fa9ea5ff13b5d5d95e6fd5ab25ded0810c80b150c2c7b1cc8660b662a7"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0dd57eff09894938b4c86d4b871a479260f9e156fa7f12f8cad4b39ea8028bb5"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dbde6d70cd95ab4d11ea8ac5e738e30764e510fc54d777336eec09bb93b8576c"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2625cb37b8fb42e2147404e5ff7ef08712099197a9cd38895006d7053e69d6"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbf3c20c6a7db69df58672a0d5815647ecf78c8e62a4d9bd284e8621c1fe5ccb"}, - {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:75c38f5647e02d423807d252ce4528bf6a95bd776af999cb1fb48867ed01d1f6"}, - {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23458d31fa50ec18e0ec4b0b4343730928296b11111df5f547c75913714116b2"}, - {file = "orjson-3.10.10-cp311-none-win32.whl", hash = "sha256:2787cd9dedc591c989f3facd7e3e86508eafdc9536a26ec277699c0aa63c685b"}, - {file = "orjson-3.10.10-cp311-none-win_amd64.whl", hash = "sha256:6514449d2c202a75183f807bc755167713297c69f1db57a89a1ef4a0170ee269"}, - {file = "orjson-3.10.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8564f48f3620861f5ef1e080ce7cd122ee89d7d6dacf25fcae675ff63b4d6e05"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bf161a32b479034098c5b81f2608f09167ad2fa1c06abd4e527ea6bf4837a9"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b65c93617bcafa7f04b74ae8bc2cc214bd5cb45168a953256ff83015c6747d"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8e28406f97fc2ea0c6150f4c1b6e8261453318930b334abc419214c82314f85"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4d0d9fe174cc7a5bdce2e6c378bcdb4c49b2bf522a8f996aa586020e1b96cee"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3be81c42f1242cbed03cbb3973501fcaa2675a0af638f8be494eaf37143d999"}, - {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65f9886d3bae65be026219c0a5f32dbbe91a9e6272f56d092ab22561ad0ea33b"}, - {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:730ed5350147db7beb23ddaf072f490329e90a1d059711d364b49fe352ec987b"}, - {file = "orjson-3.10.10-cp312-none-win32.whl", hash = 
"sha256:a8f4bf5f1c85bea2170800020d53a8877812892697f9c2de73d576c9307a8a5f"}, - {file = "orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f"}, - {file = "orjson-3.10.10-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44bffae68c291f94ff5a9b4149fe9d1bdd4cd0ff0fb575bcea8351d48db629a1"}, - {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e27b4c6437315df3024f0835887127dac2a0a3ff643500ec27088d2588fa5ae1"}, - {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca84df16d6b49325a4084fd8b2fe2229cb415e15c46c529f868c3387bb1339d"}, - {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c14ce70e8f39bd71f9f80423801b5d10bf93d1dceffdecd04df0f64d2c69bc01"}, - {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:24ac62336da9bda1bd93c0491eff0613003b48d3cb5d01470842e7b52a40d5b4"}, - {file = "orjson-3.10.10-cp313-none-win32.whl", hash = "sha256:eb0a42831372ec2b05acc9ee45af77bcaccbd91257345f93780a8e654efc75db"}, - {file = "orjson-3.10.10-cp313-none-win_amd64.whl", hash = "sha256:f0c4f37f8bf3f1075c6cc8dd8a9f843689a4b618628f8812d0a71e6968b95ffd"}, - {file = "orjson-3.10.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:829700cc18503efc0cf502d630f612884258020d98a317679cd2054af0259568"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0ceb5e0e8c4f010ac787d29ae6299846935044686509e2f0f06ed441c1ca949"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c25908eb86968613216f3db4d3003f1c45d78eb9046b71056ca327ff92bdbd4"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:218cb0bc03340144b6328a9ff78f0932e642199ac184dd74b01ad691f42f93ff"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2277ec2cea3775640dc81ab5195bb5b2ada2fe0ea6eee4677474edc75ea6785"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:848ea3b55ab5ccc9d7bbd420d69432628b691fba3ca8ae3148c35156cbd282aa"}, - {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e3e67b537ac0c835b25b5f7d40d83816abd2d3f4c0b0866ee981a045287a54f3"}, - {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7948cfb909353fce2135dcdbe4521a5e7e1159484e0bb024c1722f272488f2b8"}, - {file = "orjson-3.10.10-cp38-none-win32.whl", hash = "sha256:78bee66a988f1a333dc0b6257503d63553b1957889c17b2c4ed72385cd1b96ae"}, - {file = "orjson-3.10.10-cp38-none-win_amd64.whl", hash = "sha256:f1d647ca8d62afeb774340a343c7fc023efacfd3a39f70c798991063f0c681dd"}, - {file = "orjson-3.10.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5a059afddbaa6dd733b5a2d76a90dbc8af790b993b1b5cb97a1176ca713b5df8"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f9b5c59f7e2a1a410f971c5ebc68f1995822837cd10905ee255f96074537ee6"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d5ef198bafdef4aa9d49a4165ba53ffdc0a9e1c7b6f76178572ab33118afea25"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:aaf29ce0bb5d3320824ec3d1508652421000ba466abd63bdd52c64bcce9eb1fa"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dddd5516bcc93e723d029c1633ae79c4417477b4f57dad9bfeeb6bc0315e654a"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12f2003695b10817f0fa8b8fca982ed7f5761dcb0d93cff4f2f9f6709903fd7"}, - {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:672f9874a8a8fb9bb1b771331d31ba27f57702c8106cdbadad8bda5d10bc1019"}, - {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dcbb0ca5fafb2b378b2c74419480ab2486326974826bbf6588f4dc62137570a"}, - {file = "orjson-3.10.10-cp39-none-win32.whl", hash = "sha256:d9bbd3a4b92256875cb058c3381b782649b9a3c68a4aa9a2fff020c2f9cfc1be"}, - {file = "orjson-3.10.10-cp39-none-win_amd64.whl", hash = "sha256:766f21487a53aee8524b97ca9582d5c6541b03ab6210fbaf10142ae2f3ced2aa"}, - {file = "orjson-3.10.10.tar.gz", hash = "sha256:37949383c4df7b4337ce82ee35b6d7471e55195efa7dcb45ab8226ceadb0fe3b"}, + {file = "orjson-3.10.18-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a45e5d68066b408e4bc383b6e4ef05e717c65219a9e1390abc6155a520cac402"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be3b9b143e8b9db05368b13b04c84d37544ec85bb97237b3a923f076265ec89c"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9b0aa09745e2c9b3bf779b096fa71d1cc2d801a604ef6dd79c8b1bfef52b2f92"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53a245c104d2792e65c8d225158f2b8262749ffe64bc7755b00024757d957a13"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9495ab2611b7f8a0a8a505bcb0f0cbdb5469caafe17b0e404c3c746f9900469"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73be1cbcebadeabdbc468f82b087df435843c809cd079a565fb16f0f3b23238f"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8936ee2679e38903df158037a2f1c108129dee218975122e37847fb1d4ac68"}, + {file = "orjson-3.10.18-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7115fcbc8525c74e4c2b608129bef740198e9a120ae46184dac7683191042056"}, + {file = "orjson-3.10.18-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:771474ad34c66bc4d1c01f645f150048030694ea5b2709b87d3bda273ffe505d"}, + {file = "orjson-3.10.18-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:7c14047dbbea52886dd87169f21939af5d55143dad22d10db6a7514f058156a8"}, + {file = "orjson-3.10.18-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:641481b73baec8db14fdf58f8967e52dc8bda1f2aba3aa5f5c1b07ed6df50b7f"}, + {file = "orjson-3.10.18-cp310-cp310-win32.whl", hash = "sha256:607eb3ae0909d47280c1fc657c4284c34b785bae371d007595633f4b1a2bbe06"}, + {file = "orjson-3.10.18-cp310-cp310-win_amd64.whl", hash = "sha256:8770432524ce0eca50b7efc2a9a5f486ee0113a5fbb4231526d414e6254eba92"}, + {file = "orjson-3.10.18-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e0a183ac3b8e40471e8d843105da6fbe7c070faab023be3b08188ee3f85719b8"}, + {file = "orjson-3.10.18-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:5ef7c164d9174362f85238d0cd4afdeeb89d9e523e4651add6a5d458d6f7d42d"}, + {file = 
"orjson-3.10.18-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afd14c5d99cdc7bf93f22b12ec3b294931518aa019e2a147e8aa2f31fd3240f7"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b672502323b6cd133c4af6b79e3bea36bad2d16bca6c1f645903fce83909a7a"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51f8c63be6e070ec894c629186b1c0fe798662b8687f3d9fdfa5e401c6bd7679"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f9478ade5313d724e0495d167083c6f3be0dd2f1c9c8a38db9a9e912cdaf947"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:187aefa562300a9d382b4b4eb9694806e5848b0cedf52037bb5c228c61bb66d4"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9da552683bc9da222379c7a01779bddd0ad39dd699dd6300abaf43eadee38334"}, + {file = "orjson-3.10.18-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e450885f7b47a0231979d9c49b567ed1c4e9f69240804621be87c40bc9d3cf17"}, + {file = "orjson-3.10.18-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:5e3c9cc2ba324187cd06287ca24f65528f16dfc80add48dc99fa6c836bb3137e"}, + {file = "orjson-3.10.18-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:50ce016233ac4bfd843ac5471e232b865271d7d9d44cf9d33773bcd883ce442b"}, + {file = "orjson-3.10.18-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b3ceff74a8f7ffde0b2785ca749fc4e80e4315c0fd887561144059fb1c138aa7"}, + {file = "orjson-3.10.18-cp311-cp311-win32.whl", hash = "sha256:fdba703c722bd868c04702cac4cb8c6b8ff137af2623bc0ddb3b3e6a2c8996c1"}, + {file = "orjson-3.10.18-cp311-cp311-win_amd64.whl", hash = "sha256:c28082933c71ff4bc6ccc82a454a2bffcef6e1d7379756ca567c772e4fb3278a"}, + {file = "orjson-3.10.18-cp311-cp311-win_arm64.whl", hash = "sha256:a6c7c391beaedd3fa63206e5c2b7b554196f14debf1ec9deb54b5d279b1b46f5"}, + {file = "orjson-3.10.18-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:50c15557afb7f6d63bc6d6348e0337a880a04eaa9cd7c9d569bcb4e760a24753"}, + {file = "orjson-3.10.18-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:356b076f1662c9813d5fa56db7d63ccceef4c271b1fb3dd522aca291375fcf17"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:559eb40a70a7494cd5beab2d73657262a74a2c59aff2068fdba8f0424ec5b39d"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f3c29eb9a81e2fbc6fd7ddcfba3e101ba92eaff455b8d602bf7511088bbc0eae"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6612787e5b0756a171c7d81ba245ef63a3533a637c335aa7fcb8e665f4a0966f"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ac6bd7be0dcab5b702c9d43d25e70eb456dfd2e119d512447468f6405b4a69c"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9f72f100cee8dde70100406d5c1abba515a7df926d4ed81e20a9730c062fe9ad"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dca85398d6d093dd41dc0983cbf54ab8e6afd1c547b6b8a311643917fbf4e0c"}, + {file = "orjson-3.10.18-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:22748de2a07fcc8781a70edb887abf801bb6142e6236123ff93d12d92db3d406"}, + {file = 
"orjson-3.10.18-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:3a83c9954a4107b9acd10291b7f12a6b29e35e8d43a414799906ea10e75438e6"}, + {file = "orjson-3.10.18-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:303565c67a6c7b1f194c94632a4a39918e067bd6176a48bec697393865ce4f06"}, + {file = "orjson-3.10.18-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:86314fdb5053a2f5a5d881f03fca0219bfdf832912aa88d18676a5175c6916b5"}, + {file = "orjson-3.10.18-cp312-cp312-win32.whl", hash = "sha256:187ec33bbec58c76dbd4066340067d9ece6e10067bb0cc074a21ae3300caa84e"}, + {file = "orjson-3.10.18-cp312-cp312-win_amd64.whl", hash = "sha256:f9f94cf6d3f9cd720d641f8399e390e7411487e493962213390d1ae45c7814fc"}, + {file = "orjson-3.10.18-cp312-cp312-win_arm64.whl", hash = "sha256:3d600be83fe4514944500fa8c2a0a77099025ec6482e8087d7659e891f23058a"}, + {file = "orjson-3.10.18-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:69c34b9441b863175cc6a01f2935de994025e773f814412030f269da4f7be147"}, + {file = "orjson-3.10.18-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:1ebeda919725f9dbdb269f59bc94f861afbe2a27dce5608cdba2d92772364d1c"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5adf5f4eed520a4959d29ea80192fa626ab9a20b2ea13f8f6dc58644f6927103"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7592bb48a214e18cd670974f289520f12b7aed1fa0b2e2616b8ed9e069e08595"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f872bef9f042734110642b7a11937440797ace8c87527de25e0c53558b579ccc"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0315317601149c244cb3ecef246ef5861a64824ccbcb8018d32c66a60a84ffbc"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0da26957e77e9e55a6c2ce2e7182a36a6f6b180ab7189315cb0995ec362e049"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb70d489bc79b7519e5803e2cc4c72343c9dc1154258adf2f8925d0b60da7c58"}, + {file = "orjson-3.10.18-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e9e86a6af31b92299b00736c89caf63816f70a4001e750bda179e15564d7a034"}, + {file = "orjson-3.10.18-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:c382a5c0b5931a5fc5405053d36c1ce3fd561694738626c77ae0b1dfc0242ca1"}, + {file = "orjson-3.10.18-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8e4b2ae732431127171b875cb2668f883e1234711d3c147ffd69fe5be51a8012"}, + {file = "orjson-3.10.18-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2d808e34ddb24fc29a4d4041dcfafbae13e129c93509b847b14432717d94b44f"}, + {file = "orjson-3.10.18-cp313-cp313-win32.whl", hash = "sha256:ad8eacbb5d904d5591f27dee4031e2c1db43d559edb8f91778efd642d70e6bea"}, + {file = "orjson-3.10.18-cp313-cp313-win_amd64.whl", hash = "sha256:aed411bcb68bf62e85588f2a7e03a6082cc42e5a2796e06e72a962d7c6310b52"}, + {file = "orjson-3.10.18-cp313-cp313-win_arm64.whl", hash = "sha256:f54c1385a0e6aba2f15a40d703b858bedad36ded0491e55d35d905b2c34a4cc3"}, + {file = "orjson-3.10.18-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c95fae14225edfd699454e84f61c3dd938df6629a00c6ce15e704f57b58433bb"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5232d85f177f98e0cefabb48b5e7f60cff6f3f0365f9c60631fecd73849b2a82"}, + {file 
= "orjson-3.10.18-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2783e121cafedf0d85c148c248a20470018b4ffd34494a68e125e7d5857655d1"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e54ee3722caf3db09c91f442441e78f916046aa58d16b93af8a91500b7bbf273"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2daf7e5379b61380808c24f6fc182b7719301739e4271c3ec88f2984a2d61f89"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f39b371af3add20b25338f4b29a8d6e79a8c7ed0e9dd49e008228a065d07781"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b819ed34c01d88c6bec290e6842966f8e9ff84b7694632e88341363440d4cc0"}, + {file = "orjson-3.10.18-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2f6c57debaef0b1aa13092822cbd3698a1fb0209a9ea013a969f4efa36bdea57"}, + {file = "orjson-3.10.18-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:755b6d61ffdb1ffa1e768330190132e21343757c9aa2308c67257cc81a1a6f5a"}, + {file = "orjson-3.10.18-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ce8d0a875a85b4c8579eab5ac535fb4b2a50937267482be402627ca7e7570ee3"}, + {file = "orjson-3.10.18-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57b5d0673cbd26781bebc2bf86f99dd19bd5a9cb55f71cc4f66419f6b50f3d77"}, + {file = "orjson-3.10.18-cp39-cp39-win32.whl", hash = "sha256:951775d8b49d1d16ca8818b1f20c4965cae9157e7b562a2ae34d3967b8f21c8e"}, + {file = "orjson-3.10.18-cp39-cp39-win_amd64.whl", hash = "sha256:fdd9d68f83f0bc4406610b1ac68bdcded8c5ee58605cc69e643a06f4d075f429"}, + {file = "orjson-3.10.18.tar.gz", hash = "sha256:e8da3947d92123eda795b68228cafe2724815621fe35e8e320a9e9593a4bcd53"}, ] [[package]] name = "packaging" -version = "23.1" +version = "25.0" description = "Core utilities for Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["dev", "docs", "tests"] files = [ - {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, - {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, ] [[package]] -name = "pathspec" -version = "0.11.2" -description = "Utility library for gitignore style pattern matching of file paths." 
+name = "paginate" +version = "0.5.7" +description = "Divides large result sets into pages for easier browsing" optional = false -python-versions = ">=3.7" +python-versions = "*" +groups = ["docs"] files = [ - {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"}, - {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"}, + {file = "paginate-0.5.7-py2.py3-none-any.whl", hash = "sha256:b885e2af73abcf01d9559fd5216b57ef722f8c42affbb63942377668e35c7591"}, + {file = "paginate-0.5.7.tar.gz", hash = "sha256:22bd083ab41e1a8b4f3690544afb2c60c25e5c9a63a30fa2f483f6c60c8e5945"}, ] +[package.extras] +dev = ["pytest", "tox"] +lint = ["black"] + [[package]] -name = "pbr" -version = "5.11.1" -description = "Python Build Reasonableness" +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." optional = false -python-versions = ">=2.6" +python-versions = ">=3.8" +groups = ["dev", "docs"] files = [ - {file = "pbr-5.11.1-py2.py3-none-any.whl", hash = "sha256:567f09558bae2b3ab53cb3c1e2e33e726ff3338e7bae3db5dc954b3a44eef12b"}, - {file = "pbr-5.11.1.tar.gz", hash = "sha256:aefc51675b0b533d56bb5fd1c8c6c0522fe31896679882e1c4c63d5e4a0fccb3"}, + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, ] [[package]] name = "pip" -version = "24.2" +version = "25.1.1" description = "The PyPA recommended tool for installing Python packages." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "pip-24.2-py3-none-any.whl", hash = "sha256:2cd581cf58ab7fcfca4ce8efa6dcacd0de5bf8d0a3eb9ec927e07405f4d9e2a2"}, - {file = "pip-24.2.tar.gz", hash = "sha256:5b5e490b5e9cb275c879595064adce9ebd31b854e3e803740b72f9ccf34a45b8"}, + {file = "pip-25.1.1-py3-none-any.whl", hash = "sha256:2913a38a2abf4ea6b64ab507bd9e967f3b53dc1ede74b01b0931e1ce548751af"}, + {file = "pip-25.1.1.tar.gz", hash = "sha256:3de45d411d308d5054c2168185d8da7f9a2cd753dbac8acbfa88a8909ecd9077"}, ] [[package]] @@ -945,6 +1187,7 @@ version = "7.4.1" description = "pip-tools keeps your pinned dependencies fresh." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pip-tools-7.4.1.tar.gz", hash = "sha256:864826f5073864450e24dbeeb85ce3920cdfb09848a3d69ebf537b521f14bcc9"}, {file = "pip_tools-7.4.1-py3-none-any.whl", hash = "sha256:4c690e5fbae2f21e87843e89c26191f0d9454f362d8acdbd695716493ec8b3a9"}, @@ -965,57 +1208,81 @@ testing = ["flit_core (>=2,<4)", "poetry_core (>=1.0.0)", "pytest (>=7.2.0)", "p [[package]] name = "platformdirs" -version = "3.10.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +version = "4.3.8" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" +groups = ["dev", "docs"] files = [ - {file = "platformdirs-3.10.0-py3-none-any.whl", hash = "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"}, - {file = "platformdirs-3.10.0.tar.gz", hash = "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d"}, + {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, + {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, ] [package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] [[package]] name = "pluggy" -version = "1.5.0" +version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["tests"] files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, ] [package.extras] dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] +testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "pygments" -version = "2.16.1" +version = "2.19.1" description = "Pygments is a syntax highlighting package written in Python." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, + {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pymdown-extensions" +version = "10.15" +description = "Extension pack for Python Markdown." 
+optional = false +python-versions = ">=3.8" +groups = ["docs"] files = [ - {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"}, - {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"}, + {file = "pymdown_extensions-10.15-py3-none-any.whl", hash = "sha256:46e99bb272612b0de3b7e7caf6da8dd5f4ca5212c0b273feb9304e236c484e5f"}, + {file = "pymdown_extensions-10.15.tar.gz", hash = "sha256:0e5994e32155f4b03504f939e501b981d306daf7ec2aa1cd2eb6bd300784f8f7"}, ] +[package.dependencies] +markdown = ">=3.6" +pyyaml = "*" + [package.extras] -plugins = ["importlib-metadata"] +extra = ["pygments (>=2.19.1)"] [[package]] name = "pyparsing" -version = "3.1.4" +version = "3.2.3" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false -python-versions = ">=3.6.8" +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, - {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, + {file = "pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf"}, + {file = "pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be"}, ] [package.extras] @@ -1027,6 +1294,7 @@ version = "1.2.0" description = "Wrappers to call pyproject.toml-based build backend hooks." optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913"}, {file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"}, @@ -1034,13 +1302,14 @@ files = [ [[package]] name = "pytest" -version = "8.3.3" +version = "8.3.5" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" +groups = ["tests"] files = [ - {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, - {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, + {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"}, + {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"}, ] [package.dependencies] @@ -1056,101 +1325,126 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments [[package]] name = "pytest-cov" -version = "5.0.0" +version = "6.1.1" description = "Pytest plugin for measuring coverage." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["tests"] files = [ - {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, - {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, + {file = "pytest_cov-6.1.1-py3-none-any.whl", hash = "sha256:bddf29ed2d0ab6f4df17b4c55b0a657287db8684af9c42ea546b21b1041b3dde"}, + {file = "pytest_cov-6.1.1.tar.gz", hash = "sha256:46935f7aaefba760e716c2ebfbe1c216240b9592966e7da99ea8292d4d3e2a0a"}, ] [package.dependencies] -coverage = {version = ">=5.2.1", extras = ["toml"]} +coverage = {version = ">=7.5", extras = ["toml"]} pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] [[package]] -name = "pytz" -version = "2023.3" -description = "World timezone definitions, modern and historical" +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" optional = false -python-versions = "*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["docs"] files = [ - {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, - {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] +[package.dependencies] +six = ">=1.5" + [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" +groups = ["docs"] files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = 
"PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, 
+ {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] +[[package]] +name = "pyyaml-env-tag" +version = "1.1" +description = "A custom YAML tag for referencing environment variables in YAML files." 
+optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "pyyaml_env_tag-1.1-py3-none-any.whl", hash = "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04"}, + {file = "pyyaml_env_tag-1.1.tar.gz", hash = "sha256:2eb38b75a2d21ee0475d6d97ec19c63287a7e140231e4214969d0eac923cd7ff"}, +] + +[package.dependencies] +pyyaml = "*" + [[package]] name = "requests" -version = "2.31.0" +version = "2.32.3" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["docs"] files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -1165,29 +1459,30 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.7.0" +version = "0.8.6" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" +groups = ["lint"] files = [ - {file = "ruff-0.7.0-py3-none-linux_armv6l.whl", hash = "sha256:0cdf20c2b6ff98e37df47b2b0bd3a34aaa155f59a11182c1303cce79be715628"}, - {file = "ruff-0.7.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:496494d350c7fdeb36ca4ef1c9f21d80d182423718782222c29b3e72b3512737"}, - {file = "ruff-0.7.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:214b88498684e20b6b2b8852c01d50f0651f3cc6118dfa113b4def9f14faaf06"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630fce3fefe9844e91ea5bbf7ceadab4f9981f42b704fae011bb8efcaf5d84be"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:211d877674e9373d4bb0f1c80f97a0201c61bcd1e9d045b6e9726adc42c156aa"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:194d6c46c98c73949a106425ed40a576f52291c12bc21399eb8f13a0f7073495"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:82c2579b82b9973a110fab281860403b397c08c403de92de19568f32f7178598"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9af971fe85dcd5eaed8f585ddbc6bdbe8c217fb8fcf510ea6bca5bdfff56040e"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b641c7f16939b7d24b7bfc0be4102c56562a18281f84f635604e8a6989948914"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d71672336e46b34e0c90a790afeac8a31954fd42872c1f6adaea1dff76fd44f9"}, - {file = "ruff-0.7.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ab7d98c7eed355166f367597e513a6c82408df4181a937628dbec79abb2a1fe4"}, - {file = "ruff-0.7.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1eb54986f770f49edb14f71d33312d79e00e629a57387382200b1ef12d6a4ef9"}, - {file = "ruff-0.7.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:dc452ba6f2bb9cf8726a84aa877061a2462afe9ae0ea1d411c53d226661c601d"}, - {file = "ruff-0.7.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4b406c2dce5be9bad59f2de26139a86017a517e6bcd2688da515481c05a2cb11"}, - {file = "ruff-0.7.0-py3-none-win32.whl", hash 
= "sha256:f6c968509f767776f524a8430426539587d5ec5c662f6addb6aa25bc2e8195ec"}, - {file = "ruff-0.7.0-py3-none-win_amd64.whl", hash = "sha256:ff4aabfbaaba880e85d394603b9e75d32b0693152e16fa659a3064a85df7fce2"}, - {file = "ruff-0.7.0-py3-none-win_arm64.whl", hash = "sha256:10842f69c245e78d6adec7e1db0a7d9ddc2fff0621d730e61657b64fa36f207e"}, - {file = "ruff-0.7.0.tar.gz", hash = "sha256:47a86360cf62d9cd53ebfb0b5eb0e882193fc191c6d717e8bef4462bc3b9ea2b"}, + {file = "ruff-0.8.6-py3-none-linux_armv6l.whl", hash = "sha256:defed167955d42c68b407e8f2e6f56ba52520e790aba4ca707a9c88619e580e3"}, + {file = "ruff-0.8.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:54799ca3d67ae5e0b7a7ac234baa657a9c1784b48ec954a094da7c206e0365b1"}, + {file = "ruff-0.8.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e88b8f6d901477c41559ba540beeb5a671e14cd29ebd5683903572f4b40a9807"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0509e8da430228236a18a677fcdb0c1f102dd26d5520f71f79b094963322ed25"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:91a7ddb221779871cf226100e677b5ea38c2d54e9e2c8ed847450ebbdf99b32d"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:248b1fb3f739d01d528cc50b35ee9c4812aa58cc5935998e776bf8ed5b251e75"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:bc3c083c50390cf69e7e1b5a5a7303898966be973664ec0c4a4acea82c1d4315"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52d587092ab8df308635762386f45f4638badb0866355b2b86760f6d3c076188"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:61323159cf21bc3897674e5adb27cd9e7700bab6b84de40d7be28c3d46dc67cf"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ae4478b1471fc0c44ed52a6fb787e641a2ac58b1c1f91763bafbc2faddc5117"}, + {file = "ruff-0.8.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0c000a471d519b3e6cfc9c6680025d923b4ca140ce3e4612d1a2ef58e11f11fe"}, + {file = "ruff-0.8.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9257aa841e9e8d9b727423086f0fa9a86b6b420fbf4bf9e1465d1250ce8e4d8d"}, + {file = "ruff-0.8.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:45a56f61b24682f6f6709636949ae8cc82ae229d8d773b4c76c09ec83964a95a"}, + {file = "ruff-0.8.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:496dd38a53aa173481a7d8866bcd6451bd934d06976a2505028a50583e001b76"}, + {file = "ruff-0.8.6-py3-none-win32.whl", hash = "sha256:e169ea1b9eae61c99b257dc83b9ee6c76f89042752cb2d83486a7d6e48e8f764"}, + {file = "ruff-0.8.6-py3-none-win_amd64.whl", hash = "sha256:f1d70bef3d16fdc897ee290d7d20da3cbe4e26349f62e8a0274e7a3f4ce7a905"}, + {file = "ruff-0.8.6-py3-none-win_arm64.whl", hash = "sha256:7d7fc2377a04b6e04ffe588caad613d0c460eb2ecba4c0ccbbfe2bc973cbc162"}, + {file = "ruff-0.8.6.tar.gz", hash = "sha256:dcad24b81b62650b0eb8814f576fc65cfee8674772a6e24c9b747911801eeaa5"}, ] [[package]] @@ -1196,6 +1491,7 @@ version = "71.1.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" +groups = ["dev", "tests"] files = [ {file = "setuptools-71.1.0-py3-none-any.whl", hash = "sha256:33874fdc59b3188304b2e7c80d9029097ea31627180896fb549c578ceb8a0855"}, {file = "setuptools-71.1.0.tar.gz", hash = "sha256:032d42ee9fb536e33087fb66cac5f840eb9391ed05637b3f2a76a7c8fb477936"}, @@ -1207,233 
+1503,170 @@ doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments- test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (<0.4)", "pytest-ruff (>=0.2.1)", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] [[package]] -name = "snowballstemmer" -version = "2.2.0" -description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" optional = false -python-versions = "*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["docs"] files = [ - {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, - {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] -name = "sphinx" -version = "7.1.2" -description = "Python documentation generator" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sphinx-7.1.2-py3-none-any.whl", hash = "sha256:d170a81825b2fcacb6dfd5a0d7f578a053e45d3f2b153fecc948c37344eb4cbe"}, - {file = "sphinx-7.1.2.tar.gz", hash = "sha256:780f4d32f1d7d1126576e0e5ecc19dc32ab76cd24e950228dcf7b1f6d3d9e22f"}, -] - -[package.dependencies] -alabaster = ">=0.7,<0.8" -babel = ">=2.9" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.18.1,<0.21" -imagesize = ">=1.3" -importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} -Jinja2 = ">=3.0" -packaging = ">=21.0" -Pygments = ">=2.13" -requests = ">=2.25.0" -snowballstemmer = ">=2.0" -sphinxcontrib-applehelp = "*" -sphinxcontrib-devhelp = "*" -sphinxcontrib-htmlhelp = ">=2.0.0" -sphinxcontrib-jsmath = "*" -sphinxcontrib-qthelp = "*" -sphinxcontrib-serializinghtml = ">=1.1.5" - -[package.extras] -docs = ["sphinxcontrib-websupport"] -lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"] -test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"] - -[[package]] -name = "sphinx-autodoc-typehints" -version = "2.0.1" -description = "Type hints (PEP 484) support for the Sphinx autodoc extension" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sphinx_autodoc_typehints-2.0.1-py3-none-any.whl", hash = "sha256:f73ae89b43a799e587e39266672c1075b2ef783aeb382d3ebed77c38a3fc0149"}, - {file = "sphinx_autodoc_typehints-2.0.1.tar.gz", hash = "sha256:60ed1e3b2c970acc0aa6e877be42d48029a9faec7378a17838716cacd8c10b12"}, -] - -[package.dependencies] -sphinx = ">=7.1.2" - -[package.extras] -docs = ["furo (>=2024.1.29)"] -numpy = ["nptyping (>=2.5)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.4.2)", "diff-cover (>=8.0.3)", "pytest (>=8.0.1)", "pytest-cov 
(>=4.1)", "sphobjinv (>=2.3.1)", "typing-extensions (>=4.9)"] - -[[package]] -name = "sphinxcontrib-apidoc" -version = "0.5.0" -description = "A Sphinx extension for running 'sphinx-apidoc' on each build" +name = "tomli" +version = "2.2.1" +description = "A lil' TOML parser" optional = false python-versions = ">=3.8" +groups = ["dev", "tests"] files = [ - {file = "sphinxcontrib-apidoc-0.5.0.tar.gz", hash = "sha256:65efcd92212a5f823715fb95ee098b458a6bb09a5ee617d9ed3dead97177cd55"}, - {file = "sphinxcontrib_apidoc-0.5.0-py3-none-any.whl", hash = "sha256:c671d644d6dc468be91b813dcddf74d87893bff74fe8f1b8b01b69408f0fb776"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = 
"tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, ] - -[package.dependencies] -pbr = "*" -Sphinx = ">=5.0.0" +markers = {dev = "python_version < \"3.11\"", tests = "python_full_version <= \"3.11.0a6\""} [[package]] -name = "sphinxcontrib-applehelp" -version = "1.0.4" -description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" +name = "types-setuptools" +version = "71.1.0.20240818" +description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" +groups = ["tests"] files = [ - {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"}, - {file = "sphinxcontrib_applehelp-1.0.4-py3-none-any.whl", hash = "sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228"}, + {file = "types-setuptools-71.1.0.20240818.tar.gz", hash = "sha256:f62eaffaa39774462c65fbb49368c4dc1d91a90a28371cb14e1af090ff0e41e3"}, + {file = "types_setuptools-71.1.0.20240818-py3-none-any.whl", hash = "sha256:c4f95302f88369ac0ac46c67ddbfc70c6c4dbbb184d9fed356244217a2934025"}, ] -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-devhelp" -version = "1.0.2" -description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." 
-optional = false -python-versions = ">=3.5" -files = [ - {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, - {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] - [[package]] -name = "sphinxcontrib-htmlhelp" -version = "2.0.1" -description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" +name = "typing-extensions" +version = "4.13.2" +description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["dev", "docs", "tests"] files = [ - {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash = "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"}, - {file = "sphinxcontrib_htmlhelp-2.0.1-py3-none-any.whl", hash = "sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903"}, + {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, + {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, ] -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["html5lib", "pytest"] - [[package]] -name = "sphinxcontrib-jsmath" -version = "1.0.1" -description = "A sphinx extension which renders display math in HTML via JavaScript" +name = "urllib3" +version = "2.4.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=3.5" +python-versions = ">=3.9" +groups = ["docs"] files = [ - {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, - {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, + {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, + {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, ] [package.extras] -test = ["flake8", "mypy", "pytest"] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] -name = "sphinxcontrib-qthelp" -version = "1.0.3" -description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." 
+name = "watchdog" +version = "6.0.0" +description = "Filesystem events monitoring" optional = false -python-versions = ">=3.5" +python-versions = ">=3.9" +groups = ["docs"] files = [ - {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, - {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = 
"sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"}, + {file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"}, + {file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"}, + {file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"}, + {file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"}, ] [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] +watchmedo = ["PyYAML (>=3.10)"] [[package]] -name = "sphinxcontrib-serializinghtml" -version = "1.1.5" -description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." -optional = false -python-versions = ">=3.5" -files = [ - {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, - {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "types-setuptools" -version = "71.1.0.20240723" -description = "Typing stubs for setuptools" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-setuptools-71.1.0.20240723.tar.gz", hash = "sha256:8a9349038c7e22d88e6c5d9c6705b347b22930424114a452c1712899e85131ff"}, - {file = "types_setuptools-71.1.0.20240723-py3-none-any.whl", hash = "sha256:ac9fc263f59d1e02bca49cb7270a12c47ab80b3b911fb4d92f1fecf978bfe88a"}, -] - -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" +name = "wcmatch" +version = "10.0" +description = "Wildcard/glob file name matcher." 
optional = false python-versions = ">=3.8" +groups = ["docs"] files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - -[[package]] -name = "urllib3" -version = "2.0.4" -description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = false -python-versions = ">=3.7" -files = [ - {file = "urllib3-2.0.4-py3-none-any.whl", hash = "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4"}, - {file = "urllib3-2.0.4.tar.gz", hash = "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11"}, + {file = "wcmatch-10.0-py3-none-any.whl", hash = "sha256:0dd927072d03c0a6527a20d2e6ad5ba8d0380e60870c383bc533b71744df7b7a"}, + {file = "wcmatch-10.0.tar.gz", hash = "sha256:e72f0de09bba6a04e0de70937b0cf06e55f36f37b3deb422dfaf854b867b840a"}, ] -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] +[package.dependencies] +bracex = ">=2.1.1" [[package]] name = "wheel" -version = "0.44.0" +version = "0.45.1" description = "A built-package format for Python" optional = false python-versions = ">=3.8" +groups = ["dev", "tests"] files = [ - {file = "wheel-0.44.0-py3-none-any.whl", hash = "sha256:2376a90c98cc337d18623527a97c31797bd02bad0033d41547043a1cbfbe448f"}, - {file = "wheel-0.44.0.tar.gz", hash = "sha256:a29c3f2817e95ab89aa4660681ad547c0e9547f20e75b0562fe7723c9a2a9d49"}, + {file = "wheel-0.45.1-py3-none-any.whl", hash = "sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248"}, + {file = "wheel-0.45.1.tar.gz", hash = "sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729"}, ] [package.extras] @@ -1441,27 +1674,33 @@ test = ["pytest (>=6.0.0)", "setuptools (>=65)"] [[package]] name = "zipp" -version = "3.16.2" +version = "3.22.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["dev", "docs"] files = [ - {file = "zipp-3.16.2-py3-none-any.whl", hash = "sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0"}, - {file = "zipp-3.16.2.tar.gz", hash = "sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147"}, + {file = "zipp-3.22.0-py3-none-any.whl", hash = "sha256:fe208f65f2aca48b81f9e6fd8cf7b8b32c26375266b009b413d45306b6148343"}, + {file = "zipp-3.22.0.tar.gz", hash = "sha256:dd2f28c3ce4bc67507bfd3781d21b7bb2be31103b51a4553ad7d90b84e57ace5"}, ] +markers = {dev = "python_full_version < \"3.10.2\"", docs = "python_version < \"3.10\""} [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = 
["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib_resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] [extras] berkeleydb = ["berkeleydb"] html = ["html5rdf"] -lxml = ["lxml"] +lxml = ["lxml", "lxml"] networkx = ["networkx"] orjson = ["orjson"] [metadata] -lock-version = "2.0" -python-versions = "^3.8.1" -content-hash = "71704ba175e33528872fab8121cb609041bd97b6a99f8f04022a26904941b27c" +lock-version = "2.1" +python-versions = ">=3.9,<4" +content-hash = "ef36af3b2461cc3e029a5a0a405eda0ee1569deb7f8ecfbec69f4c69f314e3a8" diff --git a/pyproject.toml b/pyproject.toml index 1e15fe569..dd8f0397c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,25 +1,27 @@ -[tool.poetry] +[project] name = "rdflib" -version = "7.1.1" +version = "8.0.0a0" description = """RDFLib is a Python library for working with RDF, \ a simple yet powerful language for representing information.""" -authors = ["Daniel 'eikeon' Krech "] -maintainers = ["RDFLib Team "] +authors = [{ name = "Daniel 'eikeon' Krech", email = "eikeon@eikeon.com" }] +maintainers = [{ name = "RDFLib Team", email = "rdflib-dev@googlegroups.com" }] repository = "https://github.com/RDFLib/rdflib" documentation = "https://rdflib.readthedocs.org/" license = "BSD-3-Clause" -classifiers=[ +classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "License :: OSI Approved :: BSD License", "Topic :: Software Development :: Libraries :: Python Modules", "Operating System :: OS Independent", "Natural Language :: English" ] +requires-python = ">=3.9,<4" readme = "README.md" packages = [ { include = "rdflib" }, @@ -29,64 +31,63 @@ include = [ { path = "docs", format = "sdist" }, { path = "examples", format = "sdist" }, ] +dynamic = [ "dependencies" ] +dependencies = [ + 'isodate >=0.7.2,<1.0.0; python_version < "3.11"', + 'pyparsing >=3.2.0,<4', +] -[tool.poetry.scripts] +[project.scripts] rdfpipe = 'rdflib.tools.rdfpipe:main' csv2rdf = 'rdflib.tools.csv2rdf:main' rdf2dot = 'rdflib.tools.rdf2dot:main' rdfs2dot = 'rdflib.tools.rdfs2dot:main' rdfgraphisomorphism = 'rdflib.tools.graphisomorphism:main' -[tool.poetry.dependencies] -python = "^3.8.1" -isodate = {version=">=0.7.2,<1.0.0", python = "<3.11"} -pyparsing = ">=2.1.0,<4" -berkeleydb = {version = "^18.1.0", optional = true} -networkx = {version = ">=2,<4", optional = true} -html5rdf = {version = ">=1.2,<2", optional = true} -lxml = {version = ">=4.3,<6.0", optional = true} -orjson = {version = ">=3.9.14,<4", optional = true} - [tool.poetry.group.dev.dependencies] -black = "24.4.2" -mypy = "^1.1.0" +black = "24.10.0" +mypy = "^1.13.0" lxml-stubs = ">=0.4,<0.6" pip-tools = "^7.4.1" [tool.poetry.group.tests.dependencies] pytest = ">=7.1.3,<9.0.0" -pytest-cov = ">=4,<6" -coverage = {version = "^7.0.1", extras = ["toml"]} +pytest-cov = ">=4,<7" +coverage = {version = "^7.8.2", extras = ["toml"]} types-setuptools = ">=68.0.0.3,<72.0.0.0" setuptools = ">=68,<72" -wheel = ">=0.42,<0.45" +wheel = ">=0.42,<0.46" [tool.poetry.group.docs.dependencies] -sphinx = ">=7.1.2,<8" -myst-parser = ">=2,<4" -sphinxcontrib-apidoc = ">=0.3,<0.6" -sphinx-autodoc-typehints = ">=1.25.3,<=2.0.1" -typing-extensions = "^4.5.0" +typing-extensions = 
"^4.11.0" +mkdocs = ">=1.6.1" +mkdocs-material = ">=9.6.12" +mkdocstrings = {version = ">=0.29.1", extras = ["python"]} +mkdocs-gen-files = "^0.5.0" +mkdocs-include-markdown-plugin = "^7.1.5" [tool.poetry.group.lint.dependencies] -ruff = ">=0.0.286,<0.8.0" +ruff = "0.8.6" -[tool.poetry.extras] -berkeleydb = ["berkeleydb"] -networkx = ["networkx"] +[project.optional-dependencies] +berkeleydb = ["berkeleydb >18.1.0"] +networkx = ["networkx >=2,<4"] # html support is optional, it is used only in tokenizing `rdf:HTML` type Literals -html = ["html5rdf"] +html = ["html5rdf >=1.2.1,<2"] # lxml support is optional, it is used only for parsing XML-formatted SPARQL results -lxml = ["lxml"] -orjson = ["orjson"] +lxml = [ + 'lxml >=4.8.0,<6.0; python_version <"3.11"', + 'lxml >=4.9.3,<6.0; python_version >="3.11"', +] +orjson = ["orjson >=3.9.14,<4"] [build-system] -requires = ["poetry-core>=1.4.0"] +requires = ["poetry-core>=2.0.0"] build-backend = "poetry.core.masonry.api" [tool.ruff] # https://beta.ruff.rs/docs/configuration/ -target-version = "py38" +target-version = "py39" # Same as Black. line-length = 88 @@ -166,9 +167,9 @@ ignore = [ ] [tool.black] -line-length = "88" -target-version = ['py38'] -required-version = "24.4.2" +line-length = 88 +target-version = ['py39'] +required-version = "24.10.0" include = '\.pyi?$' exclude = ''' ( @@ -182,7 +183,7 @@ exclude = ''' | \.venv | \.var | \.github - | _build + | site | htmlcov | benchmarks | test_reports @@ -197,13 +198,13 @@ exclude = ''' [tool.pytest.ini_options] addopts = [ - "--doctest-modules", - "--ignore=admin", - "--ignore=devtools", - "--ignore=rdflib/extras/external_graph_libs.py", - "--ignore-glob=docs/*.py", - "--doctest-glob=docs/*.rst", - "--strict-markers", + "--doctest-modules", + "--ignore=admin", + "--ignore=devtools", + "--ignore=rdflib/extras/external_graph_libs.py", + "--ignore-glob=docs/*.py", + "--ignore-glob=site/*", + "--strict-markers", ] filterwarnings = [ # The below warning is a consequence of how pytest doctest detects mocks and how DefinedNamespace behaves when an undefined attribute is being accessed. @@ -223,7 +224,7 @@ log_cli_date_format = "%Y-%m-%dT%H:%M:%S" [tool.isort] profile = "black" -py_version = 37 +py_version = 39 line_length = 88 src_paths= ["rdflib", "test", "devtools", "examples"] supported_extensions = ["pyw", "pyi", "py"] @@ -237,7 +238,7 @@ skip = [ '.venv', '.var', '.github', - '_build', + 'site', 'htmlcov', 'benchmarks', 'test_reports', @@ -250,7 +251,7 @@ skip = [ [tool.mypy] files = ['rdflib', 'test', 'devtools', 'examples'] -python_version = "3.8" +python_version = "3.9" warn_unused_configs = true ignore_missing_imports = true disallow_subclassing_any = false @@ -284,3 +285,6 @@ exclude_lines = [ "if __name__ == .__main__.:", "if __name__==.__main__.:" ] + +[tool.poetry.requires-plugins] +poetry-plugin-export = ">=1.8.0" diff --git a/rdflib/__init__.py b/rdflib/__init__.py index 0c40cd7a4..290214c6c 100644 --- a/rdflib/__init__.py +++ b/rdflib/__init__.py @@ -7,40 +7,42 @@ rdflib package. The primary interface `rdflib` exposes to work with RDF is -`rdflib.graph.Graph`. +[`rdflib.graph.Graph`][rdflib.graph.Graph]. A tiny example: - >>> from rdflib import Graph, URIRef, Literal - - >>> g = Graph() - >>> result = g.parse("http://www.w3.org/2000/10/swap/test/meet/blue.rdf") - - >>> print("graph has %s statements." % len(g)) - graph has 4 statements. - >>> - >>> for s, p, o in g: - ... if (s, p, o) not in g: - ... 
raise Exception("It better be!") - - >>> s = g.serialize(format='nt') - >>> - >>> sorted(g) == [ - ... (URIRef("http://meetings.example.com/cal#m1"), - ... URIRef("http://www.example.org/meeting_organization#homePage"), - ... URIRef("http://meetings.example.com/m1/hp")), - ... (URIRef("http://www.example.org/people#fred"), - ... URIRef("http://www.example.org/meeting_organization#attending"), - ... URIRef("http://meetings.example.com/cal#m1")), - ... (URIRef("http://www.example.org/people#fred"), - ... URIRef("http://www.example.org/personal_details#GivenName"), - ... Literal("Fred")), - ... (URIRef("http://www.example.org/people#fred"), - ... URIRef("http://www.example.org/personal_details#hasEmail"), - ... URIRef("mailto:fred@example.com")) - ... ] - True - +```python +>>> from rdflib import Graph, URIRef, Literal + +>>> g = Graph() +>>> result = g.parse("http://www.w3.org/2000/10/swap/test/meet/blue.rdf") + +>>> print("graph has %s statements." % len(g)) +graph has 4 statements. +>>> +>>> for s, p, o in g: +... if (s, p, o) not in g: +... raise Exception("It better be!") + +>>> s = g.serialize(format='nt') +>>> +>>> sorted(g) == [ +... (URIRef("http://meetings.example.com/cal#m1"), +... URIRef("http://www.example.org/meeting_organization#homePage"), +... URIRef("http://meetings.example.com/m1/hp")), +... (URIRef("http://www.example.org/people#fred"), +... URIRef("http://www.example.org/meeting_organization#attending"), +... URIRef("http://meetings.example.com/cal#m1")), +... (URIRef("http://www.example.org/people#fred"), +... URIRef("http://www.example.org/personal_details#GivenName"), +... Literal("Fred")), +... (URIRef("http://www.example.org/people#fred"), +... URIRef("http://www.example.org/personal_details#hasEmail"), +... URIRef("mailto:fred@example.com")) +... ] +True + +``` """ import logging @@ -52,13 +54,14 @@ __docformat__ = "restructuredtext en" __version__: str = _DISTRIBUTION_METADATA["Version"] -__date__ = "2024-10-28" +__date__ = "2025-01-10" __all__ = [ "URIRef", "BNode", "IdentifiedNode", "Literal", + "Node", "Variable", "Namespace", "Dataset", @@ -132,10 +135,13 @@ For example: +```python >>> from rdflib import Literal,XSD >>> Literal("01", datatype=XSD.int) rdflib.term.Literal("1", datatype=rdflib.term.URIRef("http://www.w3.org/2001/XMLSchema#integer")) +``` + This flag may be changed at any time, but will only affect literals created after that time, previously created literals will remain (un)normalized. @@ -144,14 +150,13 @@ DAWG_LITERAL_COLLATION = False -""" -DAWG_LITERAL_COLLATION determines how literals are ordered or compared +"""DAWG_LITERAL_COLLATION determines how literals are ordered or compared to each other. In SPARQL, applying the >,<,>=,<= operators to literals of incompatible data-types is an error, i.e: -Literal(2)>Literal('cake') is neither true nor false, but an error. +`Literal(2)>Literal('cake')` is neither true nor false, but an error. This is a problem in PY3, where lists of Literals of incompatible types can no longer be sorted. @@ -161,7 +166,7 @@ datatype URI In particular, this determines how the rich comparison operators for -Literal work, eq, __neq__, __lt__, etc. +Literal work, eq, `__neq__`, `__lt__`, etc. 
""" @@ -195,7 +200,7 @@ XSD, Namespace, ) -from rdflib.term import BNode, IdentifiedNode, Literal, URIRef, Variable +from rdflib.term import BNode, IdentifiedNode, Literal, Node, URIRef, Variable from rdflib import plugin, query, util # isort:skip from rdflib.container import * # isort:skip # noqa: F403 diff --git a/rdflib/_networking.py b/rdflib/_networking.py index 311096a89..95da6e2bf 100644 --- a/rdflib/_networking.py +++ b/rdflib/_networking.py @@ -2,7 +2,6 @@ import string import sys -from typing import Dict from urllib.error import HTTPError from urllib.parse import quote as urlquote from urllib.parse import urljoin, urlsplit @@ -11,23 +10,27 @@ def _make_redirect_request(request: Request, http_error: HTTPError) -> Request: - """ - Create a new request object for a redirected request. - - The logic is based on `urllib.request.HTTPRedirectHandler` from `this commit _`. - - :param request: The original request that resulted in the redirect. - :param http_error: The response to the original request that indicates a - redirect should occur and contains the new location. - :return: A new request object to the location indicated by the response. - :raises HTTPError: the supplied ``http_error`` if the redirect request - cannot be created. - :raises ValueError: If the response code is `None`. - :raises ValueError: If the response does not contain a ``Location`` header - or the ``Location`` header is not a string. - :raises HTTPError: If the scheme of the new location is not ``http``, - ``https``, or ``ftp``. - :raises HTTPError: If there are too many redirects or a redirect loop. + """Create a new request object for a redirected request. + + The logic is based on [HTTPRedirectHandler](https://github.com/python/cpython/blob/b58bc8c2a9a316891a5ea1a0487aebfc86c2793a/Lib/urllib/request.py#L641-L751) from urllib.request. + + Args: + request: The original request that resulted in the redirect. + http_error: The response to the original request that indicates a + redirect should occur and contains the new location. + + Returns: + A new request object to the location indicated by the response. + + Raises: + HTTPError: the supplied `http_error` if the redirect request + cannot be created. + ValueError: If the response code is None. + ValueError: If the response does not contain a `Location` header + or the `Location` header is not a string. + HTTPError: If the scheme of the new location is not `http`, + `https`, or `ftp`. + HTTPError: If there are too many redirects or a redirect loop. """ new_url = http_error.headers.get("Location") if new_url is None: @@ -68,7 +71,7 @@ def _make_redirect_request(request: Request, http_error: HTTPError) -> Request: unverifiable=True, ) - visited: Dict[str, int] + visited: dict[str, int] if hasattr(request, "redirect_dict"): visited = request.redirect_dict if ( @@ -92,15 +95,17 @@ def _make_redirect_request(request: Request, http_error: HTTPError) -> Request: def _urlopen(request: Request) -> addinfourl: - """ - This is a shim for `urlopen` that handles HTTP redirects with status code + """This is a shim for `urlopen` that handles HTTP redirects with status code 308 (Permanent Redirect). This function should be removed once all supported versions of Python handles the 308 HTTP status code. - :param request: The request to open. - :return: The response to the request. + Args: + request: The request to open. + + Returns: + The response to the request. 
""" try: return urlopen(request) diff --git a/rdflib/_type_checking.py b/rdflib/_type_checking.py index 1bbeda134..2ead0eabd 100644 --- a/rdflib/_type_checking.py +++ b/rdflib/_type_checking.py @@ -3,13 +3,13 @@ as it would otherwise introduce a runtime dependency on `typing_extensions` for older python versions which is not desirable. -This was made mainly to accommodate ``sphinx-autodoc-typehints`` which cannot +This was made mainly to accommodate `sphinx-autodoc-typehints` which cannot recognize type aliases from imported files if the type aliases are defined -inside ``if TYPE_CHECKING:``. So instead of placing the type aliases in normal -modules inside ``TYPE_CHECKING`` guards they are in this file which should only -be imported inside ``TYPE_CHECKING`` guards. +inside `if TYPE_CHECKING:`. So instead of placing the type aliases in normal +modules inside `TYPE_CHECKING` guards they are in this file which should only +be imported inside `TYPE_CHECKING` guards. -.. important:: +!!! info "Internal use only" Things inside this module are not for use outside of RDFLib and this module is not part the the RDFLib public API. """ diff --git a/rdflib/collection.py b/rdflib/collection.py index ed0a48ff9..7e2be6713 100644 --- a/rdflib/collection.py +++ b/rdflib/collection.py @@ -1,21 +1,22 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Iterable, Iterator, List, Optional +from typing import TYPE_CHECKING, Optional, cast from rdflib.namespace import RDF -from rdflib.term import BNode, Node +from rdflib.term import BNode, IdentifiedNode if TYPE_CHECKING: - from rdflib.graph import Graph + from collections.abc import Iterable, Iterator + + from rdflib.graph import Graph, _ObjectType __all__ = ["Collection"] class Collection: - """ - See "Emulating container types": - https://docs.python.org/reference/datamodel.html#emulating-container-types + """See "Emulating container types": + ```python >>> from rdflib.term import Literal >>> from rdflib.graph import Graph >>> from pprint import pprint @@ -40,7 +41,6 @@ class Collection: ['"1"^^', '"2"^^', '"3"^^'] - >>> Literal(1) in c True >>> len(c) @@ -50,11 +50,15 @@ class Collection: >>> c.index(Literal(2)) == 1 True - The collection is immutable if ``uri`` is the empty list - (``http://www.w3.org/1999/02/22-rdf-syntax-ns#nil``). + ``` + + The collection is immutable if `uri` is the empty list (`http://www.w3.org/1999/02/22-rdf-syntax-ns#nil`). 
""" - def __init__(self, graph: Graph, uri: Node, seq: List[Node] = []): + uri: IdentifiedNode + graph: Graph + + def __init__(self, graph: Graph, uri: IdentifiedNode, seq: list[_ObjectType] = []): self.graph = graph self.uri = uri or BNode() if seq: @@ -62,6 +66,7 @@ def __init__(self, graph: Graph, uri: Node, seq: List[Node] = []): def n3(self) -> str: """ + ```python >>> from rdflib.term import Literal >>> from rdflib.graph import Graph >>> listname = BNode() @@ -83,29 +88,33 @@ def n3(self) -> str: >>> c = Collection(g, listname) >>> print(c.n3()) #doctest: +NORMALIZE_WHITESPACE ( "1"^^ - "2"^^ - "3"^^ ) + "2"^^ + "3"^^ ) + + ``` """ return "( %s )" % (" ".join([i.n3() for i in self])) - def _get_container(self, index: int) -> Optional[Node]: + def _get_container(self, index: int) -> IdentifiedNode | None: """Gets the first, rest holding node at index.""" assert isinstance(index, int) graph = self.graph - container: Optional[Node] = self.uri + container: IdentifiedNode | None = self.uri i = 0 - while i < index: + while i < index and container is not None: i += 1 - container = graph.value(container, RDF.rest) - if container is None: - break + ret = graph.value(container, RDF.rest) + if ret is not None: + container = cast(IdentifiedNode, ret) + else: + container = None return container def __len__(self) -> int: """length of items in collection.""" return len(list(self.graph.items(self.uri))) - def index(self, item: Node) -> int: + def index(self, item: _ObjectType) -> int: """ Returns the 0-based numerical index of the item in the list """ @@ -123,9 +132,9 @@ def index(self, item: Node) -> int: raise Exception("Malformed RDF Collection: %s" % self.uri) else: assert len(newlink) == 1, "Malformed RDF Collection: %s" % self.uri - listname = newlink[0] + listname = cast(IdentifiedNode, newlink[0]) - def __getitem__(self, key: int) -> Node: + def __getitem__(self, key: int) -> _ObjectType: """TODO""" c = self._get_container(key) if c: @@ -137,7 +146,7 @@ def __getitem__(self, key: int) -> Node: else: raise IndexError(key) - def __setitem__(self, key: int, value: Node) -> None: + def __setitem__(self, key: int, value: _ObjectType) -> None: """TODO""" c = self._get_container(key) if c: @@ -147,6 +156,7 @@ def __setitem__(self, key: int, value: Node) -> None: def __delitem__(self, key: int) -> None: """ + ```python >>> from rdflib.namespace import RDF, RDFS >>> from rdflib import Graph >>> from pprint import pformat @@ -187,6 +197,7 @@ def __delitem__(self, key: int) -> None: >>> len(g) 4 + ``` """ self[key] # to raise any potential key exceptions graph = self.graph @@ -207,22 +218,23 @@ def __delitem__(self, key: int) -> None: graph.remove((current, None, None)) graph.set((prior, RDF.rest, next)) - def __iter__(self) -> Iterator[Node]: + def __iter__(self) -> Iterator[_ObjectType]: """Iterator over items in Collections""" return self.graph.items(self.uri) - def _end(self) -> Node: + def _end(self) -> IdentifiedNode: # find end of list - container = self.uri + container: IdentifiedNode = self.uri while True: rest = self.graph.value(container, RDF.rest) if rest is None or rest == RDF.nil: return container else: - container = rest + container = cast(IdentifiedNode, rest) - def append(self, item: Node) -> Collection: + def append(self, item: _ObjectType) -> Collection: """ + ```python >>> from rdflib.term import Literal >>> from rdflib.graph import Graph >>> listname = BNode() @@ -233,8 +245,8 @@ def append(self, item: Node) -> Collection: >>> len([i for i in links if (i, RDF.rest, RDF.nil) in g]) 
1 + ``` """ - end = self._end() if end == RDF.nil: raise ValueError("Cannot append to empty list") @@ -249,7 +261,7 @@ def append(self, item: Node) -> Collection: self.graph.add((end, RDF.rest, RDF.nil)) return self - def __iadd__(self, other: Iterable[Node]): + def __iadd__(self, other: Iterable[_ObjectType]): end = self._end() if end == RDF.nil: raise ValueError("Cannot append to empty list") @@ -267,11 +279,11 @@ def __iadd__(self, other: Iterable[Node]): return self def clear(self): - container: Optional[Node] = self.uri + container: IdentifiedNode | None = self.uri graph = self.graph - while container: + while container is not None: rest = graph.value(container, RDF.rest) graph.remove((container, RDF.first, None)) graph.remove((container, RDF.rest, None)) - container = rest + container = cast(Optional[IdentifiedNode], rest) return self diff --git a/rdflib/compare.py b/rdflib/compare.py index afc2c40b5..e519e7a1c 100644 --- a/rdflib/compare.py +++ b/rdflib/compare.py @@ -7,70 +7,84 @@ Warning: the time to canonicalize bnodes may increase exponentially on degenerate larger graphs. Use with care! -Example of comparing two graphs:: - - >>> g1 = Graph().parse(format='n3', data=''' - ... @prefix : . - ... :rel - ... , - ... [ :label "Same" ], - ... , - ... [ :label "A" ] . - ... ''') - >>> g2 = Graph().parse(format='n3', data=''' - ... @prefix : . - ... :rel - ... , - ... [ :label "Same" ], - ... , - ... [ :label "B" ] . - ... ''') - >>> - >>> iso1 = to_isomorphic(g1) - >>> iso2 = to_isomorphic(g2) - -These are not isomorphic:: - - >>> iso1 == iso2 - False - -Diff the two graphs:: - - >>> in_both, in_first, in_second = graph_diff(iso1, iso2) - -Present in both:: - - >>> def dump_nt_sorted(g): - ... for l in sorted(g.serialize(format='nt').splitlines()): - ... if l: print(l.decode('ascii')) - - >>> dump_nt_sorted(in_both) #doctest: +SKIP - - . - - _:cbcaabaaba17fecbc304a64f8edee4335e . - _:cbcaabaaba17fecbc304a64f8edee4335e - "Same" . - -Only in first:: - - >>> dump_nt_sorted(in_first) #doctest: +SKIP - - . - - _:cb124e4c6da0579f810c0ffe4eff485bd9 . - _:cb124e4c6da0579f810c0ffe4eff485bd9 - "A" . - -Only in second:: - - >>> dump_nt_sorted(in_second) #doctest: +SKIP - - . - - _:cb558f30e21ddfc05ca53108348338ade8 . - _:cb558f30e21ddfc05ca53108348338ade8 - "B" . +Example of comparing two graphs: + +```python +>>> g1 = Graph().parse(format='n3', data=''' +... @prefix : . +... :rel +... , +... [ :label "Same" ], +... , +... [ :label "A" ] . +... ''') +>>> g2 = Graph().parse(format='n3', data=''' +... @prefix : . +... :rel +... , +... [ :label "Same" ], +... , +... [ :label "B" ] . +... ''') +>>> +>>> iso1 = to_isomorphic(g1) +>>> iso2 = to_isomorphic(g2) + +``` + +These are not isomorphic + +```python +>>> iso1 == iso2 +False + +``` + +Diff the two graphs: + +```python +>>> in_both, in_first, in_second = graph_diff(iso1, iso2) + +``` + +Present in both: + +```python +>>> def dump_nt_sorted(g): +... for l in sorted(g.serialize(format='nt').splitlines()): +... if l: print(l.decode('ascii')) +>>> dump_nt_sorted(in_both) #doctest: +SKIP + + . + + _:cbcaabaaba17fecbc304a64f8edee4335e . +_:cbcaabaaba17fecbc304a64f8edee4335e + "Same" . +``` + +Only in first: + +```python +>>> dump_nt_sorted(in_first) #doctest: +SKIP + + . + + _:cb124e4c6da0579f810c0ffe4eff485bd9 . +_:cb124e4c6da0579f810c0ffe4eff485bd9 + "A" . +``` + +Only in second: + +```python +>>> dump_nt_sorted(in_second) #doctest: +SKIP + + . + + _:cb558f30e21ddfc05ca53108348338ade8 . +_:cb558f30e21ddfc05ca53108348338ade8 + "B" . 
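+
+>>> # A compact, self-contained re-run of the same workflow; the IRIs below
+>>> # are illustrative stand-ins, not the ones elided above.
+>>> ga = Graph().parse(format='n3', data='''
+...     @prefix : <http://example.org/ns#> .
+...     :x :rel [ :label "A" ] .
+... ''')
+>>> gb = Graph().parse(format='n3', data='''
+...     @prefix : <http://example.org/ns#> .
+...     :x :rel [ :label "B" ] .
+... ''')
+>>> d_both, d_first, d_second = graph_diff(to_isomorphic(ga), to_isomorphic(gb))
+>>> # The bnode's label differs, so its canonical id differs too and nothing
+>>> # is shared: both triples of each graph end up on that graph's side.
+>>> len(d_first), len(d_second)
+(2, 2)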
+``` """ from __future__ import annotations @@ -90,19 +104,10 @@ ] from collections import defaultdict +from collections.abc import Callable, Iterator from datetime import datetime from hashlib import sha256 -from typing import ( - TYPE_CHECKING, - Callable, - Dict, - Iterator, - List, - Optional, - Set, - Tuple, - Union, -) +from typing import TYPE_CHECKING, Optional, Union from rdflib.graph import ConjunctiveGraph, Graph, ReadOnlyGraphAggregate, _TripleType from rdflib.term import BNode, IdentifiedNode, Node, URIRef @@ -194,7 +199,7 @@ def graph_digest(self, stats=None): def internal_hash(self, stats=None): """ - This is defined instead of __hash__ to avoid a circular recursion + This is defined instead of `__hash__` to avoid a circular recursion scenario with the Memory store for rdflib which requires a hash lookup in order to return a generator of triples. """ @@ -202,16 +207,16 @@ def internal_hash(self, stats=None): HashFunc = Callable[[str], int] -ColorItem = Tuple[Union[int, str], URIRef, Union[int, str]] -ColorItemTuple = Tuple[ColorItem, ...] -HashCache = Optional[Dict[ColorItemTuple, str]] -Stats = Dict[str, Union[int, str]] +ColorItem = tuple[Union[int, str], URIRef, Union[int, str]] +ColorItemTuple = tuple[ColorItem, ...] +HashCache = Optional[dict[ColorItemTuple, str]] +Stats = dict[str, Union[int, str]] class Color: def __init__( self, - nodes: List[IdentifiedNode], + nodes: list[IdentifiedNode], hashfunc: HashFunc, color: ColorItemTuple = (), hash_cache: HashCache = None, @@ -231,7 +236,7 @@ def __str__(self): def key(self): return (len(self.nodes), self.hash_color()) - def hash_color(self, color: Optional[Tuple[ColorItem, ...]] = None) -> str: + def hash_color(self, color: tuple[ColorItem, ...] | None = None) -> str: if color is None: color = self.color if color in self._hash_cache: @@ -253,9 +258,9 @@ def stringify(x): return val def distinguish(self, W: Color, graph: Graph): # noqa: N803 - colors: Dict[str, Color] = {} + colors: dict[str, Color] = {} for n in self.nodes: - new_color: Tuple[ColorItem, ...] = list(self.color) # type: ignore[assignment] + new_color: tuple[ColorItem, ...] = list(self.color) # type: ignore[assignment] for node in W.nodes: new_color += [ # type: ignore[operator] (1, p, W.hash_color()) for s, p, o in graph.triples((n, None, node)) @@ -296,10 +301,10 @@ def _hashfunc(s: str): self._hash_cache: HashCache = {} self.hashfunc = _hashfunc - def _discrete(self, coloring: List[Color]) -> bool: + def _discrete(self, coloring: list[Color]) -> bool: return len([c for c in coloring if not c.discrete()]) == 0 - def _initial_color(self) -> List[Color]: + def _initial_color(self) -> list[Color]: """Finds an initial color for the graph. Finds an initial color of the graph by finding all blank nodes and @@ -307,7 +312,7 @@ def _initial_color(self) -> List[Color]: nodes are not included, as they are a) already colored (by URI or literal) and b) do not factor into the color of any blank node. 
""" - bnodes: Set[BNode] = set() + bnodes: set[BNode] = set() others = set() self._neighbors = defaultdict(set) for s, p, o in self.graph: @@ -343,12 +348,12 @@ def _individuate(self, color, individual): ) return c - def _get_candidates(self, coloring: List[Color]) -> Iterator[Tuple[Node, Color]]: + def _get_candidates(self, coloring: list[Color]) -> Iterator[tuple[Node, Color]]: for c in [c for c in coloring if not c.discrete()]: for node in c.nodes: yield node, c - def _refine(self, coloring: List[Color], sequence: List[Color]) -> List[Color]: + def _refine(self, coloring: list[Color], sequence: list[Color]) -> list[Color]: sequence = sorted(sequence, key=lambda x: x.key(), reverse=True) coloring = coloring[:] while len(sequence) > 0 and not self._discrete(coloring): @@ -367,8 +372,8 @@ def _refine(self, coloring: List[Color], sequence: List[Color]) -> List[Color]: sequence = sequence[:si] + colors + sequence[si + 1 :] except ValueError: sequence = colors[1:] + sequence - combined_colors: List[Color] = [] - combined_color_map: Dict[str, Color] = dict() + combined_colors: list[Color] = [] + combined_color_map: dict[str, Color] = dict() for color in coloring: color_hash = color.hash_color() # This is a hash collision, and be combined into a single color for individuation. @@ -380,7 +385,7 @@ def _refine(self, coloring: List[Color], sequence: List[Color]) -> List[Color]: return combined_colors @_runtime("to_hash_runtime") - def to_hash(self, stats: Optional[Stats] = None): + def to_hash(self, stats: Stats | None = None): result = 0 for triple in self.canonical_triples(stats=stats): result += self.hashfunc(" ".join([x.n3() for x in triple])) @@ -388,7 +393,7 @@ def to_hash(self, stats: Optional[Stats] = None): stats["graph_digest"] = "%x" % result return result - def _experimental_path(self, coloring: List[Color]) -> List[Color]: + def _experimental_path(self, coloring: list[Color]) -> list[Color]: coloring = [c.copy() for c in coloring] while not self._discrete(coloring): color = [x for x in coloring if not x.discrete()][0] @@ -400,9 +405,9 @@ def _experimental_path(self, coloring: List[Color]) -> List[Color]: def _create_generator( self, - colorings: List[List[Color]], - groupings: Optional[Dict[Node, Set[Node]]] = None, - ) -> Dict[Node, Set[Node]]: + colorings: list[list[Color]], + groupings: dict[Node, set[Node]] | None = None, + ) -> dict[Node, set[Node]]: if not groupings: groupings = defaultdict(set) for group in zip(*colorings): @@ -416,20 +421,20 @@ def _create_generator( @_call_count("individuations") def _traces( self, - coloring: List[Color], - stats: Optional[Stats] = None, - depth: List[int] = [0], - ) -> List[Color]: + coloring: list[Color], + stats: Stats | None = None, + depth: list[int] = [0], + ) -> list[Color]: if stats is not None and "prunings" not in stats: stats["prunings"] = 0 depth[0] += 1 candidates = self._get_candidates(coloring) - best: List[List[Color]] = [] + best: list[list[Color]] = [] best_score = None best_experimental_score = None last_coloring = None - generator: Dict[Node, Set[Node]] = defaultdict(set) - visited: Set[Node] = set() + generator: dict[Node, set[Node]] = defaultdict(set) + visited: set[Node] = set() for candidate, color in candidates: if candidate in generator: v = generator[candidate] & visited @@ -437,7 +442,7 @@ def _traces( visited.add(candidate) continue visited.add(candidate) - coloring_copy: List[Color] = [] + coloring_copy: list[Color] = [] color_copy = None for c in coloring: c_copy = c.copy() @@ -451,25 +456,25 @@ def _traces( 
experimental = self._experimental_path(coloring_copy) experimental_score = set([c.key() for c in experimental]) if last_coloring: - generator = self._create_generator( # type: ignore[unreachable] + generator = self._create_generator( [last_coloring, experimental], generator ) last_coloring = experimental - if best_score is None or best_score < color_score: # type: ignore[unreachable] + if best_score is None or best_score < color_score: best = [refined_coloring] best_score = color_score best_experimental_score = experimental_score - elif best_score > color_score: # type: ignore[unreachable] + elif best_score > color_score: # prune this branch. - if stats is not None: + if stats is not None and isinstance(stats["prunings"], int): stats["prunings"] += 1 elif experimental_score != best_experimental_score: best.append(refined_coloring) else: # prune this branch. - if stats is not None: + if stats is not None and isinstance(stats["prunings"], int): stats["prunings"] += 1 - discrete: List[List[Color]] = [x for x in best if self._discrete(x)] + discrete: list[list[Color]] = [x for x in best if self._discrete(x)] if len(discrete) == 0: best_score = None best_depth = None @@ -477,14 +482,14 @@ def _traces( d = [depth[0]] new_color = self._traces(coloring, stats=stats, depth=d) color_score = tuple([c.key() for c in refined_coloring]) - if best_score is None or color_score > best_score: # type: ignore[unreachable] + if best_score is None or color_score > best_score: discrete = [new_color] best_score = color_score best_depth = d[0] depth[0] = best_depth # type: ignore[assignment] return discrete[0] - def canonical_triples(self, stats: Optional[Stats] = None): + def canonical_triples(self, stats: Stats | None = None): if stats is not None: start_coloring = datetime.now() coloring = self._initial_color() @@ -509,7 +514,7 @@ def canonical_triples(self, stats: Optional[Stats] = None): if stats is not None: stats["color_count"] = len(coloring) - bnode_labels: Dict[Node, str] = dict( + bnode_labels: dict[Node, str] = dict( [(c.nodes[0], c.hash_color()) for c in coloring] ) if stats is not None: @@ -523,7 +528,7 @@ def canonical_triples(self, stats: Optional[Stats] = None): def _canonicalize_bnodes( self, triple: _TripleType, - labels: Dict[Node, str], + labels: dict[Node, str], ): for term in triple: if isinstance(term, BNode): @@ -547,8 +552,8 @@ def isomorphic(graph1: Graph, graph2: Graph) -> bool: Uses an algorithm to compute unique hashes which takes bnodes into account. - Examples:: - + Example: + ```python >>> g1 = Graph().parse(format='n3', data=''' ... @prefix : . ... :rel . @@ -563,7 +568,6 @@ def isomorphic(graph1: Graph, graph2: Graph) -> bool: ... ''') >>> isomorphic(g1, g2) True - >>> g3 = Graph().parse(format='n3', data=''' ... @prefix : . ... :rel . @@ -572,15 +576,15 @@ def isomorphic(graph1: Graph, graph2: Graph) -> bool: ... ''') >>> isomorphic(g1, g3) False + + ``` """ gd1 = _TripleCanonicalizer(graph1).to_hash() gd2 = _TripleCanonicalizer(graph2).to_hash() return gd1 == gd2 -def to_canonical_graph( - g1: Graph, stats: Optional[Stats] = None -) -> ReadOnlyGraphAggregate: +def to_canonical_graph(g1: Graph, stats: Stats | None = None) -> ReadOnlyGraphAggregate: """Creates a canonical, read-only graph. 
Creates a canonical, read-only graph where all bnode id:s are based on @@ -591,7 +595,7 @@ def to_canonical_graph( return ReadOnlyGraphAggregate([graph]) -def graph_diff(g1: Graph, g2: Graph) -> Tuple[Graph, Graph, Graph]: +def graph_diff(g1: Graph, g2: Graph) -> tuple[Graph, Graph, Graph]: """Returns three sets of triples: "in both", "in first" and "in second".""" # bnodes have deterministic values in canonical graphs: cg1 = to_canonical_graph(g1) @@ -610,10 +614,10 @@ def similar(g1: Graph, g2: Graph): Checks if the two graphs are "similar", by comparing sorted triples where all bnodes have been replaced by a singular mock bnode (the - ``_MOCK_BNODE``). + `_MOCK_BNODE`). This is a much cheaper, but less reliable, alternative to the comparison - algorithm in ``isomorphic``. + algorithm in `isomorphic`. """ return all(t1 == t2 for (t1, t2) in _squashed_graphs_triples(g1, g2)) diff --git a/rdflib/compat.py b/rdflib/compat.py index ddb55eb0b..dcab51798 100644 --- a/rdflib/compat.py +++ b/rdflib/compat.py @@ -8,7 +8,7 @@ import codecs import re import warnings -from typing import Match +from re import Match def cast_bytes(s, enc="utf-8"): diff --git a/rdflib/container.py b/rdflib/container.py index 6ee92848b..cbfd2cac5 100644 --- a/rdflib/container.py +++ b/rdflib/container.py @@ -8,50 +8,53 @@ class Container: - """A class for constructing RDF containers, as per https://www.w3.org/TR/rdf11-mt/#rdf-containers - - Basic usage, creating a ``Bag`` and adding to it:: - - >>> from rdflib import Graph, BNode, Literal, Bag - >>> g = Graph() - >>> b = Bag(g, BNode(), [Literal("One"), Literal("Two"), Literal("Three")]) - >>> print(g.serialize(format="turtle")) - @prefix rdf: . - - [] a rdf:Bag ; - rdf:_1 "One" ; - rdf:_2 "Two" ; - rdf:_3 "Three" . - - - - >>> # print out an item using an index reference - >>> print(b[2]) - Two - - >>> # add a new item - >>> b.append(Literal("Hello")) # doctest: +ELLIPSIS - - >>> print(g.serialize(format="turtle")) - @prefix rdf: . - - [] a rdf:Bag ; - rdf:_1 "One" ; - rdf:_2 "Two" ; - rdf:_3 "Three" ; - rdf:_4 "Hello" . - - - + """A class for constructing RDF containers, as per + + Basic usage, creating a `Bag` and adding to it: + + ```python + >>> from rdflib import Graph, BNode, Literal, Bag + >>> g = Graph() + >>> b = Bag(g, BNode(), [Literal("One"), Literal("Two"), Literal("Three")]) + >>> print(g.serialize(format="turtle")) + @prefix rdf: . + + [] a rdf:Bag ; + rdf:_1 "One" ; + rdf:_2 "Two" ; + rdf:_3 "Three" . + + + + >>> # print out an item using an index reference + >>> print(b[2]) + Two + + >>> # add a new item + >>> b.append(Literal("Hello")) # doctest: +ELLIPSIS + + >>> print(g.serialize(format="turtle")) + @prefix rdf: . + + [] a rdf:Bag ; + rdf:_1 "One" ; + rdf:_2 "Two" ; + rdf:_3 "Three" ; + rdf:_4 "Hello" . 
+ + + + ``` """ def __init__(self, graph, uri, seq=[], rtype="Bag"): """Creates a Container - :param graph: a Graph instance - :param uri: URI or Blank Node of the Container - :param seq: the elements of the Container - :param rtype: the type of Container, one of "Bag", "Seq" or "Alt" + Args: + graph: a Graph instance + uri: URI or Blank Node of the Container + seq: the elements of the Container + rtype: the type of Container, one of "Bag", "Seq" or "Alt" """ self.graph = graph diff --git a/rdflib/events.py b/rdflib/events.py index 61f3454b6..9bcbc07c9 100644 --- a/rdflib/events.py +++ b/rdflib/events.py @@ -6,26 +6,35 @@ Create a dispatcher: - >>> d = Dispatcher() +```python +>>> d = Dispatcher() + +``` Now create a handler for the event and subscribe it to the dispatcher to handle Event events. A handler is a simple function or method that accepts the event as an argument: - >>> def handler1(event): print(repr(event)) - >>> d.subscribe(Event, handler1) # doctest: +ELLIPSIS - +```python +>>> def handler1(event): print(repr(event)) +>>> d.subscribe(Event, handler1) # doctest: +ELLIPSIS + + +``` Now dispatch a new event into the dispatcher, and see handler1 get fired: - >>> d.dispatch(Event(foo='bar', data='yours', used_by='the event handlers')) - +```python +>>> d.dispatch(Event(foo='bar', data='yours', used_by='the event handlers')) + + +``` """ from __future__ import annotations -from typing import Any, Dict, Optional +from typing import Any __all__ = ["Event", "Dispatcher"] @@ -57,9 +66,9 @@ class Dispatcher: subscribers. """ - _dispatch_map: Optional[Dict[Any, Any]] = None + _dispatch_map: dict[Any, Any] | None = None - def set_map(self, amap: Dict[Any, Any]): + def set_map(self, amap: dict[Any, Any]): self._dispatch_map = amap return self diff --git a/rdflib/exceptions.py b/rdflib/exceptions.py index cbe68fb98..2f183d72e 100644 --- a/rdflib/exceptions.py +++ b/rdflib/exceptions.py @@ -11,13 +11,13 @@ ] -from typing import Any, Optional +from typing import Any class Error(Exception): """Base class for rdflib exceptions.""" - def __init__(self, msg: Optional[str] = None): + def __init__(self, msg: str | None = None): Exception.__init__(self, msg) self.msg = msg diff --git a/rdflib/extras/describer.py b/rdflib/extras/describer.py index f0df70675..27780baf9 100644 --- a/rdflib/extras/describer.py +++ b/rdflib/extras/describer.py @@ -5,101 +5,104 @@ The `Describer.rel` and `Describer.rev` methods return a context manager which sets the current about to the referenced resource for the context scope (for use with the -``with`` statement). - -Full example in the ``to_rdf`` method below:: - - >>> import datetime - >>> from rdflib.graph import Graph - >>> from rdflib.namespace import Namespace, RDFS, FOAF - >>> - >>> ORG_URI = "http://example.org/" - >>> - >>> CV = Namespace("http://purl.org/captsolo/resume-rdf/0.2/cv#") - >>> - >>> class Person: - ... def __init__(self): - ... self.first_name = "Some" - ... self.last_name = "Body" - ... self.username = "some1" - ... self.presentation = "Just a Python & RDF hacker." - ... self.image = "/images/persons/" + self.username + ".jpg" - ... self.site = "http://example.net/" - ... self.start_date = datetime.date(2009, 9, 4) - ... def get_full_name(self): - ... return " ".join([self.first_name, self.last_name]) - ... def get_absolute_url(self): - ... return "/persons/" + self.username - ... def get_thumbnail_url(self): - ... return self.image.replace('.jpg', '-thumb.jpg') - ... - ... def to_rdf(self): - ... graph = Graph() - ... 
graph.bind('foaf', FOAF) - ... graph.bind('cv', CV) - ... lang = 'en' - ... d = Describer(graph, base=ORG_URI) - ... d.about(self.get_absolute_url()+'#person') - ... d.rdftype(FOAF.Person) - ... d.value(FOAF.name, self.get_full_name()) - ... d.value(FOAF.givenName, self.first_name) - ... d.value(FOAF.familyName, self.last_name) - ... d.rel(FOAF.homepage, self.site) - ... d.value(RDFS.comment, self.presentation, lang=lang) - ... with d.rel(FOAF.depiction, self.image): - ... d.rdftype(FOAF.Image) - ... d.rel(FOAF.thumbnail, self.get_thumbnail_url()) - ... with d.rev(CV.aboutPerson): - ... d.rdftype(CV.CV) - ... with d.rel(CV.hasWorkHistory): - ... d.value(CV.startDate, self.start_date) - ... d.rel(CV.employedIn, ORG_URI+"#company") - ... return graph - ... - >>> person_graph = Person().to_rdf() - >>> expected = Graph().parse(data=''' - ... - ... - ... Some Body - ... Some - ... Body - ... - ... - ... - ... - ... - ... - ... Just a Python & RDF hacker. - ... - ... - ... - ... - ... - ... - ... - ... - ... 2009-09-04 - ... - ... - ... - ... - ... - ... ''', format="xml") - >>> - >>> from rdflib.compare import isomorphic - >>> isomorphic(person_graph, expected) #doctest: +SKIP - True +`with` statement). + +Full example in the `to_rdf` method below: + +```python +>>> import datetime +>>> from rdflib.graph import Graph +>>> from rdflib.namespace import Namespace, RDFS, FOAF + +>>> ORG_URI = "http://example.org/" + +>>> CV = Namespace("http://purl.org/captsolo/resume-rdf/0.2/cv#") + +>>> class Person: +... def __init__(self): +... self.first_name = "Some" +... self.last_name = "Body" +... self.username = "some1" +... self.presentation = "Just a Python & RDF hacker." +... self.image = "/images/persons/" + self.username + ".jpg" +... self.site = "http://example.net/" +... self.start_date = datetime.date(2009, 9, 4) +... def get_full_name(self): +... return " ".join([self.first_name, self.last_name]) +... def get_absolute_url(self): +... return "/persons/" + self.username +... def get_thumbnail_url(self): +... return self.image.replace('.jpg', '-thumb.jpg') +... +... def to_rdf(self): +... graph = Graph() +... graph.bind('foaf', FOAF) +... graph.bind('cv', CV) +... lang = 'en' +... d = Describer(graph, base=ORG_URI) +... d.about(self.get_absolute_url()+'#person') +... d.rdftype(FOAF.Person) +... d.value(FOAF.name, self.get_full_name()) +... d.value(FOAF.givenName, self.first_name) +... d.value(FOAF.familyName, self.last_name) +... d.rel(FOAF.homepage, self.site) +... d.value(RDFS.comment, self.presentation, lang=lang) +... with d.rel(FOAF.depiction, self.image): +... d.rdftype(FOAF.Image) +... d.rel(FOAF.thumbnail, self.get_thumbnail_url()) +... with d.rev(CV.aboutPerson): +... d.rdftype(CV.CV) +... with d.rel(CV.hasWorkHistory): +... d.value(CV.startDate, self.start_date) +... d.rel(CV.employedIn, ORG_URI+"#company") +... return graph +... +>>> person_graph = Person().to_rdf() +>>> expected = Graph().parse(data=''' +... +... +... Some Body +... Some +... Body +... +... +... +... +... +... +... Just a Python & RDF hacker. +... +... +... +... +... +... +... +... +... 2009-09-04 +... +... +... +... +... +... ''', format="xml") + +>>> from rdflib.compare import isomorphic +>>> isomorphic(person_graph, expected) #doctest: +SKIP +True + +``` """ from contextlib import contextmanager @@ -121,10 +124,10 @@ def __init__(self, graph=None, about=None, base=None): def about(self, subject, **kws): """ Sets the current subject. Will convert the given object into an - ``URIRef`` if it's not an ``Identifier``. 
- - Usage:: + `URIRef` if it's not an `Identifier`. + Example: + ```python >>> d = Describer() >>> d._current() #doctest: +ELLIPSIS rdflib.term.BNode(...) @@ -132,6 +135,7 @@ def about(self, subject, **kws): >>> d._current() rdflib.term.URIRef('http://example.org/') + ``` """ kws.setdefault("base", self.base) subject = cast_identifier(subject, **kws) @@ -143,10 +147,10 @@ def about(self, subject, **kws): def value(self, p, v, **kws): """ Set a literal value for the given property. Will cast the value to an - ``Literal`` if a plain literal is given. - - Usage:: + `Literal` if a plain literal is given. + Example: + ```python >>> from rdflib import URIRef >>> from rdflib.namespace import RDF, RDFS >>> d = Describer(about="http://example.org/") @@ -154,20 +158,21 @@ def value(self, p, v, **kws): >>> d.graph.value(URIRef('http://example.org/'), RDFS.label) rdflib.term.Literal('Example') + ``` """ v = cast_value(v, **kws) self.graph.add((self._current(), p, v)) def rel(self, p, o=None, **kws): """Set an object for the given property. Will convert the given object - into an ``URIRef`` if it's not an ``Identifier``. If none is given, a - new ``BNode`` is used. + into an `URIRef` if it's not an `Identifier`. If none is given, a + new `BNode` is used. - Returns a context manager for use in a ``with`` block, within which the + Returns a context manager for use in a `with` block, within which the given object is used as current subject. - Usage:: - + Example: + ```python >>> from rdflib import URIRef >>> from rdflib.namespace import RDF, RDFS >>> d = Describer(about="/", base="http://example.org/") @@ -183,6 +188,7 @@ def rel(self, p, o=None, **kws): >>> d.graph.value(URIRef('http://example.org/more'), RDFS.label) rdflib.term.Literal('More') + ``` """ kws.setdefault("base", self.base) @@ -193,12 +199,12 @@ def rel(self, p, o=None, **kws): def rev(self, p, s=None, **kws): """ - Same as ``rel``, but uses current subject as *object* of the relation. + Same as `rel`, but uses current subject as *object* of the relation. The given resource is still used as subject in the returned context manager. - Usage:: - + Example: + ```python >>> from rdflib import URIRef >>> from rdflib.namespace import RDF, RDFS >>> d = Describer(about="http://example.org/") @@ -210,6 +216,7 @@ def rev(self, p, s=None, **kws): >>> d.graph.value(URIRef('http://example.net/'), RDFS.label) rdflib.term.Literal('Net') + ``` """ kws.setdefault("base", self.base) p = cast_identifier(p) @@ -218,11 +225,10 @@ def rev(self, p, s=None, **kws): return self._subject_stack(s) def rdftype(self, t): - """ - Shorthand for setting rdf:type of the current subject. - - Usage:: + """Shorthand for setting rdf:type of the current subject. + Example: + ```python >>> from rdflib import URIRef >>> from rdflib.namespace import RDF, RDFS >>> d = Describer(about="http://example.org/") @@ -231,6 +237,7 @@ def rdftype(self, t): ... RDF.type, RDFS.Resource) in d.graph True + ``` """ self.graph.add((self._current(), RDF.type, t)) diff --git a/rdflib/extras/external_graph_libs.py b/rdflib/extras/external_graph_libs.py index 42469778e..5f2fc801c 100644 --- a/rdflib/extras/external_graph_libs.py +++ b/rdflib/extras/external_graph_libs.py @@ -1,18 +1,19 @@ """Convert (to and) from rdflib graphs to other well known graph libraries. 
Currently the following libraries are supported: + - networkx: MultiDiGraph, DiGraph, Graph - graph_tool: Graph Doctests in this file are all skipped, as we can't run them conditionally if networkx or graph_tool are available and they would err otherwise. -see ../../test/test_extras_external_graph_libs.py for conditional tests +see `../../test/test_extras_external_graph_libs.py` for conditional tests """ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Any, Dict, List +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from rdflib.graph import Graph @@ -37,16 +38,16 @@ def _rdflib_to_networkx_graph( Modifies nxgraph in-place! - Arguments: + Args: graph: an rdflib.Graph. nxgraph: a networkx.Graph/DiGraph/MultiDigraph. calc_weights: If True adds a 'weight' attribute to each edge according to the count of s,p,o triples between s and o, which is meaningful for Graph/DiGraph. edge_attrs: Callable to construct edge data from s, p, o. - 'triples' attribute is handled specially to be merged. - 'weight' should not be generated if calc_weights==True. - (see invokers below!) + 'triples' attribute is handled specially to be merged. + 'weight' should not be generated if calc_weights==True. + (see invokers below!) transform_s: Callable to transform node generated from s. transform_o: Callable to transform node generated from o. """ @@ -81,44 +82,46 @@ def rdflib_to_networkx_multidigraph( The subjects and objects are the later nodes of the MultiDiGraph. The predicates are used as edge keys (to identify multi-edges). - :Parameters: - - - graph: a rdflib.Graph. - - edge_attrs: Callable to construct later edge_attributes. It receives + Args: + graph: a rdflib.Graph. + edge_attrs: Callable to construct later edge_attributes. It receives 3 variables (s, p, o) and should construct a dictionary that is passed to networkx's add_edge(s, o, \*\*attrs) function. By default this will include setting the MultiDiGraph key=p here. If you don't want to be able to re-identify the edge later on, you - can set this to ``lambda s, p, o: {}``. In this case MultiDiGraph's + can set this to `lambda s, p, o: {}`. In this case MultiDiGraph's default (increasing ints) will be used. Returns: networkx.MultiDiGraph - >>> from rdflib import Graph, URIRef, Literal - >>> g = Graph() - >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l') - >>> p, q = URIRef('p'), URIRef('q') - >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)] - >>> for t in edges: - ... g.add(t) - ... - >>> mdg = rdflib_to_networkx_multidigraph(g) - >>> len(mdg.edges()) - 4 - >>> mdg.has_edge(a, b) - True - >>> mdg.has_edge(a, b, key=p) - True - >>> mdg.has_edge(a, b, key=q) - True - - >>> mdg = rdflib_to_networkx_multidigraph(g, edge_attrs=lambda s,p,o: {}) - >>> mdg.has_edge(a, b, key=0) - True - >>> mdg.has_edge(a, b, key=1) - True + Example: + ```python + >>> from rdflib import Graph, URIRef, Literal + >>> g = Graph() + >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l') + >>> p, q = URIRef('p'), URIRef('q') + >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)] + >>> for t in edges: + ... g.add(t) + ... 
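+    >>> # g now holds two parallel edges a->b (predicates p and q), plus b->a and b->l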
+ >>> mdg = rdflib_to_networkx_multidigraph(g) + >>> len(mdg.edges()) + 4 + >>> mdg.has_edge(a, b) + True + >>> mdg.has_edge(a, b, key=p) + True + >>> mdg.has_edge(a, b, key=q) + True + + >>> mdg = rdflib_to_networkx_multidigraph(g, edge_attrs=lambda s,p,o: {}) + >>> mdg.has_edge(a, b, key=0) + True + >>> mdg.has_edge(a, b, key=1) + True + ``` """ import networkx as nx @@ -140,11 +143,10 @@ def rdflib_to_networkx_digraph( all triples between s and o. Also by default calculates the edge weight as the length of triples. - :Parameters: - - - ``graph``: a rdflib.Graph. - - ``calc_weights``: If true calculate multi-graph edge-count as edge 'weight' - - ``edge_attrs``: Callable to construct later edge_attributes. It receives + Args: + graph: a rdflib.Graph. + calc_weights: If true calculate multi-graph edge-count as edge 'weight' + edge_attrs: Callable to construct later edge_attributes. It receives 3 variables (s, p, o) and should construct a dictionary that is passed to networkx's add_edge(s, o, \*\*attrs) function. @@ -152,36 +154,38 @@ def rdflib_to_networkx_digraph( which is treated specially by us to be merged. Other attributes of multi-edges will only contain the attributes of the first edge. If you don't want the 'triples' attribute for tracking, set this to - ``lambda s, p, o: {}``. + `lambda s, p, o: {}`. Returns: networkx.DiGraph - >>> from rdflib import Graph, URIRef, Literal - >>> g = Graph() - >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l') - >>> p, q = URIRef('p'), URIRef('q') - >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)] - >>> for t in edges: - ... g.add(t) - ... - >>> dg = rdflib_to_networkx_digraph(g) - >>> dg[a][b]['weight'] - 2 - >>> sorted(dg[a][b]['triples']) == [(a, p, b), (a, q, b)] - True - >>> len(dg.edges()) - 3 - >>> dg.size() - 3 - >>> dg.size(weight='weight') - 4.0 - - >>> dg = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s,p,o:{}) - >>> 'weight' in dg[a][b] - False - >>> 'triples' in dg[a][b] - False - + Example: + ```python + >>> from rdflib import Graph, URIRef, Literal + >>> g = Graph() + >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l') + >>> p, q = URIRef('p'), URIRef('q') + >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)] + >>> for t in edges: + ... g.add(t) + ... + >>> dg = rdflib_to_networkx_digraph(g) + >>> dg[a][b]['weight'] + 2 + >>> sorted(dg[a][b]['triples']) == [(a, p, b), (a, q, b)] + True + >>> len(dg.edges()) + 3 + >>> dg.size() + 3 + >>> dg.size(weight='weight') + 4.0 + + >>> dg = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s,p,o:{}) + >>> 'weight' in dg[a][b] + False + >>> 'triples' in dg[a][b] + False + ``` """ import networkx as nx @@ -198,53 +202,54 @@ def rdflib_to_networkx_graph( ): r"""Converts the given graph into a networkx.Graph. - As an rdflib.Graph() can contain multiple directed edges between nodes, by - default adds the a 'triples' attribute to the single DiGraph edge with a - list of triples between s and o in graph. - Also by default calculates the edge weight as the len(triples). - - :Parameters: + As an [`rdflib.Graph()`][rdflib.Graph] can contain multiple directed edges between nodes, by + default adds the a 'triples' attribute to the single DiGraph edge with a list of triples between s and o in graph. + Also by default calculates the edge weight as the `len(triples)`. - - graph: a rdflib.Graph. - - calc_weights: If true calculate multi-graph edge-count as edge 'weight' - - edge_attrs: Callable to construct later edge_attributes. 
It receives - 3 variables (s, p, o) and should construct a dictionary that is - passed to networkx's add_edge(s, o, \*\*attrs) function. + Args: + graph: a rdflib.Graph. + calc_weights: If true calculate multi-graph edge-count as edge 'weight' + edge_attrs: Callable to construct later edge_attributes. It receives + 3 variables (s, p, o) and should construct a dictionary that is + passed to networkx's add_edge(s, o, \*\*attrs) function. - By default this will include setting the 'triples' attribute here, - which is treated specially by us to be merged. Other attributes of - multi-edges will only contain the attributes of the first edge. - If you don't want the 'triples' attribute for tracking, set this to - ``lambda s, p, o: {}``. + By default this will include setting the 'triples' attribute here, + which is treated specially by us to be merged. Other attributes of + multi-edges will only contain the attributes of the first edge. + If you don't want the 'triples' attribute for tracking, set this to + `lambda s, p, o: {}`. Returns: networkx.Graph - >>> from rdflib import Graph, URIRef, Literal - >>> g = Graph() - >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l') - >>> p, q = URIRef('p'), URIRef('q') - >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)] - >>> for t in edges: - ... g.add(t) - ... - >>> ug = rdflib_to_networkx_graph(g) - >>> ug[a][b]['weight'] - 3 - >>> sorted(ug[a][b]['triples']) == [(a, p, b), (a, q, b), (b, p, a)] - True - >>> len(ug.edges()) - 2 - >>> ug.size() - 2 - >>> ug.size(weight='weight') - 4.0 - - >>> ug = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s,p,o:{}) - >>> 'weight' in ug[a][b] - False - >>> 'triples' in ug[a][b] - False + Example: + ```python + >>> from rdflib import Graph, URIRef, Literal + >>> g = Graph() + >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l') + >>> p, q = URIRef('p'), URIRef('q') + >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)] + >>> for t in edges: + ... g.add(t) + ... + >>> ug = rdflib_to_networkx_graph(g) + >>> ug[a][b]['weight'] + 3 + >>> sorted(ug[a][b]['triples']) == [(a, p, b), (a, q, b), (b, p, a)] + True + >>> len(ug.edges()) + 2 + >>> ug.size() + 2 + >>> ug.size(weight='weight') + 4.0 + + >>> ug = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s,p,o:{}) + >>> 'weight' in ug[a][b] + False + >>> 'triples' in ug[a][b] + False + ``` """ import networkx as nx @@ -255,8 +260,8 @@ def rdflib_to_networkx_graph( def rdflib_to_graphtool( graph: Graph, - v_prop_names: List[str] = ["term"], - e_prop_names: List[str] = ["term"], + v_prop_names: list[str] = ["term"], + e_prop_names: list[str] = ["term"], transform_s=lambda s, p, o: {"term": s}, transform_p=lambda s, p, o: {"term": p}, transform_o=lambda s, p, o: {"term": o}, @@ -266,56 +271,58 @@ def rdflib_to_graphtool( The subjects and objects are the later vertices of the Graph. The predicates become edges. - :Parameters: - - graph: a rdflib.Graph. - - v_prop_names: a list of names for the vertex properties. The default is set - to ['term'] (see transform_s, transform_o below). - - e_prop_names: a list of names for the edge properties. - - transform_s: callable with s, p, o input. Should return a dictionary - containing a value for each name in v_prop_names. By default is set - to {'term': s} which in combination with v_prop_names = ['term'] - adds s as 'term' property to the generated vertex for s. - - transform_p: similar to transform_s, but wrt. e_prop_names. 
By default - returns {'term': p} which adds p as a property to the generated - edge between the vertex for s and the vertex for o. - - transform_o: similar to transform_s. + Args: + graph: a rdflib.Graph. + v_prop_names: a list of names for the vertex properties. The default is set + to ['term'] (see transform_s, transform_o below). + e_prop_names: a list of names for the edge properties. + transform_s: callable with s, p, o input. Should return a dictionary + containing a value for each name in v_prop_names. By default is set + to {'term': s} which in combination with v_prop_names = ['term'] + adds s as 'term' property to the generated vertex for s. + transform_p: similar to transform_s, but wrt. e_prop_names. By default + returns {'term': p} which adds p as a property to the generated + edge between the vertex for s and the vertex for o. + transform_o: similar to transform_s. Returns: graph_tool.Graph() - >>> from rdflib import Graph, URIRef, Literal - >>> g = Graph() - >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l') - >>> p, q = URIRef('p'), URIRef('q') - >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)] - >>> for t in edges: - ... g.add(t) - ... - >>> mdg = rdflib_to_graphtool(g) - >>> len(list(mdg.edges())) - 4 - >>> from graph_tool import util as gt_util - >>> vpterm = mdg.vertex_properties['term'] - >>> va = gt_util.find_vertex(mdg, vpterm, a)[0] - >>> vb = gt_util.find_vertex(mdg, vpterm, b)[0] - >>> vl = gt_util.find_vertex(mdg, vpterm, l)[0] - >>> (va, vb) in [(e.source(), e.target()) for e in list(mdg.edges())] - True - >>> epterm = mdg.edge_properties['term'] - >>> len(list(gt_util.find_edge(mdg, epterm, p))) == 3 - True - >>> len(list(gt_util.find_edge(mdg, epterm, q))) == 1 - True - - >>> mdg = rdflib_to_graphtool( - ... g, - ... e_prop_names=[str('name')], - ... transform_p=lambda s, p, o: {str('name'): unicode(p)}) - >>> epterm = mdg.edge_properties['name'] - >>> len(list(gt_util.find_edge(mdg, epterm, unicode(p)))) == 3 - True - >>> len(list(gt_util.find_edge(mdg, epterm, unicode(q)))) == 1 - True - + Example: + ```python + >>> from rdflib import Graph, URIRef, Literal + >>> g = Graph() + >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l') + >>> p, q = URIRef('p'), URIRef('q') + >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)] + >>> for t in edges: + ... g.add(t) + ... + >>> mdg = rdflib_to_graphtool(g) + >>> len(list(mdg.edges())) + 4 + >>> from graph_tool import util as gt_util + >>> vpterm = mdg.vertex_properties['term'] + >>> va = gt_util.find_vertex(mdg, vpterm, a)[0] + >>> vb = gt_util.find_vertex(mdg, vpterm, b)[0] + >>> vl = gt_util.find_vertex(mdg, vpterm, l)[0] + >>> (va, vb) in [(e.source(), e.target()) for e in list(mdg.edges())] + True + >>> epterm = mdg.edge_properties['term'] + >>> len(list(gt_util.find_edge(mdg, epterm, p))) == 3 + True + >>> len(list(gt_util.find_edge(mdg, epterm, q))) == 1 + True + + >>> mdg = rdflib_to_graphtool( + ... g, + ... e_prop_names=[str('name')], + ... transform_p=lambda s, p, o: {str('name'): unicode(p)}) + >>> epterm = mdg.edge_properties['name'] + >>> len(list(gt_util.find_edge(mdg, epterm, unicode(p)))) == 3 + True + >>> len(list(gt_util.find_edge(mdg, epterm, unicode(q)))) == 1 + True + ``` """ # pytype error: Can't find module 'graph_tool'. 
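+    # graph_tool is an optional dependency, so it is imported lazily here,
+    # inside the converter, keeping rdflib importable without it.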
import graph_tool as gt # pytype: disable=import-error @@ -328,7 +335,7 @@ def rdflib_to_graphtool( eprops = [(epn, g.new_edge_property("object")) for epn in e_prop_names] for epn, eprop in eprops: g.edge_properties[epn] = eprop - node_to_vertex: Dict[Any, Any] = {} + node_to_vertex: dict[Any, Any] = {} for s, p, o in graph: sv = node_to_vertex.get(s) if sv is None: diff --git a/rdflib/extras/infixowl.py b/rdflib/extras/infixowl.py index b80fb0c16..1634bcf85 100644 --- a/rdflib/extras/infixowl.py +++ b/rdflib/extras/infixowl.py @@ -1,58 +1,75 @@ """RDFLib Python binding for OWL Abstract Syntax -OWL Constructor DL Syntax Manchester OWL Syntax Example -==================================================================================== -intersectionOf C ∩ D C AND D Human AND Male -unionOf C ∪ D C OR D Man OR Woman -complementOf ¬ C NOT C NOT Male -oneOf {a} ∪ {b}... {a b ...} {England Italy Spain} -someValuesFrom ∃ R C R SOME C hasColleague SOME Professor -allValuesFrom ∀ R C R ONLY C hasColleague ONLY Professor -minCardinality ≥ N R R MIN 3 hasColleague MIN 3 -maxCardinality ≤ N R R MAX 3 hasColleague MAX 3 -cardinality = N R R EXACTLY 3 hasColleague EXACTLY 3 -hasValue ∃ R {a} R VALUE a hasColleague VALUE Matthew - -see: http://www.w3.org/TR/owl-semantics/syntax.html - http://owl-workshop.man.ac.uk/acceptedLong/submission_9.pdf +| OWL Constructor | DL Syntax | Manchester OWL Syntax | Example | +|------------------|---------------|------------------------|----------------------------------| +| `intersectionOf` | C ∩ D | C AND D | Human AND Male | +| `unionOf` | C ∪ D | C OR D | Man OR Woman | +| `complementOf` | ¬C | NOT C | NOT Male | +| `oneOf` | {a} ∪ {b}... | {a b ...} | {England Italy Spain} | +| `someValuesFrom` | ∃ R C | R SOME C | hasColleague SOME Professor | +| `allValuesFrom` | ∀ R C | R ONLY C | hasColleague ONLY Professor | +| `minCardinality` | ≥ N R | R MIN 3 | hasColleague MIN 3 | +| `maxCardinality` | ≤ N R | R MAX 3 | hasColleague MAX 3 | +| `cardinality` | = N R | R EXACTLY 3 | hasColleague EXACTLY 3 | +| `hasValue` | ∃ R.{a} | R VALUE a | hasColleague VALUE Matthew | + +See: +- http://www.w3.org/TR/owl-semantics/syntax.html +- http://owl-workshop.man.ac.uk/acceptedLong/submission_9.pdf 3.2.3 Axioms for complete classes without using owl:equivalentClass Named class description of type 2 (with owl:oneOf) or type 4-6 (with owl:intersectionOf, owl:unionOf or owl:complementOf -Uses Manchester Syntax for __repr__ +Uses Manchester Syntax for `__repr__` +```python >>> exNs = Namespace("http://example.com/") >>> g = Graph() >>> g.bind("ex", exNs, override=False) +``` + Now we have an empty graph, we can construct OWL classes in it using the Python classes defined in this module +```python >>> a = Class(exNs.Opera, graph=g) +``` + Now we can assert rdfs:subClassOf and owl:equivalentClass relationships (in the underlying graph) with other classes using the 'subClassOf' and 'equivalentClass' descriptors which can be set to a list of objects for the corresponding predicates. +```python >>> a.subClassOf = [exNs.MusicalWork] +``` + We can then access the rdfs:subClassOf relationships +```python >>> print(list(a.subClassOf)) [Class: ex:MusicalWork ] +``` + This can also be used against already populated graphs: +```python >>> owlGraph = Graph().parse(str(OWL)) >>> list(Class(OWL.Class, graph=owlGraph).subClassOf) [Class: rdfs:Class ] +``` + Operators are also available. 
For instance we can add ex:Opera to the extension of the ex:CreativeWork class via the '+=' operator +```python >>> a Class: ex:Opera SubClassOf: ex:MusicalWork >>> b = Class(exNs.CreativeWork, graph=g) @@ -60,29 +77,41 @@ >>> print(sorted(a.subClassOf, key=lambda c:c.identifier)) [Class: ex:CreativeWork , Class: ex:MusicalWork ] +``` + And we can then remove it from the extension as well +```python >>> b -= a >>> a Class: ex:Opera SubClassOf: ex:MusicalWork +``` + Boolean class constructions can also be created with Python operators. For example, The | operator can be used to construct a class consisting of a owl:unionOf the operands: +```python >>> c = a | b | Class(exNs.Work, graph=g) >>> c ( ex:Opera OR ex:CreativeWork OR ex:Work ) +``` + Boolean class expressions can also be operated as lists (using python list operators) +```python >>> del c[c.index(Class(exNs.Work, graph=g))] >>> c ( ex:Opera OR ex:CreativeWork ) +``` + The '&' operator can be used to construct class intersection: +```python >>> woman = Class(exNs.Female, graph=g) & Class(exNs.Human, graph=g) >>> woman.identifier = exNs.Woman >>> woman @@ -90,41 +119,51 @@ >>> len(woman) 2 +``` + Enumerated classes can also be manipulated +```python >>> contList = [Class(exNs.Africa, graph=g), Class(exNs.NorthAmerica, graph=g)] >>> EnumeratedClass(members=contList, graph=g) { ex:Africa ex:NorthAmerica } +``` + owl:Restrictions can also be instantiated: +```python >>> Restriction(exNs.hasParent, graph=g, allValuesFrom=exNs.Human) ( ex:hasParent ONLY ex:Human ) -Restrictions can also be created using Manchester OWL syntax in 'colloquial' -Python +``` + +Restrictions can also be created using Manchester OWL syntax in 'colloquial' Python + +```python >>> exNs.hasParent @ some @ Class(exNs.Physician, graph=g) ( ex:hasParent SOME ex:Physician ) - >>> Property(exNs.hasParent, graph=g) @ max @ Literal(1) ( ex:hasParent MAX 1 ) - >>> print(g.serialize(format='pretty-xml')) # doctest: +SKIP - +``` """ from __future__ import annotations import itertools import logging -from typing import Iterable, Union +from typing import TYPE_CHECKING, Union, cast from rdflib.collection import Collection from rdflib.graph import Graph, _ObjectType from rdflib.namespace import OWL, RDF, RDFS, XSD, Namespace, NamespaceManager -from rdflib.term import BNode, Identifier, Literal, URIRef, Variable +from rdflib.term import BNode, IdentifiedNode, Identifier, Literal, URIRef, Variable from rdflib.util import first +if TYPE_CHECKING: + from collections.abc import Iterable + logger = logging.getLogger(__name__) @@ -134,7 +173,6 @@ Python has the wonderful "in" operator and it would be nice to have additional infix operator like this. This recipe shows how (almost) arbitrary infix operators can be defined. - """ __all__ = [ @@ -368,16 +406,21 @@ def _remover(inst): class Individual: """ A typed individual, the base class of the InfixOWL classes. 
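+
+    A minimal sketch of direct use (the identifier here is illustrative):
+
+    ```python
+    >>> g = Graph()
+    >>> i = Individual(OWL.Thing, g)
+    >>> i.type = OWL.Class
+    >>> (OWL.Thing, RDF.type, OWL.Class) in g
+    True
+
+    ```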
- """ + # Class variable factoryGraph = Graph() # noqa: N815 + # Instance typing + graph: Graph + __identifier: IdentifiedNode + qname: str | None + def serialize(self, graph): for fact in self.factoryGraph.triples((self.identifier, None, None)): graph.add(fact) - def __init__(self, identifier=None, graph=None): + def __init__(self, identifier: IdentifiedNode | None = None, graph=None): self.__identifier = identifier is not None and identifier or BNode() if graph is None: self.graph = self.factoryGraph @@ -422,6 +465,7 @@ def replace(self, other): causing all triples that refer to it to be changed and then delete the individual. + ```python >>> g = Graph() >>> b = Individual(OWL.Restriction, g) >>> b.type = RDFS.Resource @@ -430,6 +474,8 @@ def replace(self, other): >>> del b.type >>> len(list(b.type)) 0 + + ``` """ for s, p, _o in self.graph.triples((None, None, self.identifier)): self.graph.add((s, p, classOrIdentifier(other))) @@ -452,6 +498,7 @@ def _set_type(self, kind: Union[Individual, Identifier, Iterable[_ObjectType]]): @TermDeletionHelper(RDF.type) def _delete_type(self): """ + ```python >>> g = Graph() >>> b = Individual(OWL.Restriction, g) >>> b.type = RDFS.Resource @@ -460,16 +507,17 @@ def _delete_type(self): >>> del b.type >>> len(list(b.type)) 0 + + ``` """ pass # pragma: no cover type = property(_get_type, _set_type, _delete_type) - def _get_identifier(self) -> Identifier: + def _get_identifier(self) -> IdentifiedNode: return self.__identifier - def _set_identifier(self, i: Identifier): - assert i + def _set_identifier(self, i: IdentifiedNode): if i != self.__identifier: oldstatements_out = [ (p, o) @@ -522,15 +570,13 @@ def _delete_sameAs(self): # noqa: N802 class AnnotatableTerms(Individual): - """ - Terms in an OWL ontology with rdfs:label and rdfs:comment - + """Terms in an OWL ontology with rdfs:label and rdfs:comment - ## Interface with ATTEMPTO (http://attempto.ifi.uzh.ch/site) + Interface with ATTEMPTO (http://attempto.ifi.uzh.ch/site) - ### Verbalisation of OWL entity IRIS + ## Verbalisation of OWL entity IRIS - #### How are OWL entity IRIs verbalized? + ### How are OWL entity IRIs verbalized? The OWL verbalizer maps OWL entity IRIs to ACE content words such that @@ -565,39 +611,38 @@ class AnnotatableTerms(Individual): It is possible to specify the mapping of IRIs to surface forms using the following annotation properties: - .. code-block:: none - - http://attempto.ifi.uzh.ch/ace_lexicon#PN_sg - http://attempto.ifi.uzh.ch/ace_lexicon#CN_sg - http://attempto.ifi.uzh.ch/ace_lexicon#CN_pl - http://attempto.ifi.uzh.ch/ace_lexicon#TV_sg - http://attempto.ifi.uzh.ch/ace_lexicon#TV_pl - http://attempto.ifi.uzh.ch/ace_lexicon#TV_vbg + ``` + http://attempto.ifi.uzh.ch/ace_lexicon#PN_sg + http://attempto.ifi.uzh.ch/ace_lexicon#CN_sg + http://attempto.ifi.uzh.ch/ace_lexicon#CN_pl + http://attempto.ifi.uzh.ch/ace_lexicon#TV_sg + http://attempto.ifi.uzh.ch/ace_lexicon#TV_pl + http://attempto.ifi.uzh.ch/ace_lexicon#TV_vbg + ``` For example, the following axioms state that if the IRI "#man" is used as a plural common noun, then the wordform men must be used by the verbalizer. If, however, it is used as a singular transitive verb, then mans must be used. - .. 
code-block:: none - - - - #man - men - - - - - #man - mans - - + ```xml + + + #man + men + + + + + #man + mans + + ``` """ def __init__( self, - identifier, + identifier: IdentifiedNode | None, graph=None, nameAnnotation=None, # noqa: N803 nameIsLabel=False, # noqa: N803 @@ -653,10 +698,13 @@ def _get_comment(self): ): yield comment - def _set_comment(self, comment): + def _set_comment( + self, + comment: IdentifiedNode | Literal | list[IdentifiedNode | Literal] | None, + ): if not comment: return - if isinstance(comment, Identifier): + if isinstance(comment, (IdentifiedNode, Literal)): self.graph.add((self.identifier, RDFS.comment, comment)) else: for c in comment: @@ -690,10 +738,12 @@ def _get_label(self): for label in self.graph.objects(subject=self.identifier, predicate=RDFS.label): yield label - def _set_label(self, label): + def _set_label( + self, label: IdentifiedNode | Literal | list[IdentifiedNode | Literal] | None + ): if not label: return - if isinstance(label, Identifier): + if isinstance(label, (IdentifiedNode, Literal)): self.graph.add((self.identifier, RDFS.label, label)) else: for l_ in label: @@ -702,6 +752,7 @@ def _set_label(self, label): @TermDeletionHelper(RDFS.label) def _delete_label(self): """ + ```python >>> g = Graph() >>> b = Individual(OWL.Restriction,g) >>> b.label = Literal('boo') @@ -710,6 +761,8 @@ def _delete_label(self): >>> del b.label >>> len(list(b.label)) 0 + + ``` """ pass # pragma: no cover @@ -869,6 +922,7 @@ def DeepClassClear(class_to_prune): # noqa: N802 Recursively clear the given class, continuing where any related class is an anonymous class + ```python >>> EX = Namespace("http://example.com/") >>> g = Graph() >>> g.bind("ex", EX, override=False) @@ -905,6 +959,8 @@ def DeepClassClear(class_to_prune): # noqa: N802 >>> otherClass.delete() >>> list(g.triples((otherClass.identifier, None, None))) [] + + ``` """ def deepClearIfBNode(_class): # noqa: N802 @@ -933,8 +989,8 @@ def deepClearIfBNode(_class): # noqa: N802 class MalformedClass(ValueError): # noqa: N818 """ - .. deprecated:: TODO-NEXT-VERSION - This class will be removed in version ``7.0.0``. + !!! warning "Deprecated" + This class will be removed in version `7.0.0`. """ pass @@ -983,19 +1039,20 @@ def CastClass(c, graph=None): # noqa: N802 class Class(AnnotatableTerms): - """ - 'General form' for classes: + """'General form' for classes: The Manchester Syntax (supported in Protege) is used as the basis for the form of this class See: http://owl-workshop.man.ac.uk/acceptedLong/submission_9.pdf: + ``` [Annotation] ‘Class:’ classID {Annotation ( (‘SubClassOf:’ ClassExpression) | (‘EquivalentTo’ ClassExpression) | (’DisjointWith’ ClassExpression)) } + ``` Appropriate excerpts from OWL Reference: @@ -1009,7 +1066,6 @@ class Class(AnnotatableTerms): "..An owl:complementOf property links a class to precisely one class description." 
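+
+    A small usage sketch (the namespace and class names are illustrative):
+
+    ```python
+    >>> exNs = Namespace("http://example.com/")
+    >>> g = Graph()
+    >>> g.bind("ex", exNs, override=False)
+    >>> fish = Class(exNs.Fish, graph=g)
+    >>> fish.subClassOf = [exNs.Animal]
+    >>> (exNs.Fish, RDFS.subClassOf, exNs.Animal) in g
+    True
+
+    ```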
- """ def _serialize(self, graph): @@ -1045,7 +1101,7 @@ def setupNounAnnotations(self, noun_annotations): # noqa: N802 def __init__( self, - identifier=None, + identifier: IdentifiedNode | None = None, subClassOf=None, # noqa: N803 equivalentClass=None, # noqa: N803 disjointWith=None, # noqa: N803 @@ -1152,6 +1208,7 @@ def __and__(self, other): Chaining 3 intersections + ```python >>> exNs = Namespace("http://example.com/") >>> g = Graph() >>> g.bind("ex", exNs, override=False) @@ -1165,6 +1222,8 @@ def __and__(self, other): True >>> isinstance(youngWoman.identifier, BNode) True + + ``` """ return BooleanClass( operator=OWL.intersectionOf, members=[self, other], graph=self.graph @@ -1174,7 +1233,10 @@ def _get_subclassof(self): for anc in self.graph.objects( subject=self.identifier, predicate=RDFS.subClassOf ): - yield Class(anc, graph=self.graph, skipOWLClassMembership=True) + # We must assume all objects we get back are URIRef or BNodes + yield Class( + cast(IdentifiedNode, anc), graph=self.graph, skipOWLClassMembership=True + ) def _set_subclassof(self, other): if not other: @@ -1194,7 +1256,7 @@ def _get_equivalentclass(self): for ec in self.graph.objects( subject=self.identifier, predicate=OWL.equivalentClass ): - yield Class(ec, graph=self.graph) + yield Class(cast(IdentifiedNode, ec), graph=self.graph) def _set_equivalentclass(self, other): if not other: @@ -1216,7 +1278,7 @@ def _get_disjointwith(self): for dc in self.graph.objects( subject=self.identifier, predicate=OWL.disjointWith ): - yield Class(dc, graph=self.graph) + yield Class(cast(IdentifiedNode, dc), graph=self.graph) def _set_disjointwith(self, other): if not other: @@ -1239,7 +1301,7 @@ def _get_complementof(self): if not comp: return None elif len(comp) == 1: - return Class(comp[0], graph=self.graph) + return Class(cast(IdentifiedNode, comp[0]), graph=self.graph) else: raise Exception(len(comp)) @@ -1261,6 +1323,7 @@ def _get_parents(self): computed attributes that returns a generator over taxonomic 'parents' by disjunction, conjunction, and subsumption + ```python >>> from rdflib.util import first >>> exNs = Namespace('http://example.com/') >>> g = Graph() @@ -1281,6 +1344,7 @@ def _get_parents(self): >>> list(father.parents) [Class: ex:Parent , Class: ex:Male ] + ``` """ for parent in itertools.chain(self.subClassOf, self.equivalentClass): yield parent @@ -1310,7 +1374,7 @@ def isPrimitive(self): # noqa: N802 # sc = list(self.subClassOf) ec = list(self.equivalentClass) for _boolclass, p, rdf_list in self.graph.triples_choices( - # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "Tuple[Any, List[URIRef], None]"; expected "Union[Tuple[List[Node], Node, Node], Tuple[Node, List[Node], Node], Tuple[Node, Node, List[Node]]]" + # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "Tuple[Any, list[URIRef], None]"; expected "Union[Tuple[List[Node], Node, Node], tuple[Node, list[Node], Node], tuple[Node, Node, list[Node]]]" (self.identifier, [OWL.intersectionOf, OWL.unionOf], None) # type: ignore[arg-type] ): ec.append(manchesterSyntax(rdf_list, self.graph, boolean=p)) @@ -1340,7 +1404,7 @@ def manchesterClass(self, full=False, normalization=True): # noqa: N802 sc = list(self.subClassOf) ec = list(self.equivalentClass) for _boolclass, p, rdf_list in self.graph.triples_choices( - # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "Tuple[Any, List[URIRef], None]"; expected "Union[Tuple[List[Node], Node, Node], Tuple[Node, List[Node], Node], 
Tuple[Node, Node, List[Node]]]" + # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "Tuple[Any, list[URIRef], None]"; expected "Union[Tuple[List[Node], Node, Node], tuple[Node, list[Node], Node], tuple[Node, Node, list[Node]]]" (self.identifier, [OWL.intersectionOf, OWL.unionOf], None) # type: ignore[arg-type] ): ec.append(manchesterSyntax(rdf_list, self.graph, boolean=p)) @@ -1349,10 +1413,8 @@ def manchesterClass(self, full=False, normalization=True): # noqa: N802 if c: dc.append(c) klasskind = "" - label = list(self.graph.objects(self.identifier, RDFS.label)) - # type error: Incompatible types in assignment (expression has type "str", variable has type "List[Node]") - # type error: Unsupported operand types for + ("str" and "Node") - label = label and "(" + label[0] + ")" or "" # type: ignore[assignment, operator] + label_list = list(self.graph.objects(self.identifier, RDFS.label)) + label = "" if len(label_list) < 1 else "(" + label_list[0] + ")" if sc: if full: scjoin = "\n " @@ -1494,14 +1556,14 @@ def __iadd__(self, other): class EnumeratedClass(OWLRDFListProxy, Class): - """ - Class for owl:oneOf forms: + """Class for owl:oneOf forms: OWL Abstract Syntax is used axiom ::= 'EnumeratedClass(' classID ['Deprecated'] { annotation } { individualID } ')' + ```python >>> exNs = Namespace("http://example.com/") >>> g = Graph() >>> g.bind("ex", exNs, override=False) @@ -1525,6 +1587,8 @@ class EnumeratedClass(OWLRDFListProxy, Class): owl:oneOf ( ex:chime ex:uche ex:ejike ) . + + ``` """ _operator = OWL.oneOf @@ -1564,6 +1628,7 @@ def serialize(self, graph): class BooleanClassExtentHelper: """ + ```python >>> testGraph = Graph() >>> Individual.factoryGraph = testGraph >>> EX = Namespace("http://example.com/") @@ -1579,6 +1644,8 @@ class BooleanClassExtentHelper: >>> for c in BooleanClass.getUnions(): ... print(c) #doctest: +SKIP ( ex:Fire OR ex:Water ) + + ``` """ def __init__(self, operator): @@ -1605,7 +1672,6 @@ class BooleanClass(OWLRDFListProxy, Class): See: http://www.w3.org/TR/owl-ref/#Boolean owl:complementOf is an attribute of Class, however - """ @BooleanClassExtentHelper(OWL.intersectionOf) @@ -1672,6 +1738,7 @@ def changeOperator(self, newOperator): # noqa: N802, N803 Converts a unionOf / intersectionOf class expression into one that instead uses the given operator + ```python >>> testGraph = Graph() >>> Individual.factoryGraph = testGraph >>> EX = Namespace("http://example.com/") @@ -1690,6 +1757,7 @@ def changeOperator(self, newOperator): # noqa: N802, N803 ... print(e) # doctest: +SKIP The new operator is already being used! + ``` """ assert newOperator != self._operator, "The new operator is already being used!" 
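+        # Guarded by the assert above: unlink the membership list from the old
+        # operator so the same rdf:List can be re-asserted under `newOperator`.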
self.graph.remove((self.identifier, self._operator, self._rdfList.uri)) @@ -1720,20 +1788,20 @@ def AllDifferent(members): # noqa: N802 TODO: implement this function DisjointClasses(' description description { description } ')' - """ pass # pragma: no cover class Restriction(Class): """ + ``` restriction ::= 'restriction(' datavaluedPropertyID dataRestrictionComponent { dataRestrictionComponent } ')' | 'restriction(' individualvaluedPropertyID individualRestrictionComponent { individualRestrictionComponent } ')' - + ``` """ restrictionKinds = [ # noqa: N815 @@ -1748,27 +1816,34 @@ class Restriction(Class): def __init__( self, onProperty, # noqa: N803 - graph=None, - allValuesFrom=None, # noqa: N803 - someValuesFrom=None, # noqa: N803 - value=None, - cardinality=None, - maxCardinality=None, # noqa: N803 - minCardinality=None, # noqa: N803 - identifier=None, + graph: Graph | None = None, + allValuesFrom: ( # noqa: N803 + IdentifiedNode | Literal | Class | bool | None + ) = None, + someValuesFrom: ( # noqa: N803 + IdentifiedNode | Literal | Class | bool | None + ) = None, + value: IdentifiedNode | Literal | Class | bool | None = None, + cardinality: IdentifiedNode | Literal | Class | bool | None = None, + maxCardinality: ( # noqa: N803 + IdentifiedNode | Literal | Class | bool | None + ) = None, + minCardinality: ( # noqa: N803 + IdentifiedNode | Literal | Class | bool | None + ) = None, + identifier: IdentifiedNode | None = None, ): graph = Graph() if graph is None else graph super(Restriction, self).__init__( identifier, graph=graph, skipOWLClassMembership=True ) + self_id_node: IdentifiedNode = self.identifier if ( - self.identifier, + self_id_node, OWL.onProperty, propertyOrIdentifier(onProperty), ) not in graph: - graph.add( - (self.identifier, OWL.onProperty, propertyOrIdentifier(onProperty)) - ) + graph.add((self_id_node, OWL.onProperty, propertyOrIdentifier(onProperty))) self.onProperty = onProperty restr_types = [ (allValuesFrom, OWL.allValuesFrom), @@ -1787,7 +1862,7 @@ def __init__( ) restriction_range, restriction_type = valid_restr_props.pop() self.restrictionType = restriction_type - if isinstance(restriction_range, Identifier): + if isinstance(restriction_range, (IdentifiedNode, Literal)): self.restrictionRange = restriction_range elif isinstance(restriction_range, Class): self.restrictionRange = classOrIdentifier(restriction_range) @@ -1795,21 +1870,22 @@ def __init__( # error: Incompatible types in assignment (expression has type "Optional[Identifier]", variable has type "Identifier") self.restrictionRange = first( # type: ignore[assignment] # type error: Argument 1 to "first" has incompatible type "Generator[Node, None, None]"; expected "Iterable[Identifier]" - self.graph.objects(self.identifier, restriction_type) # type: ignore[arg-type] + self.graph.objects(self_id_node, restriction_type) # type: ignore[arg-type] ) if ( - self.identifier, + self_id_node, restriction_type, self.restrictionRange, ) not in self.graph: - self.graph.add((self.identifier, restriction_type, self.restrictionRange)) - assert self.restrictionRange is not None, Class(self.identifier) - if (self.identifier, RDF.type, OWL.Restriction) not in self.graph: - self.graph.add((self.identifier, RDF.type, OWL.Restriction)) - self.graph.remove((self.identifier, RDF.type, OWL.Class)) + self.graph.add((self_id_node, restriction_type, self.restrictionRange)) + assert self.restrictionRange is not None, Class(self_id_node) + if (self_id_node, RDF.type, OWL.Restriction) not in self.graph: + 
self.graph.add((self_id_node, RDF.type, OWL.Restriction)) + self.graph.remove((self_id_node, RDF.type, OWL.Class)) def serialize(self, graph): """ + ```python >>> g1 = Graph() >>> g2 = Graph() >>> EX = Namespace("http://example.com/") @@ -1829,6 +1905,8 @@ def serialize(self, graph): ... ) #doctest: +NORMALIZE_WHITESPACE +SKIP [rdflib.term.URIRef( 'http://www.w3.org/2002/07/owl#DatatypeProperty')] + + ``` """ Property(self.onProperty, graph=self.graph, baseType=None).serialize(graph) for s, p, o in self.graph.triples((self.identifier, None, None)): @@ -1883,7 +1961,7 @@ def _get_allvaluesfrom(self): for i in self.graph.objects( subject=self.identifier, predicate=OWL.allValuesFrom ): - return Class(i, graph=self.graph) + return Class(cast(IdentifiedNode, i), graph=self.graph) return None def _set_allvaluesfrom(self, other): @@ -1907,7 +1985,7 @@ def _get_somevaluesfrom(self): for i in self.graph.objects( subject=self.identifier, predicate=OWL.someValuesFrom ): - return Class(i, graph=self.graph) + return Class(cast(IdentifiedNode, i), graph=self.graph) return None def _set_somevaluesfrom(self, other): @@ -1929,7 +2007,7 @@ def _del_somevaluesfrom(self): def _get_hasvalue(self): for i in self.graph.objects(subject=self.identifier, predicate=OWL.hasValue): - return Class(i, graph=self.graph) + return Class(cast(IdentifiedNode, i), graph=self.graph) return None def _set_hasvalue(self, other): @@ -1949,7 +2027,7 @@ def _del_hasvalue(self): def _get_cardinality(self): for i in self.graph.objects(subject=self.identifier, predicate=OWL.cardinality): - return Class(i, graph=self.graph) + return Class(cast(IdentifiedNode, i), graph=self.graph) return None def _set_cardinality(self, other): @@ -1971,7 +2049,7 @@ def _get_maxcardinality(self): for i in self.graph.objects( subject=self.identifier, predicate=OWL.maxCardinality ): - return Class(i, graph=self.graph) + return Class(cast(IdentifiedNode, i), graph=self.graph) return None def _set_maxcardinality(self, other): @@ -1995,7 +2073,7 @@ def _get_mincardinality(self): for i in self.graph.objects( subject=self.identifier, predicate=OWL.minCardinality ): - return Class(i, graph=self.graph) + return Class(cast(IdentifiedNode, i), graph=self.graph) return None def _set_mincardinality(self, other): @@ -2016,12 +2094,12 @@ def _del_mincardinality(self): ) def restrictionKind(self): # noqa: N802 + self_id_node: IdentifiedNode = self.identifier for s, p, o in self.graph.triples_choices( - # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "Tuple[Any, List[URIRef], None]"; expected "Union[Tuple[List[Node], Node, Node], Tuple[Node, List[Node], Node], Tuple[Node, Node, List[Node]]]" - (self.identifier, self.restrictionKinds, None) # type: ignore[arg-type] + # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type + (self_id_node, self.restrictionKinds, None) # type: ignore[arg-type] ): - # type error: "Node" has no attribute "split" - return p.split(str(OWL))[-1] # type: ignore[attr-defined] + return p.split(str(OWL))[-1] return None def __repr__(self): @@ -2061,6 +2139,7 @@ def __repr__(self): class Property(AnnotatableTerms): """ + ``` axiom ::= 'DatatypeProperty(' datavaluedPropertyID ['Deprecated'] { annotation } { 'super(' datavaluedPropertyID ')'} ['Functional'] @@ -2073,25 +2152,21 @@ class Property(AnnotatableTerms): 'Functional' 'InverseFunctional' | 'Transitive' ] { 'domain(' description ')' } { 'range(' description ')' } ') - + ``` """ def setupVerbAnnotations(self, verb_annotations): # noqa: N802 - 
""" - - OWL properties map to ACE transitive verbs (TV) + """OWL properties map to ACE transitive verbs (TV) There are 6 morphological categories that determine the surface form of an IRI: - singular form of a transitive verb (e.g. mans) - plural form of a transitive verb (e.g. man) - past participle form a transitive verb (e.g. manned) - - http://attempto.ifi.uzh.ch/ace_lexicon#TV_sg - http://attempto.ifi.uzh.ch/ace_lexicon#TV_pl - http://attempto.ifi.uzh.ch/ace_lexicon#TV_vbg - + - singular form of a transitive verb (e.g. mans) + - plural form of a transitive verb (e.g. man) + - past participle form a transitive verb (e.g. manned) + - http://attempto.ifi.uzh.ch/ace_lexicon#TV_sg + - http://attempto.ifi.uzh.ch/ace_lexicon#TV_pl + - http://attempto.ifi.uzh.ch/ace_lexicon#TV_vbg """ if isinstance(verb_annotations, tuple): @@ -2190,7 +2265,7 @@ def __repr__(self): ) ) for _s, _p, roletype in self.graph.triples_choices( - # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "Tuple[Any, URIRef, List[URIRef]]"; expected "Union[Tuple[List[Node], Node, Node], Tuple[Node, List[Node], Node], Tuple[Node, Node, List[Node]]]" + # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "Tuple[Any, URIRef, list[URIRef]]"; expected "Union[Tuple[List[Node], Node, Node], tuple[Node, list[Node], Node], tuple[Node, Node, list[Node]]]" ( # type: ignore[arg-type] self.identifier, RDF.type, @@ -2201,8 +2276,7 @@ def __repr__(self): ], ) ): - # type error: "Node" has no attribute "split" - rt.append(str(roletype.split(str(OWL))[-1])) # type: ignore[attr-defined] + rt.append(str(roletype).split(str(OWL), 1)[-1]) else: rt.append( "DatatypeProperty( %s %s" @@ -2296,7 +2370,7 @@ def _del_inverseof(self): def _get_domain(self): for dom in self.graph.objects(subject=self.identifier, predicate=RDFS.domain): - yield Class(dom, graph=self.graph) + yield Class(cast(IdentifiedNode, dom), graph=self.graph) def _set_domain(self, other): if not other: @@ -2315,7 +2389,7 @@ def _del_domain(self): def _get_range(self): for ran in self.graph.objects(subject=self.identifier, predicate=RDFS.range): - yield Class(ran, graph=self.graph) + yield Class(cast(IdentifiedNode, ran), graph=self.graph) def _set_range(self, ranges): if not ranges: diff --git a/rdflib/extras/shacl.py b/rdflib/extras/shacl.py index 30fdab07b..1330a16ac 100644 --- a/rdflib/extras/shacl.py +++ b/rdflib/extras/shacl.py @@ -4,34 +4,49 @@ from __future__ import annotations -from typing import Optional, Union +from typing import TYPE_CHECKING -from rdflib import Graph, Literal, URIRef, paths +from rdflib import BNode, Graph, Literal, URIRef, paths +from rdflib.collection import Collection from rdflib.namespace import RDF, SH from rdflib.paths import Path -from rdflib.term import Node + +if TYPE_CHECKING: + from rdflib.graph import _ObjectType + from rdflib.term import IdentifiedNode class SHACLPathError(Exception): pass +# Map the variable length path operators to the corresponding SHACL path predicates +_PATH_MOD_TO_PRED = { + paths.ZeroOrMore: SH.zeroOrMorePath, + paths.OneOrMore: SH.oneOrMorePath, + paths.ZeroOrOne: SH.zeroOrOnePath, +} + + # This implementation is roughly based on # pyshacl.helper.sparql_query_helper::SPARQLQueryHelper._shacl_path_to_sparql_path def parse_shacl_path( shapes_graph: Graph, - path_identifier: Node, -) -> Union[URIRef, Path]: + path_identifier: _ObjectType, +) -> URIRef | Path: """ Parse a valid SHACL path (e.g. 
the object of a triple with predicate sh:path) - from a :class:`~rdflib.graph.Graph` as a :class:`~rdflib.term.URIRef` if the path - is simply a predicate or a :class:`~rdflib.paths.Path` otherwise. + from a [`Graph`][rdflib.graph.Graph] as a [`URIRef`][rdflib.term.URIRef] if the path + is simply a predicate or a [`Path`][rdflib.paths.Path] otherwise. - :param shapes_graph: A :class:`~rdflib.graph.Graph` containing the path to be parsed - :param path_identifier: A :class:`~rdflib.term.Node` of the path - :return: A :class:`~rdflib.term.URIRef` or a :class:`~rdflib.paths.Path` + Args: + shapes_graph: A [`Graph`][rdflib.graph.Graph] containing the path to be parsed + path_identifier: A [`Node`][rdflib.term.Node] of the path + + Returns: + A [`URIRef`][rdflib.term.URIRef] or a [`Path`][rdflib.paths.Path] """ - path: Optional[Union[URIRef, Path]] = None + path: URIRef | Path | None = None # Literals are not allowed. if isinstance(path_identifier, Literal): @@ -91,3 +106,116 @@ def parse_shacl_path( raise SHACLPathError(f"Cannot parse {repr(path_identifier)} as a SHACL Path.") return path + + +def _build_path_component( + graph: Graph, path_component: URIRef | Path +) -> IdentifiedNode: + """ + Helper method that implements the recursive component of SHACL path + triple construction. + + Args: + graph: A [`Graph`][rdflib.graph.Graph] into which to insert triples + path_component: A [`URIRef`][rdflib.term.URIRef] or + [`Path`][rdflib.paths.Path] that is part of a path expression + + Returns: + The [`IdentifiedNode`][rdflib.term.IdentifiedNode] of the resource in the + graph that corresponds to the provided path_component + """ + # Literals or other types are not allowed + if not isinstance(path_component, (URIRef, Path)): + raise TypeError( + f"Objects of type {type(path_component)} are not valid " + + "components of a SHACL path." + ) + + # If the path component is a URI, return it + elif isinstance(path_component, URIRef): + return path_component + # Otherwise, the path component is represented as a blank node + bnode = BNode() + + # Handle Sequence Paths + if isinstance(path_component, paths.SequencePath): + # Sequence paths are a Collection directly with at least two items + if len(path_component.args) < 2: + raise SHACLPathError( + "A list of SHACL Sequence Paths must contain at least two path items." + ) + Collection( + graph, + bnode, + [_build_path_component(graph, arg) for arg in path_component.args], + ) + + # Handle Inverse Paths + elif isinstance(path_component, paths.InvPath): + graph.add( + (bnode, SH.inversePath, _build_path_component(graph, path_component.arg)) + ) + + # Handle Alternative Paths + elif isinstance(path_component, paths.AlternativePath): + # Alternative paths are a Collection but referenced by sh:alternativePath + # with at least two items + if len(path_component.args) < 2: + raise SHACLPathError( + "A list of SHACL Alternative Paths must contain at least two path items." + ) + coll = Collection( + graph, + BNode(), + [_build_path_component(graph, arg) for arg in path_component.args], + ) + graph.add((bnode, SH.alternativePath, coll.uri)) + + # Handle Variable Length Paths + elif isinstance(path_component, paths.MulPath): + # Get the predicate corresponding to the path modifier + pred = _PATH_MOD_TO_PRED.get(path_component.mod) + if pred is None: + raise SHACLPathError(f"Unknown path modifier {path_component.mod}") + graph.add((bnode, pred, _build_path_component(graph, path_component.path))) + + # Return the blank node created for the provided path_component + return bnode + + +def build_shacl_path( + path: URIRef | Path, target_graph: Graph | None = None +) -> tuple[IdentifiedNode, Graph | None]: + """ + Build the SHACL Path triples for a path given by a [`URIRef`][rdflib.term.URIRef] for + simple paths or a [`Path`][rdflib.paths.Path] for complex paths. + + Returns an [`IdentifiedNode`][rdflib.term.IdentifiedNode] for the path (which should be + the object of a triple with predicate `sh:path`) and the graph into which any + new triples were added. + + Args: + path: A [`URIRef`][rdflib.term.URIRef] or a [`Path`][rdflib.paths.Path] + target_graph: Optionally, a [`Graph`][rdflib.graph.Graph] into which to put + constructed triples. If not provided, a new graph will be created + + Returns: + A (path_identifier, graph) tuple where: + - path_identifier: If path is a [`URIRef`][rdflib.term.URIRef], this is simply + the provided path. If path is a [`Path`][rdflib.paths.Path], this is + the [`BNode`][rdflib.term.BNode] corresponding to the root of the SHACL + path expression added to the graph. + - graph: None if path is a [`URIRef`][rdflib.term.URIRef] (as no new triples + are constructed). If path is a [`Path`][rdflib.paths.Path], this is either the + target_graph provided or a new graph into which the path triples were added. + """ + # If a path is a URI, that's the whole path. No graph needs to be constructed. + if isinstance(path, URIRef): + return path, None + + # Create a graph if one was not provided + if target_graph is None: + target_graph = Graph() + + # Recurse through the path to build the graph representation + return _build_path_component(target_graph, path), target_graph
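The `build_shacl_path` helper added above is intended as the inverse of `parse_shacl_path`, so a constructed path should survive a round trip through the SHACL triples. A minimal sketch of that usage (the `EX` namespace, the shape name, and the particular compound path are illustrative, not part of this changeset):

```python
from rdflib import Namespace
from rdflib.extras.shacl import build_shacl_path, parse_shacl_path
from rdflib.namespace import SH

EX = Namespace("http://example.com/")

# A compound path: zero-or-more ex:parent steps followed by ex:name,
# i.e. a MulPath nested inside a SequencePath.
path = EX.parent * "*" / EX.name

# Serialise the path into SHACL path triples; build_shacl_path creates
# a fresh graph here because no target_graph is supplied.
path_node, sg = build_shacl_path(path)
assert sg is not None  # a Path (unlike a bare URIRef) always yields new triples
sg.add((EX.PersonShape, SH.path, path_node))

# Parsing the triples back should yield an equivalent RDFLib path object.
assert parse_shacl_path(sg, path_node) == path
```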
diff --git a/rdflib/graph.py b/rdflib/graph.py index 80ccc3fa8..7a4fb910a 100644 --- a/rdflib/graph.py +++ b/rdflib/graph.py @@ -1,40 +1,36 @@ -"""\ - +""" RDFLib defines the following kinds of Graphs: -* :class:`~rdflib.graph.Graph` -* :class:`~rdflib.graph.QuotedGraph` -* :class:`~rdflib.graph.ConjunctiveGraph` -* :class:`~rdflib.graph.Dataset` +* [`Graph`][rdflib.graph.Graph] +* [`QuotedGraph`][rdflib.graph.QuotedGraph] +* [`ConjunctiveGraph`][rdflib.graph.ConjunctiveGraph] +* [`Dataset`][rdflib.graph.Dataset] -Graph ------ +## Graph -An RDF graph is a set of RDF triples. Graphs support the python ``in`` +An RDF graph is a set of RDF triples. Graphs support the python `in` operator, as well as iteration and some operations like union, difference and intersection. -see :class:`~rdflib.graph.Graph` +See [`Graph`][rdflib.graph.Graph] -Conjunctive Graph ----------------- +## Conjunctive Graph -.. warning:: - ConjunctiveGraph is deprecated, use :class:`~rdflib.graph.Dataset` instead. +!!! warning "Deprecation notice" + `ConjunctiveGraph` is deprecated, use [`Dataset`][rdflib.graph.Dataset] instead. A Conjunctive Graph is the most relevant collection of graphs that are -considered to be the boundary for closed world assumptions.
This +considered to be the boundary for closed world assumptions. This boundary is equivalent to that of the store instance (which is itself uniquely identified and distinct from other instances of -:class:`~rdflib.store.Store` that signify other Conjunctive Graphs). It is +[`Store`][rdflib.store.Store] that signify other Conjunctive Graphs). It is equivalent to all the named graphs within it and associated with a -``_default_`` graph which is automatically assigned a -:class:`~rdflib.term.BNode` for an identifier - if one isn't given. +`_default_` graph which is automatically assigned a +[`BNode`][rdflib.term.BNode] for an identifier - if one isn't given. -see :class:`~rdflib.graph.ConjunctiveGraph` +See [`ConjunctiveGraph`][rdflib.graph.ConjunctiveGraph] -Quoted graph ------------- +## Quoted graph The notion of an RDF graph [14] is extended to include the concept of a formula node. A formula node may occur wherever any other kind of @@ -48,10 +44,9 @@ This is intended to map the idea of "{ N3-expression }" that is used by N3 into an RDF graph upon which RDF semantics is defined. -see :class:`~rdflib.graph.QuotedGraph` +See [`QuotedGraph`][rdflib.graph.QuotedGraph] -Dataset -------- +## Dataset The RDF 1.1 Dataset, a small extension to the Conjunctive Graph. The primary term is "graphs in the datasets" and not "contexts with quads" @@ -62,73 +57,84 @@ at creation time). This implementation includes a convenience method to directly add a single quad to a dataset graph. -see :class:`~rdflib.graph.Dataset` +See [`Dataset`][rdflib.graph.Dataset] -Working with graphs -=================== +## Working with graphs Instantiating Graphs with default store (Memory) and default identifier (a BNode): - >>> g = Graph() - >>> g.store.__class__ - - >>> g.identifier.__class__ - +```python +>>> g = Graph() +>>> g.store.__class__ + +>>> g.identifier.__class__ + + +``` Instantiating Graphs with a Memory store and an identifier - : - >>> g = Graph('Memory', URIRef("https://rdflib.github.io")) - >>> g.identifier - rdflib.term.URIRef('https://rdflib.github.io') - >>> str(g) # doctest: +NORMALIZE_WHITESPACE - " a rdfg:Graph;rdflib:storage - [a rdflib:Store;rdfs:label 'Memory']." +```python +>>> g = Graph('Memory', URIRef("https://rdflib.github.io")) +>>> g.identifier +rdflib.term.URIRef('https://rdflib.github.io') +>>> str(g) # doctest: +NORMALIZE_WHITESPACE +" a rdfg:Graph;rdflib:storage + [a rdflib:Store;rdfs:label 'Memory']." + +``` Creating a ConjunctiveGraph - The top level container for all named Graphs in a "database": - >>> g = ConjunctiveGraph() - >>> str(g.default_context) - "[a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']]." +```python +>>> g = ConjunctiveGraph() +>>> str(g.default_context) +"[a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']]." + +``` Adding / removing reified triples to Graph and iterating over it directly or via triple pattern: - >>> g = Graph() - >>> statementId = BNode() - >>> print(len(g)) - 0 - >>> g.add((statementId, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS - )> - >>> g.add((statementId, RDF.subject, - ... URIRef("https://rdflib.github.io/store/ConjunctiveGraph"))) # doctest: +ELLIPSIS - )> - >>> g.add((statementId, RDF.predicate, namespace.RDFS.label)) # doctest: +ELLIPSIS - )> - >>> g.add((statementId, RDF.object, Literal("Conjunctive Graph"))) # doctest: +ELLIPSIS - )> - >>> print(len(g)) - 4 - >>> for s, p, o in g: - ... print(type(s)) - ... - - - - - - >>> for s, p, o in g.triples((None, RDF.object, None)): - ... print(o) - ... 
- Conjunctive Graph - >>> g.remove((statementId, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS - )> - >>> print(len(g)) - 3 +```python +>>> g = Graph() +>>> statementId = BNode() +>>> print(len(g)) +0 +>>> g.add((statementId, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS +)> +>>> g.add((statementId, RDF.subject, +... URIRef("https://rdflib.github.io/store/ConjunctiveGraph"))) # doctest: +ELLIPSIS +)> +>>> g.add((statementId, RDF.predicate, namespace.RDFS.label)) # doctest: +ELLIPSIS +)> +>>> g.add((statementId, RDF.object, Literal("Conjunctive Graph"))) # doctest: +ELLIPSIS +)> +>>> print(len(g)) +4 +>>> for s, p, o in g: +... print(type(s)) +... + + + + + +>>> for s, p, o in g.triples((None, RDF.object, None)): +... print(o) +... +Conjunctive Graph +>>> g.remove((statementId, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS +)> +>>> print(len(g)) +3 + +``` -``None`` terms in calls to :meth:`~rdflib.graph.Graph.triples` can be +`None` terms in calls to [`triples()`][rdflib.graph.Graph.triples] can be thought of as "open variables". Graph support set-theoretic operators, you can add/subtract graphs, as @@ -138,113 +144,126 @@ Note that BNode IDs are kept when doing set-theoretic operations, this may or may not be what you want. Two named graphs within the same application probably want share BNode IDs, two graphs with data from -different sources probably not. If your BNode IDs are all generated +different sources probably not. If your BNode IDs are all generated by RDFLib they are UUIDs and unique. - >>> g1 = Graph() - >>> g2 = Graph() - >>> u = URIRef("http://example.com/foo") - >>> g1.add([u, namespace.RDFS.label, Literal("foo")]) # doctest: +ELLIPSIS - )> - >>> g1.add([u, namespace.RDFS.label, Literal("bar")]) # doctest: +ELLIPSIS - )> - >>> g2.add([u, namespace.RDFS.label, Literal("foo")]) # doctest: +ELLIPSIS - )> - >>> g2.add([u, namespace.RDFS.label, Literal("bing")]) # doctest: +ELLIPSIS - )> - >>> len(g1 + g2) # adds bing as label - 3 - >>> len(g1 - g2) # removes foo - 1 - >>> len(g1 * g2) # only foo - 1 - >>> g1 += g2 # now g1 contains everything - +```python +>>> g1 = Graph() +>>> g2 = Graph() +>>> u = URIRef("http://example.com/foo") +>>> g1.add([u, namespace.RDFS.label, Literal("foo")]) # doctest: +ELLIPSIS +)> +>>> g1.add([u, namespace.RDFS.label, Literal("bar")]) # doctest: +ELLIPSIS +)> +>>> g2.add([u, namespace.RDFS.label, Literal("foo")]) # doctest: +ELLIPSIS +)> +>>> g2.add([u, namespace.RDFS.label, Literal("bing")]) # doctest: +ELLIPSIS +)> +>>> len(g1 + g2) # adds bing as label +3 +>>> len(g1 - g2) # removes foo +1 +>>> len(g1 * g2) # only foo +1 +>>> g1 += g2 # now g1 contains everything + +``` Graph Aggregation - ConjunctiveGraphs and ReadOnlyGraphAggregate within the same store: - >>> store = plugin.get("Memory", Store)() - >>> g1 = Graph(store) - >>> g2 = Graph(store) - >>> g3 = Graph(store) - >>> stmt1 = BNode() - >>> stmt2 = BNode() - >>> stmt3 = BNode() - >>> g1.add((stmt1, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS - )> - >>> g1.add((stmt1, RDF.subject, - ... URIRef('https://rdflib.github.io/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS - )> - >>> g1.add((stmt1, RDF.predicate, namespace.RDFS.label)) # doctest: +ELLIPSIS - )> - >>> g1.add((stmt1, RDF.object, Literal('Conjunctive Graph'))) # doctest: +ELLIPSIS - )> - >>> g2.add((stmt2, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS - )> - >>> g2.add((stmt2, RDF.subject, - ... 
URIRef('https://rdflib.github.io/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS - )> - >>> g2.add((stmt2, RDF.predicate, RDF.type)) # doctest: +ELLIPSIS - )> - >>> g2.add((stmt2, RDF.object, namespace.RDFS.Class)) # doctest: +ELLIPSIS - )> - >>> g3.add((stmt3, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS - )> - >>> g3.add((stmt3, RDF.subject, - ... URIRef('https://rdflib.github.io/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS - )> - >>> g3.add((stmt3, RDF.predicate, namespace.RDFS.comment)) # doctest: +ELLIPSIS - )> - >>> g3.add((stmt3, RDF.object, Literal( - ... 'The top-level aggregate graph - The sum ' + - ... 'of all named graphs within a Store'))) # doctest: +ELLIPSIS - )> - >>> len(list(ConjunctiveGraph(store).subjects(RDF.type, RDF.Statement))) - 3 - >>> len(list(ReadOnlyGraphAggregate([g1,g2]).subjects( - ... RDF.type, RDF.Statement))) - 2 - -ConjunctiveGraphs have a :meth:`~rdflib.graph.ConjunctiveGraph.quads` method +```python +>>> store = plugin.get("Memory", Store)() +>>> g1 = Graph(store) +>>> g2 = Graph(store) +>>> g3 = Graph(store) +>>> stmt1 = BNode() +>>> stmt2 = BNode() +>>> stmt3 = BNode() +>>> g1.add((stmt1, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS +)> +>>> g1.add((stmt1, RDF.subject, +... URIRef('https://rdflib.github.io/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS +)> +>>> g1.add((stmt1, RDF.predicate, namespace.RDFS.label)) # doctest: +ELLIPSIS +)> +>>> g1.add((stmt1, RDF.object, Literal('Conjunctive Graph'))) # doctest: +ELLIPSIS +)> +>>> g2.add((stmt2, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS +)> +>>> g2.add((stmt2, RDF.subject, +... URIRef('https://rdflib.github.io/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS +)> +>>> g2.add((stmt2, RDF.predicate, RDF.type)) # doctest: +ELLIPSIS +)> +>>> g2.add((stmt2, RDF.object, namespace.RDFS.Class)) # doctest: +ELLIPSIS +)> +>>> g3.add((stmt3, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS +)> +>>> g3.add((stmt3, RDF.subject, +... URIRef('https://rdflib.github.io/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS +)> +>>> g3.add((stmt3, RDF.predicate, namespace.RDFS.comment)) # doctest: +ELLIPSIS +)> +>>> g3.add((stmt3, RDF.object, Literal( +... 'The top-level aggregate graph - The sum ' + +... 'of all named graphs within a Store'))) # doctest: +ELLIPSIS +)> +>>> len(list(ConjunctiveGraph(store).subjects(RDF.type, RDF.Statement))) +3 +>>> len(list(ReadOnlyGraphAggregate([g1,g2]).subjects( +... RDF.type, RDF.Statement))) +2 + +``` + +ConjunctiveGraphs have a [`quads()`][rdflib.graph.ConjunctiveGraph.quads] method which returns quads instead of triples, where the fourth item is the Graph (or subclass thereof) instance in which the triple was asserted: - >>> uniqueGraphNames = set( - ... [graph.identifier for s, p, o, graph in ConjunctiveGraph(store - ... ).quads((None, RDF.predicate, None))]) - >>> len(uniqueGraphNames) - 3 - >>> unionGraph = ReadOnlyGraphAggregate([g1, g2]) - >>> uniqueGraphNames = set( - ... [graph.identifier for s, p, o, graph in unionGraph.quads( - ... (None, RDF.predicate, None))]) - >>> len(uniqueGraphNames) - 2 - -Parsing N3 from a string - - >>> g2 = Graph() - >>> src = ''' - ... @prefix rdf: . - ... @prefix rdfs: . - ... [ a rdf:Statement ; - ... rdf:subject ; - ... rdf:predicate rdfs:label; - ... rdf:object "Conjunctive Graph" ] . - ... ''' - >>> g2 = g2.parse(data=src, format="n3") - >>> print(len(g2)) - 4 +```python +>>> uniqueGraphNames = set( +... [graph.identifier for s, p, o, graph in ConjunctiveGraph(store +... 
).quads((None, RDF.predicate, None))]) +>>> len(uniqueGraphNames) +3 +>>> unionGraph = ReadOnlyGraphAggregate([g1, g2]) +>>> uniqueGraphNames = set( +... [graph.identifier for s, p, o, graph in unionGraph.quads( +... (None, RDF.predicate, None))]) +>>> len(uniqueGraphNames) +2 + +``` + +Parsing N3 from a string: + +```python +>>> g2 = Graph() +>>> src = ''' +... @prefix rdf: . +... @prefix rdfs: . +... [ a rdf:Statement ; +... rdf:subject ; +... rdf:predicate rdfs:label; +... rdf:object "Conjunctive Graph" ] . +... ''' +>>> g2 = g2.parse(data=src, format="n3") +>>> print(len(g2)) +4 + +``` Using Namespace class: - >>> RDFLib = Namespace("https://rdflib.github.io/") - >>> RDFLib.ConjunctiveGraph - rdflib.term.URIRef('https://rdflib.github.io/ConjunctiveGraph') - >>> RDFLib["Graph"] - rdflib.term.URIRef('https://rdflib.github.io/Graph') +```python +>>> RDFLib = Namespace("https://rdflib.github.io/") +>>> RDFLib.ConjunctiveGraph +rdflib.term.URIRef('https://rdflib.github.io/ConjunctiveGraph') +>>> RDFLib["Graph"] +rdflib.term.URIRef('https://rdflib.github.io/Graph') +``` """ from __future__ import annotations @@ -259,18 +278,9 @@ TYPE_CHECKING, Any, BinaryIO, - Callable, - Dict, - Generator, - Iterable, - List, - Mapping, NoReturn, Optional, - Set, TextIO, - Tuple, - Type, TypeVar, Union, cast, @@ -279,12 +289,12 @@ from urllib.parse import urlparse from urllib.request import url2pathname +import rdflib.collection # avoid circular dependency import rdflib.exceptions as exceptions import rdflib.namespace as namespace # noqa: F401 # This is here because it is used in a docstring. import rdflib.plugin as plugin -import rdflib.query as query +import rdflib.query import rdflib.util # avoid circular dependency -from rdflib.collection import Collection from rdflib.exceptions import ParserError from rdflib.namespace import RDF, Namespace, NamespaceManager from rdflib.parser import InputSource, Parser, create_input_source @@ -301,64 +311,83 @@ Node, RDFLibGenid, URIRef, + Variable, ) if TYPE_CHECKING: + from collections.abc import Callable, Generator, Iterable, Mapping + import typing_extensions as te - import rdflib.query from rdflib.plugins.sparql.sparql import Query, Update -_SubjectType = Node -_PredicateType = Node -_ObjectType = Node -_ContextIdentifierType = IdentifiedNode +# RDFLib official stance is Subject can be a Literal +# If this ever changes, this part will be one of the first lines to modify. 
+_SubjectType: te.TypeAlias = Union[IdentifiedNode, Literal, Variable] +_PredicateType: te.TypeAlias = Union[IdentifiedNode, Variable] +_ObjectType: te.TypeAlias = Union[IdentifiedNode, Literal, Variable] +_ContextIdentifierType: te.TypeAlias = Union[IdentifiedNode] + +_TripleType: te.TypeAlias = tuple[_SubjectType, _PredicateType, _ObjectType] +_TriplePathType: te.TypeAlias = tuple[_SubjectType, Path, _ObjectType] +_TripleOrTriplePathType: te.TypeAlias = Union[_TripleType, _TriplePathType] -_TripleType = Tuple["_SubjectType", "_PredicateType", "_ObjectType"] -_QuadType = Tuple["_SubjectType", "_PredicateType", "_ObjectType", "_ContextType"] -_OptionalQuadType = Tuple[ - "_SubjectType", "_PredicateType", "_ObjectType", Optional["_ContextType"] +_QuadType: te.TypeAlias = tuple[ + _SubjectType, _PredicateType, _ObjectType, "_ContextType" +] +_OptionalQuadType: te.TypeAlias = tuple[ + _SubjectType, _PredicateType, _ObjectType, Optional["_ContextType"] ] -_TripleOrOptionalQuadType = Union["_TripleType", "_OptionalQuadType"] -_OptionalIdentifiedQuadType = Tuple[ - "_SubjectType", "_PredicateType", "_ObjectType", Optional["_ContextIdentifierType"] +_TripleOrOptionalQuadType: te.TypeAlias = Union[_TripleType, _OptionalQuadType] +_OptionalIdentifiedQuadType: te.TypeAlias = tuple[ + _SubjectType, _PredicateType, _ObjectType, Optional[_ContextIdentifierType] ] -_TriplePatternType = Tuple[ - Optional["_SubjectType"], Optional["_PredicateType"], Optional["_ObjectType"] +_TriplePatternType: te.TypeAlias = tuple[ + Optional[_SubjectType], Optional[_PredicateType], Optional[_ObjectType] ] -_TriplePathPatternType = Tuple[Optional["_SubjectType"], Path, Optional["_ObjectType"]] -_QuadPatternType = Tuple[ - Optional["_SubjectType"], - Optional["_PredicateType"], - Optional["_ObjectType"], +_TriplePathPatternType: te.TypeAlias = tuple[ + Optional[_SubjectType], Path, Optional[_ObjectType] +] +_QuadPatternType: te.TypeAlias = tuple[ + Optional[_SubjectType], + Optional[_PredicateType], + Optional[_ObjectType], Optional["_ContextType"], ] -_QuadPathPatternType = Tuple[ - Optional["_SubjectType"], +_QuadPathPatternType: te.TypeAlias = tuple[ + Optional[_SubjectType], Path, - Optional["_ObjectType"], + Optional[_ObjectType], Optional["_ContextType"], ] -_TripleOrQuadPatternType = Union["_TriplePatternType", "_QuadPatternType"] -_TripleOrQuadPathPatternType = Union["_TriplePathPatternType", "_QuadPathPatternType"] -_TripleSelectorType = Tuple[ - Optional["_SubjectType"], - Optional[Union["Path", "_PredicateType"]], - Optional["_ObjectType"], +_TripleOrQuadPatternType: te.TypeAlias = Union[_TriplePatternType, _QuadPatternType] +_TripleOrQuadPathPatternType: te.TypeAlias = Union[ + _TriplePathPatternType, _QuadPathPatternType +] + +# The difference between TriplePattern and TripleSelector is that +# TripleSelector can have a Optional[Path] as the predicate, and Subject/Object +# can be a QuotedGraph +_TripleSelectorType: te.TypeAlias = tuple[ + Optional[_SubjectType], + Optional[Union[Path, _PredicateType]], + Optional[_ObjectType], ] -_QuadSelectorType = Tuple[ - Optional["_SubjectType"], - Optional[Union["Path", "_PredicateType"]], - Optional["_ObjectType"], +_QuadSelectorType: te.TypeAlias = tuple[ + Optional[_SubjectType], + Optional[Union[Path, _PredicateType]], + Optional[_ObjectType], Optional["_ContextType"], ] -_TripleOrQuadSelectorType = Union["_TripleSelectorType", "_QuadSelectorType"] -_TriplePathType = Tuple["_SubjectType", Path, "_ObjectType"] -_TripleOrTriplePathType = Union["_TripleType", 
"_TriplePathType"] +_TripleOrQuadSelectorType: te.TypeAlias = Union[_TripleSelectorType, _QuadSelectorType] + _GraphT = TypeVar("_GraphT", bound="Graph") _ConjunctiveGraphT = TypeVar("_ConjunctiveGraphT", bound="ConjunctiveGraph") _DatasetT = TypeVar("_DatasetT", bound="Dataset") +_QuotedGraphT = TypeVar("_QuotedGraphT", bound="QuotedGraph") + +_builtin_set_t = set # type error: Function "Type[Literal]" could always be true in boolean contex assert Literal # type: ignore[truthy-function] # avoid warning @@ -410,40 +439,85 @@ _TCArgT = TypeVar("_TCArgT") +# Graph is a node because technically a formula-aware graph +# take a Graph as subject or object, but we usually use QuotedGraph for that. class Graph(Node): - """An RDF Graph + """An RDF Graph: a Python object containing nodes and relations between them as + RDF 'triples'. - The constructor accepts one argument, the "store" - that will be used to store the graph data (see the "store" - package for stores currently shipped with rdflib). + This is the central RDFLib object class and Graph objects are almost always present + in all uses of RDFLib. - Stores can be context-aware or unaware. Unaware stores take up - (some) less space but cannot support features that require - context, such as true merging/demerging of sub-graphs and - provenance. + Example: + The basic use is to create a Graph and iterate through or query its content: - Even if used with a context-aware store, Graph will only expose the quads which - belong to the default graph. To access the rest of the data, `ConjunctiveGraph` or - `Dataset` classes can be used instead. + ```python + >>> from rdflib import Graph, URIRef + >>> g = Graph() + >>> g.add(( + ... URIRef("http://example.com/s1"), # subject + ... URIRef("http://example.com/p1"), # predicate + ... URIRef("http://example.com/o1"), # object + ... )) # doctest: +ELLIPSIS + )> - The Graph constructor can take an identifier which identifies the Graph - by name. If none is given, the graph is assigned a BNode for its - identifier. + >>> g.add(( + ... URIRef("http://example.com/s2"), # subject + ... URIRef("http://example.com/p2"), # predicate + ... URIRef("http://example.com/o2"), # object + ... )) # doctest: +ELLIPSIS + )> - For more on named graphs, see: http://www.w3.org/2004/03/trix/ + >>> for triple in sorted(g): # simple looping + ... print(triple) + (rdflib.term.URIRef('http://example.com/s1'), rdflib.term.URIRef('http://example.com/p1'), rdflib.term.URIRef('http://example.com/o1')) + (rdflib.term.URIRef('http://example.com/s2'), rdflib.term.URIRef('http://example.com/p2'), rdflib.term.URIRef('http://example.com/o2')) + + >>> # get the object of the triple with subject s1 and predicate p1 + >>> o = g.value( + ... subject=URIRef("http://example.com/s1"), + ... predicate=URIRef("http://example.com/p1") + ... ) + + ``` + + !!! info "Graph stores" + The constructor accepts one argument, the "store" that will be used to store the + graph data with the default being the [`Memory`][rdflib.plugins.stores.memory.Memory] + (in memory) Store. Other Stores that persist content to disk using various file + databases or Stores that use remote servers (SPARQL systems) are supported. See + the `rdflib.plugins.stores` package for Stores currently shipped with RDFLib. + Other Stores not shipped with RDFLib can be added, such as + [HDT](https://github.com/rdflib/rdflib-hdt/). + + Stores can be context-aware or unaware. 
Unaware stores take up + (some) less space but cannot support features that require + context, such as true merging/demerging of sub-graphs and + provenance. + + Even if used with a context-aware store, Graph will only expose the quads which + belong to the default graph. To access the rest of the data the + `Dataset` class can be used instead. + + The Graph constructor can take an identifier which identifies the Graph + by name. If none is given, the graph is assigned a BNode for its + identifier. + + For more on Named Graphs, see the RDFLib `Dataset` class and the TriG Specification, + . """ context_aware: bool formula_aware: bool default_union: bool - base: Optional[str] + base: str | None def __init__( self, - store: Union[Store, str] = "default", - identifier: Optional[Union[_ContextIdentifierType, str]] = None, - namespace_manager: Optional[NamespaceManager] = None, - base: Optional[str] = None, + store: Store | str = "default", + identifier: _ContextIdentifierType | str | None = None, + namespace_manager: NamespaceManager | None = None, + base: str | None = None, bind_namespaces: _NamespaceSetString = "rdflib", ): super(Graph, self).__init__() @@ -451,7 +525,7 @@ def __init__( self.__identifier: _ContextIdentifierType self.__identifier = identifier or BNode() # type: ignore[assignment] if not isinstance(self.__identifier, IdentifiedNode): - self.__identifier = URIRef(self.__identifier) # type: ignore[unreachable] + self.__identifier = URIRef(self.__identifier) self.__store: Store if not isinstance(store, Store): # TODO: error handling @@ -464,6 +538,15 @@ def __init__( self.formula_aware = False self.default_union = False + def __getnewargs__(self) -> tuple[Any, ...]: + return ( + self.store, + self.__identifier, + self.__namespace_manager, + self.base, + self._bind_namespaces, + ) + @property def store(self) -> Store: return self.__store @@ -502,7 +585,7 @@ def toPython(self: _GraphT) -> _GraphT: # noqa: N802 return self def destroy(self: _GraphT, configuration: str) -> _GraphT: - """Destroy the store identified by ``configuration`` if supported""" + """Destroy the store identified by `configuration` if supported""" self.__store.destroy(configuration) return self @@ -517,7 +600,7 @@ def rollback(self: _GraphT) -> _GraphT: self.__store.rollback() return self - def open(self, configuration: str, create: bool = False) -> Optional[int]: + def open(self, configuration: str, create: bool = False) -> int | None: """Open the graph store Might be necessary for stores that require opening a connection to a @@ -534,7 +617,14 @@ def close(self, commit_pending_transaction: bool = False) -> None: return self.__store.close(commit_pending_transaction=commit_pending_transaction) def add(self: _GraphT, triple: _TripleType) -> _GraphT: - """Add a triple with self as context""" + """Add a triple with self as context. + + Args: + triple: The triple to add to the graph. + + Returns: + The graph instance. + """ s, p, o = triple assert isinstance(s, Node), "Subject %s must be an rdflib term" % (s,) assert isinstance(p, Node), "Predicate %s must be an rdflib term" % (p,) @@ -585,10 +675,17 @@ def triples( self, triple: _TripleSelectorType, ) -> Generator[_TripleOrTriplePathType, None, None]: - """Generator over the triple store + """Generator over the triple store. - Returns triples that match the given triple pattern. If triple pattern + Returns triples that match the given triple pattern. If the triple pattern does not provide a context, all contexts will be searched. 
+ + Args: + triple: A triple pattern where each component can be a specific value or None + as a wildcard. The predicate can also be a path expression. + + Yields: + Triples matching the given pattern. """ s, p, o = triple if isinstance(p, Path): @@ -600,11 +697,13 @@ def triples( def __getitem__(self, item): """ - A graph can be "sliced" as a shortcut for the triples method - The python slice syntax is (ab)used for specifying triples. - A generator over matches is returned, - the returned tuples include only the parts not given + A graph can be "sliced" as a shortcut for the triples method. + The Python slice syntax is (ab)used for specifying triples. + A generator over matches is returned, the returned tuples include only the + parts not given. + + ```python >>> import rdflib >>> g = rdflib.Graph() >>> g.add((rdflib.URIRef("urn:bob"), namespace.RDFS.label, rdflib.Literal("Bob"))) # doctest: +ELLIPSIS @@ -619,71 +718,84 @@ def __getitem__(self, item): >>> list(g[::rdflib.Literal("Bob")]) # all triples with bob as object [(rdflib.term.URIRef('urn:bob'), rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'))] + ``` + Combined with SPARQL paths, more complex queries can be written concisely: - Name of all Bobs friends: - - g[bob : FOAF.knows/FOAF.name ] - - Some label for Bob: - - g[bob : DC.title|FOAF.name|RDFS.label] - - All friends and friends of friends of Bob - - g[bob : FOAF.knows * "+"] - - etc. - - .. versionadded:: 4.0 + - Name of all Bobs friends: `g[bob : FOAF.knows/FOAF.name ]` + - Some label for Bob: `g[bob : DC.title|FOAF.name|RDFS.label]` + - All friends and friends of friends of Bob: `g[bob : FOAF.knows * "+"]` + - etc. + !!! example "New in version 4.0" """ - if isinstance(item, slice): + if isinstance(item, IdentifiedNode): + yield from self.predicate_objects(item) + elif isinstance(item, slice): s, p, o = item.start, item.stop, item.step + # type narrowing since we cannot use typing within a slice() + assert isinstance(s, IdentifiedNode) or isinstance(s, Variable) or s is None + assert ( + isinstance(p, IdentifiedNode) + or isinstance(p, Path) + or isinstance(p, Variable) + or p is None + ) + assert isinstance(o, Node) or isinstance(o, Variable) or o is None + if s is None and p is None and o is None: - return self.triples((s, p, o)) + yield from self.triples((s, p, o)) elif s is None and p is None: - return self.subject_predicates(o) + yield from self.subject_predicates(o) # type: ignore[arg-type] elif s is None and o is None: - return self.subject_objects(p) + yield from self.subject_objects(p) elif p is None and o is None: - return self.predicate_objects(s) + yield from self.predicate_objects(s) elif s is None: - return self.subjects(p, o) + yield from self.subjects(p, o) # type: ignore[arg-type] elif p is None: - return self.predicates(s, o) + yield from self.predicates(s, o) # type: ignore[arg-type] elif o is None: - return self.objects(s, p) + yield from self.objects(s, p) else: # all given - return (s, p, o) in self - - elif isinstance(item, (Path, Node)): - # type error: Argument 1 to "predicate_objects" of "Graph" has incompatible type "Union[Path, Node]"; expected "Optional[Node]" - return self.predicate_objects(item) # type: ignore[arg-type] - + yield s, p, o else: raise TypeError( "You can only index a graph by a single rdflib term or path, or a slice of rdflib terms." ) def __len__(self) -> int: - """Returns the number of triples in the graph + """Returns the number of triples in the graph. 
If context is specified then the number of triples in the context is returned instead. + + Returns: + The number of triples in the graph. """ # type error: Unexpected keyword argument "context" for "__len__" of "Store" return self.__store.__len__(context=self) # type: ignore[call-arg] def __iter__(self) -> Generator[_TripleType, None, None]: - """Iterates over all triples in the store""" + """Iterates over all triples in the store. + + Returns: + A generator yielding all triples in the store. + """ return self.triples((None, None, None)) def __contains__(self, triple: _TripleSelectorType) -> bool: - """Support for 'triple in graph' syntax""" + """Support for 'triple in graph' syntax. + + Args: + triple: The triple pattern to check for. + + Returns: + True if the triple pattern exists in the graph, False otherwise. + """ for triple in self.triples(triple): return True return False @@ -786,7 +898,7 @@ def __xor__(self, other: Graph) -> Graph: # Conv. methods def set( - self: _GraphT, triple: Tuple[_SubjectType, _PredicateType, _ObjectType] + self: _GraphT, triple: tuple[_SubjectType, _PredicateType, _ObjectType] ) -> _GraphT: """Convenience method to update the value of object @@ -806,12 +918,20 @@ def set( def subjects( self, - predicate: Union[None, Path, _PredicateType] = None, - object: Optional[_ObjectType] = None, + predicate: Path | _PredicateType | None = None, + object: _ObjectType | None = None, unique: bool = False, ) -> Generator[_SubjectType, None, None]: - """A generator of (optionally unique) subjects with the given - predicate and object""" + """Generate subjects with the given predicate and object. + + Args: + predicate: A specific predicate to match or None to match any predicate. + object: A specific object to match or None to match any object. + unique: If True, only yield unique subjects. + + Yields: + Subjects matching the given predicate and object. + """ if not unique: for s, p, o in self.triples((None, predicate, object)): yield s @@ -830,12 +950,20 @@ def subjects( def predicates( self, - subject: Optional[_SubjectType] = None, - object: Optional[_ObjectType] = None, + subject: _SubjectType | None = None, + object: _ObjectType | None = None, unique: bool = False, ) -> Generator[_PredicateType, None, None]: - """A generator of (optionally unique) predicates with the given - subject and object""" + """Generate predicates with the given subject and object. + + Args: + subject: A specific subject to match or None to match any subject. + object: A specific object to match or None to match any object. + unique: If True, only yield unique predicates. + + Yields: + Predicates matching the given subject and object. + """ if not unique: for s, p, o in self.triples((subject, None, object)): yield p @@ -854,12 +982,20 @@ def predicates( def objects( self, - subject: Optional[_SubjectType] = None, - predicate: Union[None, Path, _PredicateType] = None, + subject: _SubjectType | None = None, + predicate: Path | _PredicateType | None = None, unique: bool = False, ) -> Generator[_ObjectType, None, None]: - """A generator of (optionally unique) objects with the given - subject and predicate""" + """Generate objects with the given subject and predicate. + + Args: + subject: A specific subject to match or None to match any subject. + predicate: A specific predicate to match or None to match any predicate. + unique: If True, only yield unique objects. + + Yields: + Objects matching the given subject and predicate. 
+ """ if not unique: for s, p, o in self.triples((subject, predicate, None)): yield o @@ -877,8 +1013,8 @@ def objects( raise def subject_predicates( - self, object: Optional[_ObjectType] = None, unique: bool = False - ) -> Generator[Tuple[_SubjectType, _PredicateType], None, None]: + self, object: _ObjectType | None = None, unique: bool = False + ) -> Generator[tuple[_SubjectType, _PredicateType], None, None]: """A generator of (optionally unique) (subject, predicate) tuples for the given object""" if not unique: @@ -899,9 +1035,9 @@ def subject_predicates( def subject_objects( self, - predicate: Union[None, Path, _PredicateType] = None, + predicate: Path | _PredicateType | None = None, unique: bool = False, - ) -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: + ) -> Generator[tuple[_SubjectType, _ObjectType], None, None]: """A generator of (optionally unique) (subject, object) tuples for the given predicate""" if not unique: @@ -921,8 +1057,8 @@ def subject_objects( raise def predicate_objects( - self, subject: Optional[_SubjectType] = None, unique: bool = False - ) -> Generator[Tuple[_PredicateType, _ObjectType], None, None]: + self, subject: _SubjectType | None = None, unique: bool = False + ) -> Generator[tuple[_PredicateType, _ObjectType], None, None]: """A generator of (optionally unique) (predicate, object) tuples for the given subject""" if not unique: @@ -943,15 +1079,27 @@ def predicate_objects( def triples_choices( self, - triple: Union[ - Tuple[List[_SubjectType], _PredicateType, _ObjectType], - Tuple[_SubjectType, List[_PredicateType], _ObjectType], - Tuple[_SubjectType, _PredicateType, List[_ObjectType]], - ], - context: Optional[_ContextType] = None, + triple: ( + tuple[ + list[_SubjectType] | tuple[_SubjectType, ...], + _PredicateType, + _ObjectType | None, + ] + | tuple[ + _SubjectType | None, + list[_PredicateType] | tuple[_PredicateType, ...], + _ObjectType | None, + ] + | tuple[ + _SubjectType | None, + _PredicateType, + list[_ObjectType] | tuple[_ObjectType, ...], + ] + ), + context: _ContextType | None = None, ) -> Generator[_TripleType, None, None]: subject, predicate, object_ = triple - # type error: Argument 1 to "triples_choices" of "Store" has incompatible type "Tuple[Union[List[Node], Node], Union[Node, List[Node]], Union[Node, List[Node]]]"; expected "Union[Tuple[List[Node], Node, Node], Tuple[Node, List[Node], Node], Tuple[Node, Node, List[Node]]]" + # type error: Argument 1 to "triples_choices" of "Store" has incompatible type "tuple[Union[list[Node], Node], Union[Node, list[Node]], Union[Node, list[Node]]]"; expected "Union[tuple[list[Node], Node, Node], tuple[Node, list[Node], Node], tuple[Node, Node, list[Node]]]" # type error note: unpacking discards type info for (s, p, o), cg in self.store.triples_choices( (subject, predicate, object_), context=self # type: ignore[arg-type] @@ -963,18 +1111,18 @@ def value( self, subject: None = ..., predicate: None = ..., - object: Optional[_ObjectType] = ..., - default: Optional[Node] = ..., + object: _ObjectType | None = ..., + default: Node | None = ..., any: bool = ..., ) -> None: ... @overload def value( self, - subject: Optional[_SubjectType] = ..., + subject: _SubjectType | None = ..., predicate: None = ..., object: None = ..., - default: Optional[Node] = ..., + default: Node | None = ..., any: bool = ..., ) -> None: ... 
@@ -982,30 +1130,60 @@ def value( def value( self, subject: None = ..., - predicate: Optional[_PredicateType] = ..., + predicate: _PredicateType | None = ..., object: None = ..., - default: Optional[Node] = ..., + default: Node | None = ..., any: bool = ..., ) -> None: ... @overload def value( self, - subject: Optional[_SubjectType] = ..., - predicate: Optional[_PredicateType] = ..., - object: Optional[_ObjectType] = ..., - default: Optional[Node] = ..., + subject: None = ..., + predicate: _PredicateType | None = ..., + object: _ObjectType | None = ..., + default: _SubjectType | None = ..., + any: bool = ..., + ) -> _SubjectType | None: ... + + @overload + def value( + self, + subject: _SubjectType | None = ..., + predicate: None = ..., + object: _ObjectType | None = ..., + default: _PredicateType | None = ..., + any: bool = ..., + ) -> _PredicateType | None: ... + + @overload + def value( + self, + subject: _SubjectType | None = ..., + predicate: _PredicateType | None = ..., + object: None = ..., + default: _ObjectType | None = ..., any: bool = ..., - ) -> Optional[Node]: ... + ) -> _ObjectType | None: ... + @overload def value( self, - subject: Optional[_SubjectType] = None, - predicate: Optional[_PredicateType] = RDF.value, - object: Optional[_ObjectType] = None, - default: Optional[Node] = None, + subject: _SubjectType | None = ..., + predicate: _PredicateType | None = ..., + object: _ObjectType | None = ..., + default: Node | None = ..., + any: bool = ..., + ) -> Node | None: ... + + def value( + self, + subject: _SubjectType | None = None, + predicate: _PredicateType | None = RDF.value, + object: _ObjectType | None = None, + default: Node | None = None, any: bool = True, - ) -> Optional[Node]: + ) -> Node | None: """Get a value for a pair of two criteria Exactly one of subject, predicate, object must be None. Useful if one @@ -1014,12 +1192,12 @@ def value( It is one of those situations that occur a lot, hence this 'macro' like utility - Parameters: - - - subject, predicate, object: exactly one must be None - - default: value to be returned if no values found - - any: if True, return any value in the case there is more than one, - else, raise UniquenessError + Args: + subject: Subject of the triple pattern, exactly one of subject, predicate, object must be None + predicate: Predicate of the triple pattern, exactly one of subject, predicate, object must be None + object: Object of the triple pattern, exactly one of subject, predicate, object must be None + default: Value to be returned if no values found + any: If True, return any value in the case there is more than one, else, raise UniquenessError """ retval = default @@ -1063,10 +1241,11 @@ def value( pass return retval - def items(self, list: Node) -> Generator[Node, None, None]: + def items(self, list: _SubjectType) -> Generator[_ObjectType, None, None]: """Generator over all items in the resource specified by list - list is an RDF collection. + Args: + list: An RDF collection. 
""" chain = set([list]) while list: @@ -1083,53 +1262,51 @@ def transitiveClosure( # noqa: N802 self, func: Callable[[_TCArgT, Graph], Iterable[_TCArgT]], arg: _TCArgT, - seen: Optional[Dict[_TCArgT, int]] = None, + seen: dict[_TCArgT, int] | None = None, ): - """ - Generates transitive closure of a user-defined - function against the graph - - >>> from rdflib.collection import Collection - >>> g=Graph() - >>> a=BNode("foo") - >>> b=BNode("bar") - >>> c=BNode("baz") - >>> g.add((a,RDF.first,RDF.type)) # doctest: +ELLIPSIS - )> - >>> g.add((a,RDF.rest,b)) # doctest: +ELLIPSIS - )> - >>> g.add((b,RDF.first,namespace.RDFS.label)) # doctest: +ELLIPSIS - )> - >>> g.add((b,RDF.rest,c)) # doctest: +ELLIPSIS - )> - >>> g.add((c,RDF.first,namespace.RDFS.comment)) # doctest: +ELLIPSIS - )> - >>> g.add((c,RDF.rest,RDF.nil)) # doctest: +ELLIPSIS - )> - >>> def topList(node,g): - ... for s in g.subjects(RDF.rest, node): - ... yield s - >>> def reverseList(node,g): - ... for f in g.objects(node, RDF.first): - ... print(f) - ... for s in g.subjects(RDF.rest, node): - ... yield s - - >>> [rt for rt in g.transitiveClosure( - ... topList,RDF.nil)] # doctest: +NORMALIZE_WHITESPACE - [rdflib.term.BNode('baz'), - rdflib.term.BNode('bar'), - rdflib.term.BNode('foo')] - - >>> [rt for rt in g.transitiveClosure( - ... reverseList,RDF.nil)] # doctest: +NORMALIZE_WHITESPACE - http://www.w3.org/2000/01/rdf-schema#comment - http://www.w3.org/2000/01/rdf-schema#label - http://www.w3.org/1999/02/22-rdf-syntax-ns#type - [rdflib.term.BNode('baz'), - rdflib.term.BNode('bar'), - rdflib.term.BNode('foo')] - + """Generates transitive closure of a user-defined function against the graph + + ```python + from rdflib.collection import Collection + g = Graph() + a = BNode("foo") + b = BNode("bar") + c = BNode("baz") + g.add((a,RDF.first,RDF.type)) + g.add((a,RDF.rest,b)) + g.add((b,RDF.first,namespace.RDFS.label)) + g.add((b,RDF.rest,c)) + g.add((c,RDF.first,namespace.RDFS.comment)) + g.add((c,RDF.rest,RDF.nil)) + def topList(node,g): + for s in g.subjects(RDF.rest, node): + yield s + def reverseList(node,g): + for f in g.objects(node, RDF.first): + print(f) + for s in g.subjects(RDF.rest, node): + yield s + + [rt for rt in g.transitiveClosure( + topList,RDF.nil)] + # [rdflib.term.BNode('baz'), + # rdflib.term.BNode('bar'), + # rdflib.term.BNode('foo')] + + [rt for rt in g.transitiveClosure( + reverseList,RDF.nil)] + # http://www.w3.org/2000/01/rdf-schema#comment + # http://www.w3.org/2000/01/rdf-schema#label + # http://www.w3.org/1999/02/22-rdf-syntax-ns#type + # [rdflib.term.BNode('baz'), + # rdflib.term.BNode('bar'), + # rdflib.term.BNode('foo')] + ``` + + Args: + func: A function that generates a sequence of nodes + arg: The starting node + seen: A dict of visited nodes """ if seen is None: seen = {} @@ -1143,14 +1320,19 @@ def transitiveClosure( # noqa: N802 def transitive_objects( self, - subject: Optional[_SubjectType], - predicate: Optional[_PredicateType], - remember: Optional[Dict[Optional[_SubjectType], int]] = None, - ) -> Generator[Optional[_SubjectType], None, None]: - """Transitively generate objects for the ``predicate`` relationship + subject: _SubjectType | None, + predicate: _PredicateType | None, + remember: dict[_SubjectType | None, int] | None = None, + ) -> Generator[_SubjectType | None, None, None]: + """Transitively generate objects for the `predicate` relationship Generated objects belong to the depth first transitive closure of the - ``predicate`` relationship starting at ``subject``. 
+ `predicate` relationship starting at `subject`. + + Args: + subject: The subject to start the transitive closure from + predicate: The predicate to follow + remember: A dict of visited nodes """ if remember is None: remember = {} @@ -1164,14 +1346,19 @@ def transitive_objects( def transitive_subjects( self, - predicate: Optional[_PredicateType], - object: Optional[_ObjectType], - remember: Optional[Dict[Optional[_ObjectType], int]] = None, - ) -> Generator[Optional[_ObjectType], None, None]: - """Transitively generate subjects for the ``predicate`` relationship + predicate: _PredicateType | None, + object: _ObjectType | None, + remember: dict[_ObjectType | None, int] | None = None, + ) -> Generator[_ObjectType | None, None, None]: + """Transitively generate subjects for the `predicate` relationship Generated subjects belong to the depth first transitive closure of the - ``predicate`` relationship starting at ``object``. + `predicate` relationship starting at `object`. + + Args: + predicate: The predicate to follow + object: The object to start the transitive closure from + remember: A dict of visited nodes """ if remember is None: remember = {} @@ -1186,12 +1373,12 @@ def transitive_subjects( def qname(self, uri: str) -> str: return self.namespace_manager.qname(uri) - def compute_qname(self, uri: str, generate: bool = True) -> Tuple[str, URIRef, str]: + def compute_qname(self, uri: str, generate: bool = True) -> tuple[str, URIRef, str]: return self.namespace_manager.compute_qname(uri, generate) def bind( self, - prefix: Optional[str], + prefix: str | None, namespace: Any, # noqa: F811 override: bool = True, replace: bool = False, @@ -1203,8 +1390,16 @@ def bind( if replace, replace any existing prefix with the new namespace - for example: graph.bind("foaf", "http://xmlns.com/foaf/0.1/") + Args: + prefix: The prefix to bind + namespace: The namespace to bind the prefix to + override: If True, override any existing prefix binding + replace: If True, replace any existing namespace binding + Example: + ```python + graph.bind("foaf", "http://xmlns.com/foaf/0.1/") + ``` """ # TODO FIXME: This method's behaviour should be simplified and made # more robust. If the method cannot do what it is asked it should raise @@ -1217,8 +1412,12 @@ def bind( prefix, namespace, override=override, replace=replace ) - def namespaces(self) -> Generator[Tuple[str, URIRef], None, None]: - """Generator over all the prefix, namespace tuples""" + def namespaces(self) -> Generator[tuple[str, URIRef], None, None]: + """Generator over all the prefix, namespace tuples + + Returns: + Generator yielding prefix, namespace tuples + """ for prefix, namespace in self.namespace_manager.namespaces(): # noqa: F402 yield prefix, namespace @@ -1232,7 +1431,7 @@ def serialize( self, destination: None, format: str, - base: Optional[str], + base: str | None, encoding: str, **args: Any, ) -> bytes: ... @@ -1243,7 +1442,7 @@ def serialize( self, destination: None = ..., format: str = ..., - base: Optional[str] = ..., + base: str | None = ..., *, encoding: str, **args: Any, @@ -1255,7 +1454,7 @@ def serialize( self, destination: None = ..., format: str = ..., - base: Optional[str] = ..., + base: str | None = ..., encoding: None = ..., **args: Any, ) -> str: ... 
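The `bind()` hunk above only hints at how bindings surface elsewhere. A minimal sketch of `bind()` together with `namespaces()`, assuming a stock rdflib install; the `foaf` prefix and example IRI are illustrative:

```python
from rdflib import Graph, Literal, URIRef
from rdflib.namespace import FOAF

g = Graph()
g.bind("foaf", FOAF)  # override=True, replace=False are the defaults
g.add((URIRef("http://example.org/me"), FOAF.name, Literal("Alice")))

# namespaces() yields (prefix, URIRef) pairs, including the new binding
assert dict(g.namespaces())["foaf"] == URIRef("http://xmlns.com/foaf/0.1/")

# serializers pick up bound prefixes
print(g.serialize(format="turtle"))  # emits "@prefix foaf: ..." instead of full IRIs
```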
@@ -1264,10 +1463,10 @@ def serialize( @overload def serialize( self, - destination: Union[str, pathlib.PurePath, IO[bytes]], + destination: str | pathlib.PurePath | IO[bytes], format: str = ..., - base: Optional[str] = ..., - encoding: Optional[str] = ..., + base: str | None = ..., + encoding: str | None = ..., **args: Any, ) -> Graph: ... @@ -1275,51 +1474,41 @@ def serialize( @overload def serialize( self, - destination: Optional[Union[str, pathlib.PurePath, IO[bytes]]] = ..., + destination: str | pathlib.PurePath | IO[bytes] | None = ..., format: str = ..., - base: Optional[str] = ..., - encoding: Optional[str] = ..., + base: str | None = ..., + encoding: str | None = ..., **args: Any, - ) -> Union[bytes, str, Graph]: ... + ) -> bytes | str | Graph: ... def serialize( self: _GraphT, - destination: Optional[Union[str, pathlib.PurePath, IO[bytes]]] = None, + destination: str | pathlib.PurePath | IO[bytes] | None = None, format: str = "turtle", - base: Optional[str] = None, - encoding: Optional[str] = None, + base: str | None = None, + encoding: str | None = None, **args: Any, - ) -> Union[bytes, str, _GraphT]: - """ - Serialize the graph. - - :param destination: - The destination to serialize the graph to. This can be a path as a - :class:`str` or :class:`~pathlib.PurePath` object, or it can be a - :class:`~typing.IO` ``[bytes]`` like object. If this parameter is not - supplied the serialized graph will be returned. - :param format: - The format that the output should be written in. This value - references a :class:`~rdflib.serializer.Serializer` plugin. Format - support can be extended with plugins, but ``"xml"``, ``"n3"``, - ``"turtle"``, ``"nt"``, ``"pretty-xml"``, ``"trix"``, ``"trig"``, - ``"nquads"``, ``"json-ld"`` and ``"hext"`` are built in. Defaults to - ``"turtle"``. - :param base: - The base IRI for formats that support it. For the turtle format this - will be used as the ``@base`` directive. - :param encoding: Encoding of output. - :param args: - Additional arguments to pass to the - :class:`~rdflib.serializer.Serializer` that will be used. - :return: The serialized graph if ``destination`` is `None`. The - serialized graph is returned as `str` if no encoding is specified, - and as `bytes` if an encoding is specified. - :rtype: :class:`bytes` if ``destination`` is `None` and ``encoding`` is not `None`. - :rtype: :class:`str` if ``destination`` is `None` and ``encoding`` is `None`. - :return: ``self`` (i.e. the :class:`~rdflib.graph.Graph` instance) if - ``destination`` is not `None`. - :rtype: :class:`~rdflib.graph.Graph` if ``destination`` is not `None`. + ) -> bytes | str | _GraphT: + """Serialize the graph. + + Args: + destination: The destination to serialize the graph to. This can be a path as a + string or pathlib.PurePath object, or it can be an IO[bytes] like object. + If this parameter is not supplied the serialized graph will be returned. + format: The format that the output should be written in. This value + references a Serializer plugin. Format support can be extended with plugins, + but "xml", "n3", "turtle", "nt", "pretty-xml", "trix", "trig", "nquads", + "json-ld" and "hext" are built in. Defaults to "turtle". + base: The base IRI for formats that support it. For the turtle format this + will be used as the @base directive. + encoding: Encoding of output. + args: Additional arguments to pass to the Serializer that will be used. + + Returns: + The serialized graph if `destination` is None. 
The serialized graph is returned
+                as str if no encoding is specified, and as bytes if an encoding is specified.
+
+            self (i.e. the Graph instance) if `destination` is not None.
         """

         # if base is not given as attribute use the base set for the graph
@@ -1354,14 +1543,14 @@ def serialize(
             else:
                 os_path = location
             with open(os_path, "wb") as stream:
-                serializer.serialize(stream, encoding=encoding, **args)
+                serializer.serialize(stream, base=base, encoding=encoding, **args)
         return self

     def print(
         self,
         format: str = "turtle",
         encoding: str = "utf-8",
-        out: Optional[TextIO] = None,
+        out: TextIO | None = None,
     ) -> None:
         print(
             self.serialize(None, format=format, encoding=encoding).decode(encoding),
@@ -1371,97 +1560,99 @@ def print(

     def parse(
         self,
-        source: Optional[
-            Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath]
-        ] = None,
-        publicID: Optional[str] = None,  # noqa: N803
-        format: Optional[str] = None,
-        location: Optional[str] = None,
-        file: Optional[Union[BinaryIO, TextIO]] = None,
-        data: Optional[Union[str, bytes]] = None,
+        source: (
+            IO[bytes] | TextIO | InputSource | str | bytes | pathlib.PurePath | None
+        ) = None,
+        publicID: str | None = None,  # noqa: N803
+        format: str | None = None,
+        location: str | None = None,
+        file: BinaryIO | TextIO | None = None,
+        data: str | bytes | None = None,
         **args: Any,
     ) -> Graph:
-        """
-        Parse an RDF source adding the resulting triples to the Graph.
+        """Parse an RDF source adding the resulting triples to the Graph.

         The source is specified using one of source, location, file or data.

-        .. caution::
-
-           This method can access directly or indirectly requested network or
-           file resources, for example, when parsing JSON-LD documents with
-           ``@context`` directives that point to a network location.
-
-           When processing untrusted or potentially malicious documents,
-           measures should be taken to restrict network and file access.
-
-           For information on available security measures, see the RDFLib
-           :doc:`Security Considerations </security_considerations>`
-           documentation.
-
-        :param source: An `xml.sax.xmlreader.InputSource`, file-like object,
-            `pathlib.Path` like object, or string. In the case of a string the string
-            is the location of the source.
-        :param location: A string indicating the relative or absolute URL of the
-            source. `Graph`'s absolutize method is used if a relative location
-            is specified.
-        :param file: A file-like object.
-        :param data: A string containing the data to be parsed.
-        :param format: Used if format can not be determined from source, e.g.
-            file extension or Media Type. Defaults to text/turtle. Format
-            support can be extended with plugins, but "xml", "n3" (use for
-            turtle), "nt" & "trix" are built in.
-        :param publicID: the logical URI to use as the document base. If None
-            specified the document location is used (at least in the case where
-            there is a document location). This is used as the base URI when
-            resolving relative URIs in the source document, as defined in `IETF
-            RFC 3986 <https://www.rfc-editor.org/rfc/rfc3986>`_,
-            given the source document does not define a base URI.
-        :return: ``self``, i.e. the :class:`~rdflib.graph.Graph` instance.
-
-        Examples:
-
-        >>> my_data = '''
-        ... <rdf:RDF
-        ...    xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#'
-        ...    xmlns:rdfs='http://www.w3.org/2000/01/rdf-schema#'
-        ... >
-        ...   <rdf:Description>
-        ...     <rdfs:label>Example</rdfs:label>
-        ...     <rdfs:comment>This is really just an example.</rdfs:comment>
-        ...   </rdf:Description>
-        ... </rdf:RDF>
-        ... '''
-        >>> import os, tempfile
-        >>> fd, file_name = tempfile.mkstemp()
-        >>> f = os.fdopen(fd, "w")
-        >>> dummy = f.write(my_data)  # Returns num bytes written
-        >>> f.close()
-
-        >>> g = Graph()
-        >>> result = g.parse(data=my_data, format="application/rdf+xml")
-        >>> len(g)
-        2
-
-        >>> g = Graph()
-        >>> result = g.parse(location=file_name, format="application/rdf+xml")
-        >>> len(g)
-        2
-
-        >>> g = Graph()
-        >>> with open(file_name, "r") as f:
-        ...     result = g.parse(f, format="application/rdf+xml")
-        >>> len(g)
-        2
-
-        >>> os.remove(file_name)
-
-        >>> # default turtle parsing
-        >>> result = g.parse(data="<http://example.com/tarek> <http://example.com/likes> <http://example.com/pizza> .")
-        >>> len(g)
-        3
-
+        Args:
+            source: An `xml.sax.xmlreader.InputSource`, file-like object,
+                `pathlib.Path` like object, or string. In the case of a string the string
+                is the location of the source.
+            publicID: The logical URI to use as the document base. If None
+                specified the document location is used (at least in the case where
+                there is a document location). This is used as the base URI when
+                resolving relative URIs in the source document, as defined in `IETF
+                RFC 3986 <https://www.rfc-editor.org/rfc/rfc3986>`_,
+                given the source document does not define a base URI.
+            format: Used if format can not be determined from source, e.g.
+                file extension or Media Type. Defaults to text/turtle. Format
+                support can be extended with plugins, but "xml", "n3" (use for
+                turtle), "nt" & "trix" are built in.
+            location: A string indicating the relative or absolute URL of the
+                source. `Graph`'s absolutize method is used if a relative location
+                is specified.
+            file: A file-like object.
+            data: A string containing the data to be parsed.
+            args: Additional arguments to pass to the parser.
+
+        Returns:
+            self, i.e. the Graph instance.
+
+        Example:
+            ```python
+            >>> my_data = '''
+            ... <rdf:RDF
+            ...    xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#'
+            ...    xmlns:rdfs='http://www.w3.org/2000/01/rdf-schema#'
+            ... >
+            ...   <rdf:Description>
+            ...     <rdfs:label>Example</rdfs:label>
+            ...     <rdfs:comment>This is really just an example.</rdfs:comment>
+            ...   </rdf:Description>
+            ... </rdf:RDF>
+            ... '''
+            >>> import os, tempfile
+            >>> fd, file_name = tempfile.mkstemp()
+            >>> f = os.fdopen(fd, "w")
+            >>> dummy = f.write(my_data)  # Returns num bytes written
+            >>> f.close()
+
+            >>> g = Graph()
+            >>> result = g.parse(data=my_data, format="application/rdf+xml")
+            >>> len(g)
+            2
+
+            >>> g = Graph()
+            >>> result = g.parse(location=file_name, format="application/rdf+xml")
+            >>> len(g)
+            2
+
+            >>> g = Graph()
+            >>> with open(file_name, "r") as f:
+            ...     result = g.parse(f, format="application/rdf+xml")
+            >>> len(g)
+            2
+
+            >>> os.remove(file_name)
+
+            >>> # default turtle parsing
+            >>> result = g.parse(data="<http://example.com/tarek> <http://example.com/likes> <http://example.com/pizza> .")
+            >>> len(g)
+            3
+
+            ```
+
+        !!! warning "Caution"
+            This method can access directly or indirectly requested network or
+            file resources, for example, when parsing JSON-LD documents with
+            `@context` directives that point to a network location.

+            When processing untrusted or potentially malicious documents,
+            measures should be taken to restrict network and file access.

+            For information on available security measures, see the RDFLib
+            [Security Considerations](../security_considerations.md)
+            documentation.
""" source = create_input_source( @@ -1516,39 +1707,41 @@ def parse( def query( self, - query_object: Union[str, Query], - processor: Union[str, query.Processor] = "sparql", - result: Union[str, Type[query.Result]] = "sparql", - initNs: Optional[Mapping[str, Any]] = None, # noqa: N803 - initBindings: Optional[Mapping[str, Identifier]] = None, # noqa: N803 + query_object: str | Query, + processor: str | rdflib.query.Processor = "sparql", + result: str | type[rdflib.query.Result] = "sparql", + initNs: Mapping[str, Any] | None = None, # noqa: N803 + initBindings: Mapping[str, Identifier] | None = None, # noqa: N803 use_store_provided: bool = True, **kwargs: Any, - ) -> query.Result: - """ - Query this graph. - - A type of 'prepared queries' can be realised by providing initial - variable bindings with initBindings - - Initial namespaces are used to resolve prefixes used in the query, if - none are given, the namespaces from the graph's namespace manager are - used. - - .. caution:: - - This method can access indirectly requested network endpoints, for - example, query processing will attempt to access network endpoints - specified in ``SERVICE`` directives. - - When processing untrusted or potentially malicious queries, measures - should be taken to restrict network and file access. - - For information on available security measures, see the RDFLib - :doc:`Security Considerations ` - documentation. - - :returntype: :class:`~rdflib.query.Result` - + ) -> rdflib.query.Result: + """Query this graph. + + Args: + query_object: The query string or object to execute. + processor: The query processor to use. Default is "sparql". + result: The result format to use. Default is "sparql". + initNs: Initial namespaces to use for resolving prefixes in the query. + If none are given, the namespaces from the graph's namespace manager are used. + initBindings: Initial variable bindings to use. A type of 'prepared queries' + can be realized by providing these bindings. + use_store_provided: Whether to use the store's query method if available. + kwargs: Additional arguments to pass to the query processor. + + Returns: + A [`rdflib.query.Result`][`rdflib.query.Result`] instance. + + !!! warning "Caution" + This method can access indirectly requested network endpoints, for + example, query processing will attempt to access network endpoints + specified in `SERVICE` directives. + + When processing untrusted or potentially malicious queries, measures + should be taken to restrict network and file access. + + For information on available security measures, see the RDFLib + [Security Considerations](../security_considerations.md) + documentation. 
""" initBindings = initBindings or {} # noqa: N806 @@ -1572,38 +1765,46 @@ def query( except NotImplementedError: pass # store has no own implementation - if not isinstance(result, query.Result): - result = plugin.get(cast(str, result), query.Result) - if not isinstance(processor, query.Processor): - processor = plugin.get(processor, query.Processor)(self) + if not isinstance(result, rdflib.query.Result): + result = plugin.get(cast(str, result), rdflib.query.Result) + if not isinstance(processor, rdflib.query.Processor): + processor = plugin.get(processor, rdflib.query.Processor)(self) # type error: Argument 1 to "Result" has incompatible type "Mapping[str, Any]"; expected "str" return result(processor.query(query_object, initBindings, initNs, **kwargs)) # type: ignore[arg-type] def update( self, - update_object: Union[Update, str], - processor: Union[str, rdflib.query.UpdateProcessor] = "sparql", - initNs: Optional[Mapping[str, Any]] = None, # noqa: N803 - initBindings: Optional[Mapping[str, Identifier]] = None, # noqa: N803 + update_object: Update | str, + processor: str | rdflib.query.UpdateProcessor = "sparql", + initNs: Mapping[str, Any] | None = None, # noqa: N803 + initBindings: ( # noqa: N803 + Mapping[str, rdflib.query.QueryBindingsValueType] | None + ) = None, use_store_provided: bool = True, **kwargs: Any, ) -> None: - """ - Update this graph with the given update query. - - .. caution:: - - This method can access indirectly requested network endpoints, for - example, query processing will attempt to access network endpoints - specified in ``SERVICE`` directives. - - When processing untrusted or potentially malicious queries, measures - should be taken to restrict network and file access. - - For information on available security measures, see the RDFLib - :doc:`Security Considerations ` - documentation. + """Update this graph with the given update query. + + Args: + update_object: The update query string or object to execute. + processor: The update processor to use. Default is "sparql". + initNs: Initial namespaces to use for resolving prefixes in the query. + If none are given, the namespaces from the graph's namespace manager are used. + initBindings: Initial variable bindings to use. + use_store_provided: Whether to use the store's update method if available. + kwargs: Additional arguments to pass to the update processor. + + !!! warning "Caution" + This method can access indirectly requested network endpoints, for + example, query processing will attempt to access network endpoints + specified in `SERVICE` directives. + + When processing untrusted or potentially malicious queries, measures + should be taken to restrict network and file access. + + For information on available security measures, see the RDFLib + Security Considerations documentation. 
""" initBindings = initBindings or {} # noqa: N806 initNs = initNs or dict(self.namespaces()) # noqa: N806 @@ -1627,16 +1828,16 @@ def update( except NotImplementedError: pass # store has no own implementation - if not isinstance(processor, query.UpdateProcessor): - processor = plugin.get(processor, query.UpdateProcessor)(self) + if not isinstance(processor, rdflib.query.UpdateProcessor): + processor = plugin.get(processor, rdflib.query.UpdateProcessor)(self) return processor.update(update_object, initBindings, initNs, **kwargs) - def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> str: + def n3(self, namespace_manager: NamespaceManager | None = None) -> str: """Return an n3 identifier for the Graph""" return "[%s]" % self.identifier.n3(namespace_manager=namespace_manager) - def __reduce__(self) -> Tuple[Type[Graph], Tuple[Store, _ContextIdentifierType]]: + def __reduce__(self) -> tuple[type[Graph], tuple[Store, _ContextIdentifierType]]: return ( Graph, ( @@ -1646,11 +1847,20 @@ def __reduce__(self) -> Tuple[Type[Graph], Tuple[Store, _ContextIdentifierType]] ) def isomorphic(self, other: Graph) -> bool: - """ - does a very basic check if these graphs are the same + """Check if this graph is isomorphic to another graph. + + Performs a basic check if these graphs are the same. If no BNodes are involved, this is accurate. - See rdflib.compare for a correct implementation of isomorphism checks + Args: + other: The graph to compare with. + + Returns: + True if the graphs are isomorphic, False otherwise. + + Note: + This is only an approximation. See rdflib.compare for a correct + implementation of isomorphism checks. """ # TODO: this is only an approximation. if len(self) != len(other): @@ -1667,14 +1877,18 @@ def isomorphic(self, other: Graph) -> bool: return True def connected(self) -> bool: - """Check if the Graph is connected + """Check if the Graph is connected. The Graph is considered undirectional. - Performs a search on the Graph, starting from a random node. Then - iteratively goes depth-first through the triplets where the node is - subject and object. Return True if all nodes have been visited and - False if it cannot continue and there are still unvisited nodes left. + Returns: + True if all nodes have been visited and there are no unvisited nodes left, + False otherwise. + + Note: + Performs a search on the Graph, starting from a random node. Then + iteratively goes depth-first through the triplets where the node is + subject and object. """ all_nodes = list(self.all_nodes()) discovered = [] @@ -1703,40 +1917,45 @@ def connected(self) -> bool: else: return False - def all_nodes(self) -> Set[Node]: + def all_nodes(self) -> _builtin_set_t[_SubjectType | _ObjectType]: res = set(self.objects()) res.update(self.subjects()) return res - def collection(self, identifier: _SubjectType) -> Collection: - """Create a new ``Collection`` instance. + def collection(self, identifier: IdentifiedNode) -> rdflib.collection.Collection: + """Create a new `Collection` instance. - Parameters: + Args: + identifier: A URIRef or BNode instance. - - ``identifier``: a URIRef or BNode instance. - - Example:: + Returns: + A new Collection instance. 
+ Example: + ```python >>> graph = Graph() >>> uri = URIRef("http://example.org/resource") >>> collection = graph.collection(uri) - >>> assert isinstance(collection, Collection) + >>> assert isinstance(collection, rdflib.collection.Collection) >>> assert collection.uri is uri >>> assert collection.graph is graph >>> collection += [ Literal(1), Literal(2) ] - """ - return Collection(self, identifier) - - def resource(self, identifier: Union[Node, str]) -> Resource: - """Create a new ``Resource`` instance. + ``` + """ + return rdflib.collection.Collection(self, identifier) - Parameters: + def resource(self, identifier: Node | str) -> Resource: + """Create a new `Resource` instance. - - ``identifier``: a URIRef or BNode instance. + Args: + identifier: A URIRef or BNode instance. - Example:: + Returns: + A new Resource instance. + Example: + ```python >>> graph = Graph() >>> uri = URIRef("http://example.org/resource") >>> resource = graph.resource(uri) @@ -1744,6 +1963,7 @@ def resource(self, identifier: Union[Node, str]) -> Resource: >>> assert resource.identifier is uri >>> assert resource.graph is graph + ``` """ if not isinstance(identifier, Node): identifier = URIRef(identifier) @@ -1757,10 +1977,10 @@ def _process_skolem_tuples( def skolemize( self, - new_graph: Optional[Graph] = None, - bnode: Optional[BNode] = None, - authority: Optional[str] = None, - basepath: Optional[str] = None, + new_graph: Graph | None = None, + bnode: BNode | None = None, + authority: str | None = None, + basepath: str | None = None, ) -> Graph: def do_skolemize(bnode: BNode, t: _TripleType) -> _TripleType: (s, p, o) = t @@ -1793,7 +2013,7 @@ def do_skolemize2(t: _TripleType) -> _TripleType: return retval def de_skolemize( - self, new_graph: Optional[Graph] = None, uriref: Optional[URIRef] = None + self, new_graph: Graph | None = None, uriref: URIRef | None = None ) -> Graph: def do_de_skolemize(uriref: URIRef, t: _TripleType) -> _TripleType: (s, p, o) = t @@ -1811,18 +2031,14 @@ def do_de_skolemize2(t: _TripleType) -> _TripleType: (s, p, o) = t if RDFLibGenid._is_rdflib_skolem(s): - # type error: Argument 1 to "RDFLibGenid" has incompatible type "Node"; expected "str" - s = RDFLibGenid(s).de_skolemize() # type: ignore[arg-type] + s = RDFLibGenid(s).de_skolemize() elif Genid._is_external_skolem(s): - # type error: Argument 1 to "Genid" has incompatible type "Node"; expected "str" - s = Genid(s).de_skolemize() # type: ignore[arg-type] + s = Genid(s).de_skolemize() if RDFLibGenid._is_rdflib_skolem(o): - # type error: Argument 1 to "RDFLibGenid" has incompatible type "Node"; expected "str" - o = RDFLibGenid(o).de_skolemize() # type: ignore[arg-type] + o = RDFLibGenid(o).de_skolemize() elif Genid._is_external_skolem(o): - # type error: Argument 1 to "Genid" has incompatible type "Node"; expected "str" - o = Genid(o).de_skolemize() # type: ignore[arg-type] + o = Genid(o).de_skolemize() return s, p, o @@ -1837,36 +2053,41 @@ def do_de_skolemize2(t: _TripleType) -> _TripleType: return retval def cbd( - self, resource: _SubjectType, *, target_graph: Optional[Graph] = None + self, resource: _SubjectType, *, target_graph: Graph | None = None ) -> Graph: - """Retrieves the Concise Bounded Description of a Resource from a Graph + """Retrieves the Concise Bounded Description of a Resource from a Graph. - Concise Bounded Description (CBD) is defined in [1] as: + Args: + resource: A URIRef object, the Resource to query for. + target_graph: Optionally, a graph to add the CBD to; otherwise, + a new graph is created for the CBD. 
-        Given a particular node (the starting node) in a particular RDF graph (the source graph), a subgraph of that
-        particular graph, taken to comprise a concise bounded description of the resource denoted by the starting node,
-        can be identified as follows:
+        Returns:
+            A Graph, subgraph of self if no graph was provided, otherwise the provided graph.

-        1. Include in the subgraph all statements in the source graph where the subject of the statement is the
-           starting node;
+        Note:
+            Concise Bounded Description (CBD) is defined as:

-        2. Recursively, for all statements identified in the subgraph thus far having a blank node object, include
-           in the subgraph all statements in the source graph where the subject of the statement is the blank node
-           in question and which are not already included in the subgraph.
+            Given a particular node (the starting node) in a particular RDF graph (the source graph),
+            a subgraph of that particular graph, taken to comprise a concise bounded description of
+            the resource denoted by the starting node, can be identified as follows:

-        3. Recursively, for all statements included in the subgraph thus far, for all reifications of each statement
-           in the source graph, include the concise bounded description beginning from the rdf:Statement node of
-           each reification.
+            1. Include in the subgraph all statements in the source graph where the subject of the
+               statement is the starting node;

-        This results in a subgraph where the object nodes are either URI references, literals, or blank nodes not
-        serving as the subject of any statement in the graph.
+            2. Recursively, for all statements identified in the subgraph thus far having a blank
+               node object, include in the subgraph all statements in the source graph where the
+               subject of the statement is the blank node in question and which are not already
+               included in the subgraph.

-        [1] https://www.w3.org/Submission/CBD/
+            3. Recursively, for all statements included in the subgraph thus far, for all
+               reifications of each statement in the source graph, include the concise bounded
+               description beginning from the rdf:Statement node of each reification.

-        :param resource: a URIRef object, of the Resource for queried for
-        :param target_graph: Optionally, a graph to add the CBD to; otherwise, a new graph is created for the CBD
-        :return: a Graph, subgraph of self if no graph was provided otherwise the provided graph
+            This results in a subgraph where the object nodes are either URI references, literals,
+            or blank nodes not serving as the subject of any statement in the graph.
+
+            See: <https://www.w3.org/Submission/CBD/>
         """
         if target_graph is None:
             subgraph = Graph()
@@ -1896,18 +2117,18 @@ def add_to_cbd(uri: _SubjectType) -> None:
         return subgraph


-_ContextType = Graph
+_ContextType: te.TypeAlias = Union[Graph]


 class ConjunctiveGraph(Graph):
     """A ConjunctiveGraph is an (unnamed) aggregation of all the named graphs
     in a store.

-    .. warning::
-        ConjunctiveGraph is deprecated, use :class:`~rdflib.graph.Dataset` instead.
+    !!! warning "Deprecation notice"
+        ConjunctiveGraph is deprecated, use [`rdflib.graph.Dataset`][rdflib.graph.Dataset] instead.

-    It has a ``default`` graph, whose name is associated with the
-    graph throughout its life. :meth:`__init__` can take an identifier
+    It has a `default` graph, whose name is associated with the
+    graph throughout its life. The constructor can take an identifier
     to use as the name of this default graph or it will assign a
     BNode.
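A minimal sketch of the CBD semantics spelled out in the hunk above; the IRIs are illustrative, and the blank-node hop exercises step 2 of the definition:

```python
from rdflib import BNode, Graph, Literal, URIRef

g = Graph()
person = URIRef("http://example.org/alice")
address = BNode()
g.add((person, URIRef("http://example.org/name"), Literal("Alice")))
g.add((person, URIRef("http://example.org/address"), address))
# a statement about the blank node, pulled in by step 2 of the definition
g.add((address, URIRef("http://example.org/city"), Literal("Berlin")))

cbd = g.cbd(person)
assert len(cbd) == 3  # all three statements form the description
```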
@@ -1920,9 +2141,9 @@ class ConjunctiveGraph(Graph): def __init__( self, - store: Union[Store, str] = "default", - identifier: Optional[Union[IdentifiedNode, str]] = None, - default_graph_base: Optional[str] = None, + store: Store | str = "default", + identifier: IdentifiedNode | str | None = None, + default_graph_base: str | None = None, ): super(ConjunctiveGraph, self).__init__(store, identifier=identifier) @@ -1942,6 +2163,9 @@ def __init__( store=self.store, identifier=identifier or BNode(), base=default_graph_base ) + def __getnewargs__(self) -> tuple[Any, ...]: + return (self.store, self.__identifier, self.default_context.base) + def __str__(self) -> str: pattern = ( "[a rdflib:ConjunctiveGraph;rdflib:storage " @@ -1959,7 +2183,7 @@ def _spoc( @overload def _spoc( self, - triple_or_quad: Union[_TripleType, _OptionalQuadType], + triple_or_quad: _TripleType | _OptionalQuadType, default: bool = False, ) -> _OptionalQuadType: ... @@ -1968,12 +2192,12 @@ def _spoc( self, triple_or_quad: None, default: bool = False, - ) -> Tuple[None, None, None, Optional[Graph]]: ... + ) -> tuple[None, None, None, Graph | None]: ... @overload def _spoc( self, - triple_or_quad: Optional[_TripleOrQuadPatternType], + triple_or_quad: _TripleOrQuadPatternType | None, default: bool = False, ) -> _QuadPatternType: ... @@ -1987,13 +2211,13 @@ def _spoc( @overload def _spoc( self, - triple_or_quad: Optional[_TripleOrQuadSelectorType], + triple_or_quad: _TripleOrQuadSelectorType | None, default: bool = False, ) -> _QuadSelectorType: ... def _spoc( self, - triple_or_quad: Optional[_TripleOrQuadSelectorType], + triple_or_quad: _TripleOrQuadSelectorType | None, default: bool = False, ) -> _QuadSelectorType: """ @@ -2023,8 +2247,7 @@ def add( self: _ConjunctiveGraphT, triple_or_quad: _TripleOrOptionalQuadType, ) -> _ConjunctiveGraphT: - """ - Add a triple or quad to the store. + """Add a triple or quad to the store. if a triple is given it is added to the default context """ @@ -2038,14 +2261,12 @@ def add( return self @overload - def _graph(self, c: Union[Graph, _ContextIdentifierType, str]) -> Graph: ... + def _graph(self, c: Graph | _ContextIdentifierType | str) -> Graph: ... @overload def _graph(self, c: None) -> None: ... - def _graph( - self, c: Optional[Union[Graph, _ContextIdentifierType, str]] - ) -> Optional[Graph]: + def _graph(self, c: Graph | _ContextIdentifierType | str | None) -> Graph | None: if c is None: return None if not isinstance(c, Graph): @@ -2063,15 +2284,12 @@ def addN( # noqa: N802 ) return self - # type error: Argument 1 of "remove" is incompatible with supertype "Graph"; supertype defines the argument type as "Tuple[Optional[Node], Optional[Node], Optional[Node]]" + # type error: Argument 1 of "remove" is incompatible with supertype "Graph"; supertype defines the argument type as "tuple[Optional[Node], Optional[Node], Optional[Node]]" def remove(self: _ConjunctiveGraphT, triple_or_quad: _TripleOrOptionalQuadType) -> _ConjunctiveGraphT: # type: ignore[override] - """ - Removes a triple or quads + """Removes a triple or quads if a triple is given it is removed from all contexts - a quad is removed from the given context only - """ s, p, o, c = self._spoc(triple_or_quad) @@ -2082,30 +2300,29 @@ def remove(self: _ConjunctiveGraphT, triple_or_quad: _TripleOrOptionalQuadType) def triples( self, triple_or_quad: _TripleOrQuadPatternType, - context: Optional[_ContextType] = ..., + context: _ContextType | None = ..., ) -> Generator[_TripleType, None, None]: ... 
@overload def triples( self, triple_or_quad: _TripleOrQuadPathPatternType, - context: Optional[_ContextType] = ..., + context: _ContextType | None = ..., ) -> Generator[_TriplePathType, None, None]: ... @overload def triples( self, triple_or_quad: _TripleOrQuadSelectorType, - context: Optional[_ContextType] = ..., + context: _ContextType | None = ..., ) -> Generator[_TripleOrTriplePathType, None, None]: ... def triples( self, triple_or_quad: _TripleOrQuadSelectorType, - context: Optional[_ContextType] = None, + context: _ContextType | None = None, ) -> Generator[_TripleOrTriplePathType, None, None]: - """ - Iterate over all the triples in the entire conjunctive graph + """Iterate over all the triples in the entire conjunctive graph For legacy reasons, this can take the context to query either as a fourth element of the quad, or as the explicit context @@ -2133,7 +2350,7 @@ def triples( yield s, p, o def quads( - self, triple_or_quad: Optional[_TripleOrQuadPatternType] = None + self, triple_or_quad: _TripleOrQuadPatternType | None = None ) -> Generator[_OptionalQuadType, None, None]: """Iterate over all the quads in the entire conjunctive graph""" @@ -2145,12 +2362,24 @@ def quads( def triples_choices( self, - triple: Union[ - Tuple[List[_SubjectType], _PredicateType, _ObjectType], - Tuple[_SubjectType, List[_PredicateType], _ObjectType], - Tuple[_SubjectType, _PredicateType, List[_ObjectType]], - ], - context: Optional[_ContextType] = None, + triple: ( + tuple[ + list[_SubjectType] | tuple[_SubjectType, ...], + _PredicateType, + _ObjectType | None, + ] + | tuple[ + _SubjectType | None, + list[_PredicateType] | tuple[_PredicateType, ...], + _ObjectType | None, + ] + | tuple[ + _SubjectType | None, + _PredicateType, + list[_ObjectType] | tuple[_ObjectType, ...], + ] + ), + context: _ContextType | None = None, ) -> Generator[_TripleType, None, None]: """Iterate over all the triples in the entire conjunctive graph""" s, p, o = triple @@ -2159,7 +2388,7 @@ def triples_choices( context = self.default_context else: context = self._graph(context) - # type error: Argument 1 to "triples_choices" of "Store" has incompatible type "Tuple[Union[List[Node], Node], Union[Node, List[Node]], Union[Node, List[Node]]]"; expected "Union[Tuple[List[Node], Node, Node], Tuple[Node, List[Node], Node], Tuple[Node, Node, List[Node]]]" + # type error: Argument 1 to "triples_choices" of "Store" has incompatible type "tuple[Union[list[Node], Node], Union[Node, list[Node]], Union[Node, list[Node]]]"; expected "Union[tuple[list[Node], Node, Node], tuple[Node, list[Node], Node], tuple[Node, Node, list[Node]]]" # type error note: unpacking discards type info for (s1, p1, o1), cg in self.store.triples_choices((s, p, o), context=context): # type: ignore[arg-type] yield s1, p1, o1 @@ -2169,7 +2398,7 @@ def __len__(self) -> int: return self.store.__len__() def contexts( - self, triple: Optional[_TripleType] = None + self, triple: _TripleType | None = None ) -> Generator[_ContextType, None, None]: """Iterate over all contexts in the graph @@ -2185,15 +2414,15 @@ def contexts( # type error: Statement is unreachable yield self.get_context(context) # type: ignore[unreachable] - def get_graph(self, identifier: _ContextIdentifierType) -> Union[Graph, None]: + def get_graph(self, identifier: _ContextIdentifierType) -> Graph | None: """Returns the graph identified by given identifier""" return [x for x in self.contexts() if x.identifier == identifier][0] def get_context( self, - identifier: Optional[Union[_ContextIdentifierType, 
str]], + identifier: _ContextIdentifierType | str | None, quoted: bool = False, - base: Optional[str] = None, + base: str | None = None, ) -> Graph: """Return a context graph for the given identifier @@ -2210,7 +2439,7 @@ def remove_context(self, context: _ContextType) -> None: """Removes the given context from the graph""" self.store.remove((None, None, None), context) - def context_id(self, uri: str, context_id: Optional[str] = None) -> URIRef: + def context_id(self, uri: str, context_id: str | None = None) -> URIRef: """URI#context""" uri = uri.split("#", 1)[0] if context_id is None: @@ -2219,51 +2448,56 @@ def context_id(self, uri: str, context_id: Optional[str] = None) -> URIRef: def parse( self, - source: Optional[ - Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath] - ] = None, - publicID: Optional[str] = None, # noqa: N803 - format: Optional[str] = None, - location: Optional[str] = None, - file: Optional[Union[BinaryIO, TextIO]] = None, - data: Optional[Union[str, bytes]] = None, + source: ( + IO[bytes] | TextIO | InputSource | str | bytes | pathlib.PurePath | None + ) = None, + publicID: str | None = None, # noqa: N803 + format: str | None = None, + location: str | None = None, + file: BinaryIO | TextIO | None = None, + data: str | bytes | None = None, **args: Any, ) -> Graph: - """ - Parse source adding the resulting triples to its own context (sub graph + """Parse source adding the resulting triples to its own context (sub graph of this graph). - See :meth:`rdflib.graph.Graph.parse` for documentation on arguments. - - If the source is in a format that does not support named graphs its triples - will be added to the default graph - (i.e. :attr:`ConjunctiveGraph.default_context`). - - :Returns: - - The graph into which the source was parsed. In the case of n3 it returns - the root context. - - .. caution:: - - This method can access directly or indirectly requested network or - file resources, for example, when parsing JSON-LD documents with - ``@context`` directives that point to a network location. - - When processing untrusted or potentially malicious documents, - measures should be taken to restrict network and file access. - - For information on available security measures, see the RDFLib - :doc:`Security Considerations ` - documentation. - - *Changed in 7.0*: The ``publicID`` argument is no longer used as the - identifier (i.e. name) of the default graph as was the case before - version 7.0. In the case of sources that do not support named graphs, - the ``publicID`` parameter will also not be used as the name for the - graph that the data is loaded into, and instead the triples from sources - that do not support named graphs will be loaded into the default graph - (i.e. :attr:`ConjunctiveGraph.default_context`). + See [`rdflib.graph.Graph.parse`][rdflib.graph.Graph.parse] for documentation on arguments. + + Args: + source: The source to parse + publicID: The public ID of the source + format: The format of the source + location: The location of the source + file: The file object to parse + data: The data to parse + **args: Additional arguments + + Returns: + The graph into which the source was parsed. In the case of n3 it returns + the root context. + + Note: + If the source is in a format that does not support named graphs its triples + will be added to the default graph (i.e. ConjunctiveGraph.default_context). + + !!! 
warning "Caution" + This method can access directly or indirectly requested network or + file resources, for example, when parsing JSON-LD documents with + `@context` directives that point to a network location. + + When processing untrusted or potentially malicious documents, + measures should be taken to restrict network and file access. + + For information on available security measures, see the RDFLib + Security Considerations documentation. + + !!! example "Changed in 7.0" + The `publicID` argument is no longer used as the identifier (i.e. name) + of the default graph as was the case before version 7.0. In the case of + sources that do not support named graphs, the `publicID` parameter will + also not be used as the name for the graph that the data is loaded into, + and instead the triples from sources that do not support named graphs will + be loaded into the default graph (i.e. ConjunctiveGraph.default_context). """ source = create_input_source( @@ -2288,7 +2522,7 @@ def parse( # TODO: FIXME: This should not return context, but self. return context - def __reduce__(self) -> Tuple[Type[Graph], Tuple[Store, _ContextIdentifierType]]: + def __reduce__(self) -> tuple[type[Graph], tuple[Store, _ContextIdentifierType]]: return ConjunctiveGraph, (self.store, self.identifier) @@ -2297,41 +2531,71 @@ def __reduce__(self) -> Tuple[Type[Graph], Tuple[Store, _ContextIdentifierType]] class Dataset(ConjunctiveGraph): """ - RDF 1.1 Dataset. Small extension to the Conjunctive Graph: - - the primary term is graphs in the datasets and not contexts with quads, - so there is a separate method to set/retrieve a graph in a dataset and - operate with graphs - - graphs cannot be identified with blank nodes - - added a method to directly add a single quad + An RDFLib Dataset is an object that stores multiple Named Graphs - instances of + RDFLib Graph identified by IRI - within it and allows whole-of-dataset or single + Graph use. + + RDFLib's Dataset class is based on the [RDF 1.2. 'Dataset' definition](https://www.w3.org/TR/rdf12-datasets/): - Examples of usage: + An RDF dataset is a collection of RDF graphs, and comprises: + - Exactly one default graph, being an RDF graph. The default graph does not + have a name and MAY be empty. + - Zero or more named graphs. Each named graph is a pair consisting of an IRI or + a blank node (the graph name), and an RDF graph. Graph names are unique + within an RDF dataset. + + Accordingly, a Dataset allows for `Graph` objects to be added to it with + [`URIRef`][rdflib.term.URIRef] or [`BNode`][rdflib.term.BNode] identifiers and always + creats a default graph with the [`URIRef`][rdflib.term.URIRef] identifier + `urn:x-rdflib:default`. + + Dataset extends Graph's Subject, Predicate, Object (s, p, o) 'triple' + structure to include a graph identifier - archaically called Context - producing + 'quads' of s, p, o, g. + + Triples, or quads, can be added to a Dataset. Triples, or quads with the graph + identifer :code:`urn:x-rdflib:default` go into the default graph. + + !!! warning "Deprecation notice" + Dataset builds on the `ConjunctiveGraph` class but that class's direct + use is now deprecated (since RDFLib 7.x) and it should not be used. + `ConjunctiveGraph` will be removed from future RDFLib versions. + + Examples of usage and see also the `examples/datast.py` file: + + ```python >>> # Create a new Dataset >>> ds = Dataset() >>> # simple triples goes to default graph - >>> ds.add((URIRef("http://example.org/a"), - ... URIRef("http://www.example.org/b"), - ... 
Literal("foo"))) # doctest: +ELLIPSIS + >>> ds.add(( + ... URIRef("http://example.org/a"), + ... URIRef("http://www.example.org/b"), + ... Literal("foo") + ... )) # doctest: +ELLIPSIS )> - >>> + >>> # Create a graph in the dataset, if the graph name has already been >>> # used, the corresponding graph will be returned >>> # (ie, the Dataset keeps track of the constituent graphs) >>> g = ds.graph(URIRef("http://www.example.com/gr")) - >>> + >>> # add triples to the new graph as usual - >>> g.add( - ... (URIRef("http://example.org/x"), + >>> g.add(( + ... URIRef("http://example.org/x"), ... URIRef("http://example.org/y"), - ... Literal("bar")) ) # doctest: +ELLIPSIS + ... Literal("bar") + ... )) # doctest: +ELLIPSIS )> >>> # alternatively: add a quad to the dataset -> goes to the graph - >>> ds.add( - ... (URIRef("http://example.org/x"), + >>> ds.add(( + ... URIRef("http://example.org/x"), ... URIRef("http://example.org/z"), - ... Literal("foo-bar"),g) ) # doctest: +ELLIPSIS + ... Literal("foo-bar"), + ... g + ... )) # doctest: +ELLIPSIS )> - >>> + >>> # querying triples return them all regardless of the graph >>> for t in ds.triples((None,None,None)): # doctest: +SKIP ... print(t) # doctest: +NORMALIZE_WHITESPACE @@ -2344,7 +2608,7 @@ class Dataset(ConjunctiveGraph): (rdflib.term.URIRef("http://example.org/x"), rdflib.term.URIRef("http://example.org/y"), rdflib.term.Literal("bar")) - >>> + >>> # querying quads() return quads; the fourth argument can be unrestricted >>> # (None) or restricted to a graph >>> for q in ds.quads((None, None, None, None)): # doctest: +SKIP @@ -2361,7 +2625,7 @@ class Dataset(ConjunctiveGraph): rdflib.term.URIRef("http://example.org/z"), rdflib.term.Literal("foo-bar"), rdflib.term.URIRef("http://www.example.com/gr")) - >>> + >>> # unrestricted looping is equivalent to iterating over the entire Dataset >>> for q in ds: # doctest: +SKIP ... print(q) # doctest: +NORMALIZE_WHITESPACE @@ -2377,7 +2641,7 @@ class Dataset(ConjunctiveGraph): rdflib.term.URIRef("http://example.org/z"), rdflib.term.Literal("foo-bar"), rdflib.term.URIRef("http://www.example.com/gr")) - >>> + >>> # resticting iteration to a graph: >>> for q in ds.quads((None, None, None, g)): # doctest: +SKIP ... print(q) # doctest: +NORMALIZE_WHITESPACE @@ -2392,11 +2656,11 @@ class Dataset(ConjunctiveGraph): >>> # Note that in the call above - >>> # ds.quads((None,None,None,"http://www.example.com/gr")) >>> # would have been accepted, too - >>> + >>> # graph names in the dataset can be queried: >>> for c in ds.graphs(): # doctest: +SKIP - ... print(c) # doctest: - DEFAULT + ... print(c.identifier) # doctest: + urn:x-rdflib:default http://www.example.com/gr >>> # A graph can be created without specifying a name; a skolemized genid >>> # is created on the fly @@ -2412,22 +2676,23 @@ class Dataset(ConjunctiveGraph): ... print(c) # doctest: +NORMALIZE_WHITESPACE DEFAULT http://www.example.com/gr - >>> + >>> # a graph can also be removed from a dataset via ds.remove_graph(g) + ``` - .. versionadded:: 4.0 + !!! 
example "New in version 4.0" """ def __init__( self, - store: Union[Store, str] = "default", + store: Store | str = "default", default_union: bool = False, - default_graph_base: Optional[str] = None, + default_graph_base: str | None = None, ): super(Dataset, self).__init__(store=store, identifier=None) if not self.store.graph_aware: - raise Exception("DataSet must be backed by a graph-aware store!") + raise Exception("Dataset must be backed by a graph-aware store!") self.default_context = Graph( store=self.store, identifier=DATASET_DEFAULT_GRAPH_ID, @@ -2436,22 +2701,25 @@ def __init__( self.default_union = default_union + def __getnewargs__(self) -> tuple[Any, ...]: + return (self.store, self.default_union, self.default_context.base) + def __str__(self) -> str: pattern = ( "[a rdflib:Dataset;rdflib:storage " "[a rdflib:Store;rdfs:label '%s']]" ) return pattern % self.store.__class__.__name__ - # type error: Return type "Tuple[Type[Dataset], Tuple[Store, bool]]" of "__reduce__" incompatible with return type "Tuple[Type[Graph], Tuple[Store, IdentifiedNode]]" in supertype "ConjunctiveGraph" - # type error: Return type "Tuple[Type[Dataset], Tuple[Store, bool]]" of "__reduce__" incompatible with return type "Tuple[Type[Graph], Tuple[Store, IdentifiedNode]]" in supertype "Graph" - def __reduce__(self) -> Tuple[Type[Dataset], Tuple[Store, bool]]: # type: ignore[override] - return (type(self), (self.store, self.default_union)) + # type error: Return type "tuple[Type[Dataset], tuple[Store, bool]]" of "__reduce__" incompatible with return type "tuple[Type[Graph], tuple[Store, IdentifiedNode]]" in supertype "ConjunctiveGraph" + # type error: Return type "tuple[Type[Dataset], tuple[Store, bool]]" of "__reduce__" incompatible with return type "tuple[Type[Graph], tuple[Store, IdentifiedNode]]" in supertype "Graph" + def __reduce__(self) -> tuple[type[Dataset], tuple[Store, bool]]: # type: ignore[override] + return type(self), (self.store, self.default_union) - def __getstate__(self) -> Tuple[Store, _ContextIdentifierType, _ContextType, bool]: + def __getstate__(self) -> tuple[Store, _ContextIdentifierType, _ContextType, bool]: return self.store, self.identifier, self.default_context, self.default_union def __setstate__( - self, state: Tuple[Store, _ContextIdentifierType, _ContextType, bool] + self, state: tuple[Store, _ContextIdentifierType, _ContextType, bool] ) -> None: # type error: Property "store" defined in "Graph" is read-only # type error: Property "identifier" defined in "Graph" is read-only @@ -2459,8 +2727,8 @@ def __setstate__( def graph( self, - identifier: Optional[Union[_ContextIdentifierType, _ContextType, str]] = None, - base: Optional[str] = None, + identifier: _ContextIdentifierType | _ContextType | str | None = None, + base: str | None = None, ) -> Graph: if identifier is None: from rdflib.term import _SKOLEM_DEFAULT_AUTHORITY, rdflib_skolem_genid @@ -2480,47 +2748,56 @@ def graph( def parse( self, - source: Optional[ - Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath] - ] = None, - publicID: Optional[str] = None, # noqa: N803 - format: Optional[str] = None, - location: Optional[str] = None, - file: Optional[Union[BinaryIO, TextIO]] = None, - data: Optional[Union[str, bytes]] = None, + source: ( + IO[bytes] | TextIO | InputSource | str | bytes | pathlib.PurePath | None + ) = None, + publicID: str | None = None, # noqa: N803 + format: str | None = None, + location: str | None = None, + file: BinaryIO | TextIO | None = None, + data: str | bytes | None = None, **args: 
Any, ) -> Graph: - """ - Parse an RDF source adding the resulting triples to the Graph. - - See :meth:`rdflib.graph.Graph.parse` for documentation on arguments. - - The source is specified using one of source, location, file or data. - - If the source is in a format that does not support named graphs its triples - will be added to the default graph - (i.e. :attr:`.Dataset.default_context`). - - .. caution:: - - This method can access directly or indirectly requested network or - file resources, for example, when parsing JSON-LD documents with - ``@context`` directives that point to a network location. - - When processing untrusted or potentially malicious documents, - measures should be taken to restrict network and file access. - - For information on available security measures, see the RDFLib - :doc:`Security Considerations ` - documentation. - - *Changed in 7.0*: The ``publicID`` argument is no longer used as the - identifier (i.e. name) of the default graph as was the case before - version 7.0. In the case of sources that do not support named graphs, - the ``publicID`` parameter will also not be used as the name for the - graph that the data is loaded into, and instead the triples from sources - that do not support named graphs will be loaded into the default graph - (i.e. :attr:`.Dataset.default_context`). + """Parse an RDF source adding the resulting triples to the Graph. + + See rdflib.graph.Graph.parse for documentation on arguments. + + Args: + source: The source to parse. See rdflib.graph.Graph.parse for details. + publicID: The public ID of the source. + format: The format of the source. + location: The location of the source. + file: The file object to parse. + data: The data to parse. + **args: Additional arguments. + + Returns: + The graph that the source was parsed into. + + Note: + The source is specified using one of source, location, file or data. + + If the source is in a format that does not support named graphs its triples + will be added to the default graph (i.e. Dataset.default_context). + + !!! warning "Caution" + This method can access directly or indirectly requested network or + file resources, for example, when parsing JSON-LD documents with + `@context` directives that point to a network location. + + When processing untrusted or potentially malicious documents, + measures should be taken to restrict network and file access. + + For information on available security measures, see the RDFLib + Security Considerations documentation. + + !!! example "Changed in 7.0" + The `publicID` argument is no longer used as the identifier (i.e. name) + of the default graph as was the case before version 7.0. In the case of + sources that do not support named graphs, the `publicID` parameter will + also not be used as the name for the graph that the data is loaded into, + and instead the triples from sources that do not support named graphs will + be loaded into the default graph (i.e. Dataset.default_context). 
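A minimal sketch of the default-graph rule described above, assuming the built-in N-Triples and TriG parsers; the IRIs are illustrative:

```python
from rdflib import Dataset, URIRef

ds = Dataset()

# N-Triples has no named graphs: the triple lands in the default graph.
ds.parse(data="<urn:ex:a> <urn:ex:b> <urn:ex:c> .", format="nt")
assert len(ds.default_context) == 1

# TriG carries graph names: the triple lands in the named graph.
ds.parse(data="<urn:ex:g> { <urn:ex:x> <urn:ex:y> <urn:ex:z> . }", format="trig")
assert len(ds.get_context(URIRef("urn:ex:g"))) == 1
```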
""" c = ConjunctiveGraph.parse( @@ -2529,14 +2806,12 @@ def parse( self.graph(c) return c - def add_graph( - self, g: Optional[Union[_ContextIdentifierType, _ContextType, str]] - ) -> Graph: + def add_graph(self, g: _ContextIdentifierType | _ContextType | str | None) -> Graph: """alias of graph for consistency""" return self.graph(g) def remove_graph( - self: _DatasetT, g: Optional[Union[_ContextIdentifierType, _ContextType, str]] + self: _DatasetT, g: _ContextIdentifierType | _ContextType | str | None ) -> _DatasetT: if not isinstance(g, Graph): g = self.get_context(g) @@ -2549,7 +2824,7 @@ def remove_graph( return self def contexts( - self, triple: Optional[_TripleType] = None + self, triple: _TripleType | None = None ) -> Generator[_ContextType, None, None]: default = False for c in super(Dataset, self).contexts(triple): @@ -2560,9 +2835,9 @@ def contexts( graphs = contexts - # type error: Return type "Generator[Tuple[Node, Node, Node, Optional[Node]], None, None]" of "quads" incompatible with return type "Generator[Tuple[Node, Node, Node, Optional[Graph]], None, None]" in supertype "ConjunctiveGraph" + # type error: Return type "Generator[tuple[Node, Node, Node, Optional[Node]], None, None]" of "quads" incompatible with return type "Generator[tuple[Node, Node, Node, Optional[Graph]], None, None]" in supertype "ConjunctiveGraph" def quads( # type: ignore[override] - self, quad: Optional[_TripleOrQuadPatternType] = None + self, quad: _TripleOrQuadPatternType | None = None ) -> Generator[_OptionalIdentifiedQuadType, None, None]: for s, p, o, c in super(Dataset, self).quads(quad): # type error: Item "None" of "Optional[Graph]" has no attribute "identifier" @@ -2572,7 +2847,7 @@ def quads( # type: ignore[override] # type error: Item "None" of "Optional[Graph]" has no attribute "identifier" [union-attr] yield s, p, o, c.identifier # type: ignore[union-attr] - # type error: Return type "Generator[Tuple[Node, URIRef, Node, Optional[IdentifiedNode]], None, None]" of "__iter__" incompatible with return type "Generator[Tuple[IdentifiedNode, IdentifiedNode, Union[IdentifiedNode, Literal]], None, None]" in supertype "Graph" + # type error: Return type "Generator[tuple[Node, URIRef, Node, Optional[IdentifiedNode]], None, None]" of "__iter__" incompatible with return type "Generator[tuple[IdentifiedNode, IdentifiedNode, Union[IdentifiedNode, Literal]], None, None]" in supertype "Graph" def __iter__( # type: ignore[override] self, ) -> Generator[_OptionalIdentifiedQuadType, None, None]: @@ -2580,7 +2855,7 @@ def __iter__( # type: ignore[override] return self.quads((None, None, None, None)) -class QuotedGraph(Graph): +class QuotedGraph(Graph, IdentifiedNode): """ Quoted Graphs are intended to implement Notation 3 formulae. They are associated with a required identifier that the N3 parser *must* provide @@ -2588,14 +2863,17 @@ class QuotedGraph(Graph): such as implication and other such processing. 
""" - def __init__( - self, - store: Union[Store, str], - identifier: Optional[Union[_ContextIdentifierType, str]], + def __new__( + cls, + store: Store | str, + identifier: _ContextIdentifierType | str | None, ): + return str.__new__(cls, identifier) + + def __init__(self, store: Store, identifier: _ContextIdentifierType | None): super(QuotedGraph, self).__init__(store, identifier) - def add(self: _GraphT, triple: _TripleType) -> _GraphT: + def add(self: _QuotedGraphT, triple: _TripleType) -> _QuotedGraphT: """Add a triple with self as context""" s, p, o = triple assert isinstance(s, Node), "Subject %s must be an rdflib term" % (s,) @@ -2605,7 +2883,9 @@ def add(self: _GraphT, triple: _TripleType) -> _GraphT: self.store.add((s, p, o), self, quoted=True) return self - def addN(self: _GraphT, quads: Iterable[_QuadType]) -> _GraphT: # noqa: N802 + def addN( # noqa: N802 + self: _QuotedGraphT, quads: Iterable[_QuadType] + ) -> _QuotedGraphT: """Add a sequence of triple with context""" self.store.addN( @@ -2617,7 +2897,7 @@ def addN(self: _GraphT, quads: Iterable[_QuadType]) -> _GraphT: # noqa: N802 ) return self - def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> str: + def n3(self, namespace_manager: NamespaceManager | None = None) -> str: """Return an n3 identifier for the Graph""" return "{%s}" % self.identifier.n3(namespace_manager=namespace_manager) @@ -2630,9 +2910,30 @@ def __str__(self) -> str: ) return pattern % (identifier, label) - def __reduce__(self) -> Tuple[Type[Graph], Tuple[Store, _ContextIdentifierType]]: + def __reduce__( + self, + ) -> tuple[type[QuotedGraph], tuple[Store, _ContextIdentifierType]]: return QuotedGraph, (self.store, self.identifier) + def toPython(self: _QuotedGraphT) -> _QuotedGraphT: # noqa: N802 + return self + + # Resolve conflicts between multiple inheritance + __iter__ = Graph.__iter__ # type: ignore[assignment] + __contains__ = Graph.__contains__ # type: ignore[assignment] + __ge__ = Graph.__ge__ # type: ignore[assignment] + __le__ = Graph.__le__ # type: ignore[assignment] + __gt__ = Graph.__gt__ + __eq__ = Graph.__eq__ + __iadd__ = Graph.__iadd__ + __add__ = Graph.__add__ # type: ignore[assignment] + __isub__ = Graph.__isub__ + __sub__ = Graph.__sub__ + __getitem__ = Graph.__getitem__ + __len__ = Graph.__len__ + __hash__ = Graph.__hash__ + __mul__ = Graph.__mul__ # type: ignore[assignment] + # Make sure QuotedGraph is ordered correctly # wrt to other Terms. @@ -2648,28 +2949,21 @@ class Seq: returned corresponding to the Seq content. It is based on the natural ordering of the predicate names _1, _2, _3, etc, which is the 'implementation' of a sequence in RDF terms. - """ - - def __init__(self, graph: Graph, subject: _SubjectType): - """Parameters: - - - graph: - the graph containing the Seq - - subject: - the subject of a Seq. Note that the init does not + Args: + graph: the graph containing the Seq + subject:the subject of a Seq. Note that the init does not check whether this is a Seq, this is done in whoever creates this instance! 
- """ + """ - self._list: List[Tuple[int, _ObjectType]] + def __init__(self, graph: Graph, subject: _SubjectType): + self._list: list[tuple[int, _ObjectType]] _list = self._list = list() LI_INDEX = URIRef(str(RDF) + "_") # noqa: N806 for p, o in graph.predicate_objects(subject): - # type error: "Node" has no attribute "startswith" - if p.startswith(LI_INDEX): # type: ignore[attr-defined] # != RDF.Seq: - # type error: "Node" has no attribute "replace" - i = int(p.replace(LI_INDEX, "")) # type: ignore[attr-defined] + if p.startswith(LI_INDEX): + i = int(p.replace(LI_INDEX, "")) _list.append((i, o)) # here is the trick: the predicates are _1, _2, _3, etc. Ie, @@ -2720,7 +3014,7 @@ class ReadOnlyGraphAggregate(ConjunctiveGraph): ConjunctiveGraph over an explicit subset of the entire store. """ - def __init__(self, graphs: List[Graph], store: Union[str, Store] = "default"): + def __init__(self, graphs: list[Graph], store: Union[str, Store] = "default"): if store is not None: super(ReadOnlyGraphAggregate, self).__init__(store) Graph.__init__(self, store) @@ -2765,7 +3059,7 @@ def add(self, triple: _TripleOrOptionalQuadType) -> NoReturn: def addN(self, quads: Iterable[_QuadType]) -> NoReturn: # noqa: N802 raise ModificationException() - # type error: Argument 1 of "remove" is incompatible with supertype "Graph"; supertype defines the argument type as "Tuple[Optional[Node], Optional[Node], Optional[Node]]" + # type error: Argument 1 of "remove" is incompatible with supertype "Graph"; supertype defines the argument type as "tuple[Optional[Node], Optional[Node], Optional[Node]]" def remove(self, triple: _TripleOrOptionalQuadType) -> NoReturn: # type: ignore[override] raise ModificationException() @@ -2816,18 +3110,16 @@ def __contains__(self, triple_or_quad: _TripleOrQuadSelectorType) -> bool: def quads( # type: ignore[override] self, triple_or_quad: _TripleOrQuadSelectorType ) -> Generator[ - Tuple[_SubjectType, Union[Path, _PredicateType], _ObjectType, _ContextType], + tuple[_SubjectType, Path | _PredicateType, _ObjectType, _ContextType], None, None, ]: """Iterate over all the quads in the entire aggregate graph""" c = None if len(triple_or_quad) == 4: - # type error: Need more than 3 values to unpack (4 expected) - s, p, o, c = triple_or_quad # type: ignore[misc, unused-ignore] + s, p, o, c = triple_or_quad else: - # type error: Too many values to unpack (3 expected, 4 provided) - s, p, o = triple_or_quad # type: ignore[misc, unused-ignore] + s, p, o = triple_or_quad[:3] if c is not None: for graph in [g for g in self.graphs if g == c]: @@ -2864,16 +3156,28 @@ def __isub__(self: _GraphT, other: Iterable[_TripleType]) -> NoReturn: def triples_choices( self, - triple: Union[ - Tuple[List[_SubjectType], _PredicateType, _ObjectType], - Tuple[_SubjectType, List[_PredicateType], _ObjectType], - Tuple[_SubjectType, _PredicateType, List[_ObjectType]], - ], - context: Optional[_ContextType] = None, + triple: ( + tuple[ + list[_SubjectType] | tuple[_SubjectType, ...], + _PredicateType, + _ObjectType | None, + ] + | tuple[ + _SubjectType | None, + list[_PredicateType] | tuple[_PredicateType, ...], + _ObjectType | None, + ] + | tuple[ + _SubjectType | None, + _PredicateType, + list[_ObjectType] | tuple[_ObjectType, ...], + ] + ), + context: _ContextType | None = None, ) -> Generator[_TripleType, None, None]: subject, predicate, object_ = triple for graph in self.graphs: - # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "Tuple[Union[List[Node], Node], Union[Node, List[Node]], 
Union[Node, List[Node]]]"; expected "Union[Tuple[List[Node], Node, Node], Tuple[Node, List[Node], Node], Tuple[Node, Node, List[Node]]]" + # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "tuple[Union[list[Node], Node], Union[Node, list[Node]], Union[Node, list[Node]]]"; expected "Union[tuple[list[Node], Node, Node], tuple[Node, list[Node], Node], tuple[Node, Node, list[Node]]]" # type error note: unpacking discards type info choices = graph.triples_choices((subject, predicate, object_)) # type: ignore[arg-type] for s, p, o in choices: @@ -2884,18 +3188,18 @@ def qname(self, uri: str) -> str: return self.namespace_manager.qname(uri) raise UnSupportedAggregateOperation() - def compute_qname(self, uri: str, generate: bool = True) -> Tuple[str, URIRef, str]: + def compute_qname(self, uri: str, generate: bool = True) -> tuple[str, URIRef, str]: if hasattr(self, "namespace_manager") and self.namespace_manager: return self.namespace_manager.compute_qname(uri, generate) raise UnSupportedAggregateOperation() # type error: Signature of "bind" incompatible with supertype "Graph" def bind( # type: ignore[override] - self, prefix: Optional[str], namespace: Any, override: bool = True # noqa: F811 + self, prefix: str | None, namespace: Any, override: bool = True # noqa: F811 ) -> NoReturn: raise UnSupportedAggregateOperation() - def namespaces(self) -> Generator[Tuple[str, URIRef], None, None]: + def namespaces(self) -> Generator[tuple[str, URIRef], None, None]: if hasattr(self, "namespace_manager"): for prefix, namespace in self.namespace_manager.namespaces(): yield prefix, namespace @@ -2910,16 +3214,16 @@ def absolutize(self, uri: str, defrag: int = 1) -> NoReturn: # type error: Signature of "parse" incompatible with supertype "ConjunctiveGraph" def parse( # type: ignore[override] self, - source: Optional[ - Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath] - ], - publicID: Optional[str] = None, # noqa: N803 - format: Optional[str] = None, + source: ( + IO[bytes] | TextIO | InputSource | str | bytes | pathlib.PurePath | None + ), + publicID: str | None = None, # noqa: N803 + format: str | None = None, **args: Any, ) -> NoReturn: raise ModificationException() - def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> NoReturn: + def n3(self, namespace_manager: NamespaceManager | None = None) -> NoReturn: raise UnSupportedAggregateOperation() def __reduce__(self) -> NoReturn: @@ -2941,22 +3245,20 @@ def _assertnode(*terms: Any) -> bool: class BatchAddGraph: - """ - Wrapper around graph that turns batches of calls to Graph's add + """Wrapper around graph that turns batches of calls to Graph's add (and optionally, addN) into calls to batched calls to addN`. 
- :Parameters: - - - graph: The graph to wrap - - batch_size: The maximum number of triples to buffer before passing to - Graph's addN - - batch_addn: If True, then even calls to `addN` will be batched according to - batch_size - - graph: The wrapped graph - count: The number of triples buffered since initialization or the last call to reset - batch: The current buffer of triples - + Args: + graph: The graph to wrap + batch_size: The maximum number of triples to buffer before passing to + Graph's addN + batch_addn: If True, then even calls to `addN` will be batched according to + batch_size + + Attributes: + graph: The wrapped graph + count: The number of triples buffered since initialization or the last call to reset + batch: The current buffer of triples """ def __init__(self, graph: Graph, batch_size: int = 1000, batch_addn: bool = False): @@ -2972,31 +3274,28 @@ def reset(self) -> BatchAddGraph: """ Manually clear the buffered triples and reset the count to zero """ - self.batch: List[_QuadType] = [] + self.batch: list[_QuadType] = [] self.count = 0 return self - def add( - self, - triple_or_quad: Union[ - _TripleType, - _QuadType, - ], - ) -> BatchAddGraph: - """ - Add a triple to the buffer + def add(self, triple_or_quad: _TripleType | _QuadType) -> BatchAddGraph: + """Add a triple to the buffer. + + Args: + triple_or_quad: The triple or quad to add - :param triple: The triple to add + Returns: + The BatchAddGraph instance """ if len(self.batch) >= self.__batch_size: self.graph.addN(self.batch) self.batch = [] self.count += 1 if len(triple_or_quad) == 3: - # type error: Argument 1 to "append" of "list" has incompatible type "Tuple[Node, ...]"; expected "Tuple[Node, Node, Node, Graph]" + # type error: Argument 1 to "append" of "list" has incompatible type "tuple[Node, ...]"; expected "tuple[Node, Node, Node, Graph]" self.batch.append(triple_or_quad + self.__graph_tuple) # type: ignore[arg-type, unused-ignore] else: - # type error: Argument 1 to "append" of "list" has incompatible type "Union[Tuple[Node, Node, Node], Tuple[Node, Node, Node, Graph]]"; expected "Tuple[Node, Node, Node, Graph]" + # type error: Argument 1 to "append" of "list" has incompatible type "Union[tuple[Node, Node, Node], tuple[Node, Node, Node, Graph]]"; expected "tuple[Node, Node, Node, Graph]" self.batch.append(triple_or_quad) # type: ignore[arg-type, unused-ignore] return self diff --git a/rdflib/namespace/_GEO.py b/rdflib/namespace/_GEO.py index d7168d64c..2542c1e4f 100644 --- a/rdflib/namespace/_GEO.py +++ b/rdflib/namespace/_GEO.py @@ -9,20 +9,20 @@ class GEO(DefinedNamespace): Generated from: http://schemas.opengis.net/geosparql/1.0/geosparql_vocab_all.rdf Date: 2021-12-27 17:38:15.101187 - .. 
code-block:: Turtle - - dc:creator "Open Geospatial Consortium"^^xsd:string - dc:date "2012-04-30"^^xsd:date - dc:source - "OGC GeoSPARQL – A Geographic Query Language for RDF Data OGC 11-052r5"^^xsd:string - rdfs:seeAlso - - - owl:imports dc: - - - - owl:versionInfo "OGC GeoSPARQL 1.0"^^xsd:string + ```turtle + dc:creator "Open Geospatial Consortium"^^xsd:string + dc:date "2012-04-30"^^xsd:date + dc:source + "OGC GeoSPARQL – A Geographic Query Language for RDF Data OGC 11-052r5"^^xsd:string + rdfs:seeAlso + + + owl:imports dc: + + + + owl:versionInfo "OGC GeoSPARQL 1.0"^^xsd:string + ``` """ # http://www.w3.org/2000/01/rdf-schema#Datatype diff --git a/rdflib/namespace/__init__.py b/rdflib/namespace/__init__.py index 4077b0be3..871ba24a3 100644 --- a/rdflib/namespace/__init__.py +++ b/rdflib/namespace/__init__.py @@ -1,37 +1,34 @@ """ -=================== -Namespace Utilities -=================== +# Namespace Utilities RDFLib provides mechanisms for managing Namespaces. -In particular, there is a :class:`~rdflib.namespace.Namespace` class +In particular, there is a [`Namespace`][rdflib.namespace.Namespace] class that takes as its argument the base URI of the namespace. -.. code-block:: pycon +```python +>>> from rdflib.namespace import Namespace +>>> RDFS = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#") - >>> from rdflib.namespace import Namespace - >>> RDFS = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#") +``` Fully qualified URIs in the namespace can be constructed either by attribute or by dictionary access on Namespace instances: -.. code-block:: pycon - - >>> RDFS.seeAlso - rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#seeAlso') - >>> RDFS['seeAlso'] - rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#seeAlso') +```python +>>> RDFS.seeAlso +rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#seeAlso') +>>> RDFS['seeAlso'] +rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#seeAlso') +``` -Automatic handling of unknown predicates ------------------------------------------ +## Automatic handling of unknown predicates As a programming convenience, a namespace binding is automatically -created when :class:`rdflib.term.URIRef` predicates are added to the graph. +created when [`URIRef`][rdflib.term.URIRef] predicates are added to the graph. -Importable namespaces ------------------------ +## Importable namespaces The following namespaces are available by directly importing from rdflib: @@ -63,20 +60,38 @@ * WGS * XSD -.. 
code-block:: pycon +```python +>>> from rdflib.namespace import RDFS +>>> RDFS.seeAlso +rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#seeAlso') - >>> from rdflib.namespace import RDFS - >>> RDFS.seeAlso - rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#seeAlso') +``` """ from __future__ import annotations import logging import warnings +from collections.abc import Iterable + +try: + # Python >= 3.14 + from annotationlib import ( + get_annotations, # type: ignore[attr-defined,unused-ignore] + ) +except ImportError: # pragma: no cover + try: + # Python >= 3.10 + from inspect import get_annotations # type: ignore[attr-defined,unused-ignore] + except ImportError: + + def get_annotations(thing: Any) -> dict: + return thing.__annotations__ + + from functools import lru_cache from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import TYPE_CHECKING, Any from unicodedata import category from urllib.parse import urldefrag, urljoin @@ -128,9 +143,9 @@ class Namespace(str): - """ - Utility class for quickly generating URIRefs with a common prefix + """Utility class for quickly generating URIRefs with a common prefix. + ```python >>> from rdflib.namespace import Namespace >>> n = Namespace("http://example.org/") >>> n.Person # as attribute @@ -142,9 +157,11 @@ class Namespace(str): >>> n2 = Namespace("http://example2.org/") >>> n.Person in n2 False + + ``` """ - def __new__(cls, value: Union[str, bytes]) -> Namespace: + def __new__(cls, value: str | bytes) -> Namespace: try: rt = str.__new__(cls, value) except UnicodeDecodeError: @@ -175,6 +192,7 @@ def __repr__(self) -> str: def __contains__(self, ref: str) -> bool: # type: ignore[override] """Allows to check if a URI is within (starts with) this Namespace. + ```python >>> from rdflib import URIRef >>> namespace = Namespace('http://example.org/') >>> uri = URIRef('http://example.org/foo') @@ -186,23 +204,27 @@ def __contains__(self, ref: str) -> bool: # type: ignore[override] >>> obj = URIRef('http://not.example.org/bar') >>> obj in namespace False + + ``` """ return ref.startswith(self) # test namespace membership with "ref in ns" syntax class URIPattern(str): - """ - Utility class for creating URIs according to some pattern + """Utility class for creating URIs according to some pattern. + This supports either new style formatting with .format - or old-style with % operator + or old-style with % operator. + ```python >>> u=URIPattern("http://example.org/%s/%d/resource") >>> u%('books', 12345) rdflib.term.URIRef('http://example.org/books/12345/resource') + ``` """ - def __new__(cls, value: Union[str, bytes]) -> URIPattern: + def __new__(cls, value: str | bytes) -> URIPattern: try: rt = str.__new__(cls, value) except UnicodeDecodeError: @@ -225,7 +247,8 @@ def __repr__(self) -> str: # always raise AttributeError if they are not defined and which should not be # considered part of __dir__ results. These should be all annotations on # `DefinedNamespaceMeta`. -_DFNS_RESERVED_ATTRS: Set[str] = { +_DFNS_RESERVED_ATTRS: set[str] = { + "__slots__", "_NS", "_warn", "_fail", @@ -235,7 +258,7 @@ def __repr__(self) -> str: # Some libraries probe classes for certain attributes or items. # This is a list of those attributes and items that should be ignored. 
-_IGNORED_ATTR_LOOKUP: Set[str] = { +_IGNORED_ATTR_LOOKUP: set[str] = { "_pytestfixturefunction", # pytest tries to look this up on Defined namespaces "_partialmethod", # sphinx tries to look this up during autodoc generation } @@ -244,10 +267,12 @@ def __repr__(self) -> str: class DefinedNamespaceMeta(type): """Utility metaclass for generating URIRefs with a common prefix.""" + __slots__: tuple[str, ...] = tuple() + _NS: Namespace _warn: bool = True _fail: bool = False # True means mimic ClosedNamespace - _extras: List[str] = [] # List of non-pythonesque items + _extras: list[str] = [] # List of non-pythonesque items _underscore_num: bool = False # True means pass "_n" constructs @lru_cache(maxsize=None) @@ -255,15 +280,11 @@ def __getitem__(cls, name: str, default=None) -> URIRef: name = str(name) if name in _DFNS_RESERVED_ATTRS: - raise AttributeError( - f"DefinedNamespace like object has no attribute {name!r}" + raise KeyError( + f"DefinedNamespace like object has no access item named {name!r}" ) elif name in _IGNORED_ATTR_LOOKUP: raise KeyError() - if str(name).startswith("__"): - # NOTE on type ignore: This seems to be a real bug, super() does not - # implement this method, it will fail if it is ever reached. - return super().__getitem__(name, default) # type: ignore[misc] # undefined in superclass if (cls._warn or cls._fail) and name not in cls: if cls._fail: raise AttributeError(f"term '{name}' not in namespace '{cls._NS}'") @@ -277,28 +298,41 @@ def __getitem__(cls, name: str, default=None) -> URIRef: def __getattr__(cls, name: str): if name in _IGNORED_ATTR_LOOKUP: raise AttributeError() + elif name in _DFNS_RESERVED_ATTRS: + raise AttributeError( + f"DefinedNamespace like object has no attribute {name!r}" + ) + elif name.startswith("__"): + return super(DefinedNamespaceMeta, cls).__getattribute__(name) return cls.__getitem__(name) def __repr__(cls) -> str: - return f"Namespace({str(cls._NS)!r})" + try: + ns_repr = repr(cls._NS) + except AttributeError: + ns_repr = "" + return f"Namespace({ns_repr})" def __str__(cls) -> str: - return str(cls._NS) + try: + return str(cls._NS) + except AttributeError: + return "" def __add__(cls, other: str) -> URIRef: return cls.__getitem__(other) def __contains__(cls, item: str) -> bool: """Determine whether a URI or an individual item belongs to this namespace""" + try: + this_ns = cls._NS + except AttributeError: + return False item_str = str(item) - if item_str.startswith("__"): - # NOTE on type ignore: This seems to be a real bug, super() does not - # implement this method, it will fail if it is ever reached. - return super().__contains__(item) # type: ignore[misc] # undefined in superclass - if item_str.startswith(str(cls._NS)): - item_str = item_str[len(str(cls._NS)) :] + if item_str.startswith(str(this_ns)): + item_str = item_str[len(str(this_ns)) :] return any( - item_str in c.__annotations__ + item_str in get_annotations(c) or item_str in c._extras or (cls._underscore_num and item_str[0] == "_" and item_str[1:].isdigit()) for c in cls.mro() @@ -306,16 +340,16 @@ def __contains__(cls, item: str) -> bool: ) def __dir__(cls) -> Iterable[str]: - attrs = {str(x) for x in cls.__annotations__} + attrs = {str(x) for x in get_annotations(cls)} # Removing these as they should not be considered part of the namespace. 
attrs.difference_update(_DFNS_RESERVED_ATTRS) values = {cls[str(x)] for x in attrs} return values def as_jsonld_context(self, pfx: str) -> dict: # noqa: N804 - """Returns this DefinedNamespace as a a JSON-LD 'context' object""" + """Returns this DefinedNamespace as a JSON-LD 'context' object""" terms = {pfx: str(self._NS)} - for key, term in self.__annotations__.items(): + for key, term in get_annotations(self).items(): if issubclass(term, URIRef): terms[key] = f"{pfx}:{key}" @@ -323,10 +357,12 @@ class DefinedNamespace(metaclass=DefinedNamespaceMeta): + """A Namespace with an enumerated list of members. + + If _warn is True, warnings are emitted when unknown members are referenced. """ - A Namespace with an enumerated list of members. - Warnings are emitted if unknown members are referenced if _warn is True - """ + + __slots__: tuple[str, ...] = tuple() def __init__(self): raise TypeError("namespace may not be instantiated") @@ -339,9 +375,9 @@ class ClosedNamespace(Namespace): Trying to create terms not listed is an error """ - __uris: Dict[str, URIRef] + __uris: dict[str, URIRef] - def __new__(cls, uri: str, terms: List[str]): + def __new__(cls, uri: str, terms: list[str]): rt = super().__new__(cls, uri) rt.__uris = {t: URIRef(rt + t) for t in terms} # type: ignore[attr-defined] return rt @@ -371,7 +407,7 @@ def __getattr__(self, name: str) -> URIRef: def __repr__(self) -> str: return f"{self.__module__}.{self.__class__.__name__}({str(self)!r})" - def __dir__(self) -> List[str]: + def __dir__(self) -> list[str]: return list(self.__uris) def __contains__(self, ref: str) -> bool: # type: ignore[override] @@ -379,7 +415,7 @@ def __contains__(self, ref: str) -> bool: # type: ignore[override] ref in self.__uris.values() ) # test namespace membership with "ref in ns" syntax - def _ipython_key_completions_(self) -> List[str]: + def _ipython_key_completions_(self) -> list[str]: return dir(self) @@ -415,39 +451,38 @@ class NamespaceManager: * using prefix bindings from prefix.cc which is an online prefixes database * not implemented yet - this is aspirational - .. attention:: + !!! warning "Breaking changes" - The namespaces bound for specific values of ``bind_namespaces`` + The namespaces bound for specific values of `bind_namespaces` constitute part of RDFLib's public interface, so changes to them should only be additive within the same minor version. Removing values, or removing namespaces that are bound by default, constitutes a breaking change. - See the - Sample usage - - .. 
code-block:: pycon - - >>> import rdflib - >>> from rdflib import Graph - >>> from rdflib.namespace import Namespace, NamespaceManager - >>> EX = Namespace('http://example.com/') - >>> namespace_manager = NamespaceManager(Graph()) - >>> namespace_manager.bind('ex', EX, override=False) - >>> g = Graph() - >>> g.namespace_manager = namespace_manager - >>> all_ns = [n for n in g.namespace_manager.namespaces()] - >>> assert ('ex', rdflib.term.URIRef('http://example.com/')) in all_ns - >>> + See the sample usage + + ```python + >>> import rdflib + >>> from rdflib import Graph + >>> from rdflib.namespace import Namespace, NamespaceManager + >>> EX = Namespace('http://example.com/') + >>> namespace_manager = NamespaceManager(Graph()) + >>> namespace_manager.bind('ex', EX, override=False) + >>> g = Graph() + >>> g.namespace_manager = namespace_manager + >>> all_ns = [n for n in g.namespace_manager.namespaces()] + >>> assert ('ex', rdflib.term.URIRef('http://example.com/')) in all_ns + + ``` """ def __init__(self, graph: Graph, bind_namespaces: _NamespaceSetString = "rdflib"): self.graph = graph - self.__cache: Dict[str, Tuple[str, URIRef, str]] = {} - self.__cache_strict: Dict[str, Tuple[str, URIRef, str]] = {} + self.__cache: dict[str, tuple[str, URIRef, str]] = {} + self.__cache_strict: dict[str, tuple[str, URIRef, str]] = {} self.__log = None - self.__strie: Dict[str, Any] = {} - self.__trie: Dict[str, Any] = {} + self.__strie: dict[str, Any] = {} + self.__trie: dict[str, Any] = {} # This type declaration is here because there is no common base class # for all namespaces and without it the inferred type of ns is not # compatible with all prefixes. @@ -510,24 +545,28 @@ def curie(self, uri: str, generate: bool = True) -> str: Result is guaranteed to contain a colon separating the prefix from the name, even if the prefix is an empty string. - .. warning:: - - When ``generate`` is `True` (which is the default) and there is no + !!! warning "Side-effect" + When `generate` is `True` (which is the default) and there is no matching namespace for the URI in the namespace manager then a new - namespace will be added with prefix ``ns{index}``. + namespace will be added with prefix `ns{index}`. - Thus, when ``generate`` is `True`, this function is not a pure + Thus, when `generate` is `True`, this function is not a pure function because of this side-effect. This default behaviour is chosen so that this function operates similarly to `NamespaceManager.qname`. - :param uri: URI to generate CURIE for. - :param generate: Whether to add a prefix for the namespace if one doesn't - already exist. Default: `True`. - :return: CURIE for the URI. - :raises KeyError: If generate is `False` and the namespace doesn't already have - a prefix. + Args: + uri: URI to generate CURIE for. + generate: Whether to add a prefix for the namespace if one doesn't + already exist. Default: `True`. + + Returns: + CURIE for the URI. + + Raises: + KeyError: If generate is `False` and the namespace doesn't already have + a prefix. 
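 For illustration, a doctest-style sketch of the behaviour described above (the `ex` prefix binding is a hypothetical example, not an RDFLib default):

 ```python
 >>> from rdflib import Graph, Namespace
 >>> nm = Graph().namespace_manager
 >>> nm.bind("ex", Namespace("http://example.org/"))
 >>> nm.curie("http://example.org/thing", generate=False)
 'ex:thing'

 ```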
""" prefix, namespace, name = self.compute_qname(uri, generate=generate) return ":".join((prefix, name)) @@ -564,8 +603,8 @@ def normalizeUri(self, rdfTerm: str) -> str: # noqa: N802, N803 qNameParts = self.compute_qname(rdfTerm) # noqa: N806 return ":".join([qNameParts[0], qNameParts[-1]]) - def compute_qname(self, uri: str, generate: bool = True) -> Tuple[str, URIRef, str]: - prefix: Optional[str] + def compute_qname(self, uri: str, generate: bool = True) -> tuple[str, URIRef, str]: + prefix: str | None if uri not in self.__cache: if not _is_valid_uri(uri): raise ValueError( @@ -611,12 +650,12 @@ def compute_qname(self, uri: str, generate: bool = True) -> Tuple[str, URIRef, s def compute_qname_strict( self, uri: str, generate: bool = True - ) -> Tuple[str, str, str]: + ) -> tuple[str, str, str]: # code repeated to avoid branching on strict every time # if output needs to be strict (e.g. for xml) then # only the strict output should bear the overhead namespace: str - prefix: Optional[str] + prefix: str | None prefix, namespace, name = self.compute_qname(uri, generate) if is_ncname(str(name)): return prefix, namespace, name @@ -715,7 +754,7 @@ def _store_bind(self, prefix: str, namespace: URIRef, override: bool) -> None: def bind( self, - prefix: Optional[str], + prefix: str | None, namespace: Any, override: bool = True, replace: bool = False, @@ -726,7 +765,6 @@ def bind( bound to another prefix. If replace, replace any existing prefix with the new namespace - """ namespace = URIRef(str(namespace)) @@ -780,7 +818,7 @@ def bind( insert_trie(self.__trie, str(namespace)) - def namespaces(self) -> Iterable[Tuple[str, URIRef]]: + def namespaces(self) -> Iterable[tuple[str, URIRef]]: for prefix, namespace in self.store.namespaces(): namespace = URIRef(namespace) yield prefix, namespace @@ -863,8 +901,8 @@ def is_ncname(name: str) -> int: def split_uri( - uri: str, split_start: List[str] = SPLIT_START_CATEGORIES -) -> Tuple[str, str]: + uri: str, split_start: list[str] = SPLIT_START_CATEGORIES +) -> tuple[str, str]: if uri.startswith(XMLNS): return (XMLNS, uri.split(XMLNS)[1]) length = len(uri) @@ -886,8 +924,8 @@ def split_uri( def insert_trie( - trie: Dict[str, Any], value: str -) -> Dict[str, Any]: # aka get_subtrie_or_insert + trie: dict[str, Any], value: str +) -> dict[str, Any]: # aka get_subtrie_or_insert """Insert a value into the trie if it is not already contained in the trie. Return the subtree for the value regardless of whether it is a new value or not.""" @@ -910,12 +948,12 @@ def insert_trie( return trie[value] -def insert_strie(strie: Dict[str, Any], trie: Dict[str, Any], value: str) -> None: +def insert_strie(strie: dict[str, Any], trie: dict[str, Any], value: str) -> None: if value not in strie: strie[value] = insert_trie(trie, value) -def get_longest_namespace(trie: Dict[str, Any], value: str) -> Optional[str]: +def get_longest_namespace(trie: dict[str, Any], value: str) -> str | None: for key in trie: if value.startswith(key): out = get_longest_namespace(trie[key], value) diff --git a/rdflib/parser.py b/rdflib/parser.py index 1c652ca21..c6db87060 100644 --- a/rdflib/parser.py +++ b/rdflib/parser.py @@ -1,5 +1,4 @@ -""" -Parser plugin interface. +"""Parser plugin interface. This module defines the parser plugin interface and contains other related parser support code. @@ -7,7 +6,6 @@ The module is mainly useful for those wanting to write a parser that can plugin to rdflib. If you are wanting to invoke a parser you likely want to do so through the Graph class parse method. 
- """ from __future__ import annotations @@ -22,10 +20,7 @@ TYPE_CHECKING, Any, BinaryIO, - List, - Optional, TextIO, - Tuple, Union, cast, ) @@ -91,13 +86,13 @@ def __init__(self, wrapped: Union[str, StringIO, TextIOBase], encoding="utf-8"): self.wrapped = wrapped self.encoding = encoding self.encoder = codecs.getencoder(self.encoding) - self.enc_str: Optional[Union[BytesIO, BufferedIOBase]] = None - self.text_str: Optional[Union[StringIO, TextIOBase]] = None - self.has_read1: Optional[bool] = None - self.has_seek: Optional[bool] = None - self._name: Optional[str] = None - self._fileno: Optional[Union[int, BaseException]] = None - self._isatty: Optional[Union[bool, BaseException]] = None + self.enc_str: Union[BytesIO, BufferedIOBase] | None = None + self.text_str: Union[StringIO, TextIOBase] | None = None + self.has_read1: bool | None = None + self.has_seek: bool | None = None + self._name: str | None = None + self._fileno: Union[int, BaseException] | None = None + self._isatty: Union[bool, BaseException] | None = None self._leftover: bytes = b"" self._text_bytes_offset: int = 0 norm_encoding = encoding.lower().replace("_", "-") @@ -125,7 +120,7 @@ def __init__(self, wrapped: Union[str, StringIO, TextIOBase], encoding="utf-8"): self._bytes_per_char = 2 def _init(self): - name: Optional[str] = None + name: str | None = None if isinstance(self.wrapped, str): b, blen = self.encoder(self.wrapped) self.enc_str = BytesIO(b) @@ -133,7 +128,7 @@ def _init(self): elif isinstance(self.wrapped, TextIOWrapper): inner = self.wrapped.buffer # type error: TextIOWrapper.buffer cannot be a BytesIOWrapper - if isinstance(inner, BytesIOWrapper): # type: ignore[unreachable] + if isinstance(inner, BytesIOWrapper): # type: ignore[unreachable, unused-ignore] raise Exception( "BytesIOWrapper cannot be wrapped in TextIOWrapper, " "then wrapped in another BytesIOWrapper" @@ -202,7 +197,7 @@ def name(self) -> Any: def closed(self) -> bool: if self.enc_str is None and self.text_str is None: return False - closed: Optional[bool] = None + closed: bool | None = None if self.enc_str is not None: try: closed = self.enc_str.closed @@ -221,7 +216,7 @@ def readable(self) -> bool: def writable(self) -> bool: return False - def truncate(self, size: Optional[int] = None) -> int: + def truncate(self, size: int | None = None) -> int: raise NotImplementedError("Cannot truncate on BytesIOWrapper") def isatty(self) -> bool: @@ -257,7 +252,7 @@ def close(self): def flush(self): return # Does nothing on read-only streams - def _read_bytes_from_text_stream(self, size: Optional[int] = -1, /) -> bytes: + def _read_bytes_from_text_stream(self, size: int | None = -1, /) -> bytes: if TYPE_CHECKING: assert self.text_str is not None if size is None or size < 0: @@ -300,7 +295,7 @@ def _read_bytes_from_text_stream(self, size: Optional[int] = -1, /) -> bytes: self._text_bytes_offset += len(ret_bytes) return ret_bytes - def read(self, size: Optional[int] = -1, /) -> bytes: + def read(self, size: int | None = -1, /) -> bytes: """ Read at most size bytes, returned as a bytes object. @@ -317,7 +312,7 @@ def read(self, size: Optional[int] = -1, /) -> bytes: ret_bytes = self._read_bytes_from_text_stream(size) return ret_bytes - def read1(self, size: Optional[int] = -1, /) -> bytes: + def read1(self, size: int | None = -1, /) -> bytes: """ Read at most size bytes, with at most one call to the underlying raw stream’s read() or readinto() method. Returned as a bytes object. 
@@ -423,9 +418,9 @@ class InputSource(xmlreader.InputSource): TODO: """ - def __init__(self, system_id: Optional[str] = None): + def __init__(self, system_id: str | None = None): xmlreader.InputSource.__init__(self, system_id=system_id) - self.content_type: Optional[str] = None + self.content_type: str | None = None self.auto_close = False # see Graph.parse(), true if opened by us def close(self) -> None: @@ -459,23 +454,23 @@ class PythonInputSource(InputSource): True """ - def __init__(self, data: Any, system_id: Optional[str] = None): + def __init__(self, data: Any, system_id: str | None = None): self.content_type = None self.auto_close = False # see Graph.parse(), true if opened by us - self.public_id: Optional[str] = None - self.system_id: Optional[str] = system_id + self.public_id: str | None = None + self.system_id: str | None = system_id self.data = data - def getPublicId(self) -> Optional[str]: # noqa: N802 + def getPublicId(self) -> str | None: # noqa: N802 return self.public_id - def setPublicId(self, public_id: Optional[str]) -> None: # noqa: N802 + def setPublicId(self, public_id: str | None) -> None: # noqa: N802 self.public_id = public_id - def getSystemId(self) -> Optional[str]: # noqa: N802 + def getSystemId(self) -> str | None: # noqa: N802 return self.system_id - def setSystemId(self, system_id: Optional[str]) -> None: # noqa: N802 + def setSystemId(self, system_id: str | None) -> None: # noqa: N802 self.system_id = system_id def close(self) -> None: @@ -489,9 +484,9 @@ class StringInputSource(InputSource): def __init__( self, - value: Union[str, bytes], + value: str | bytes, encoding: str = "utf-8", - system_id: Optional[str] = None, + system_id: str | None = None, ): super(StringInputSource, self).__init__(system_id) stream: Union[BinaryIO, TextIO] @@ -520,27 +515,27 @@ class URLInputSource(InputSource): Constructs an RDFLib Parser InputSource from a URL to read it from the Web. 
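 For illustration only (not part of this changeset, and shown unexecuted because constructing a `URLInputSource` performs the HTTP request immediately):

 ```python
 from rdflib.parser import URLInputSource

 # Content negotiation is driven by `format`; the served media type ends up
 # on `source.content_type`. The URL is just a well-known public RDF document.
 source = URLInputSource("https://www.w3.org/People/Berners-Lee/card", format="turtle")
 print(source.content_type)
 ```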
""" - links: List[str] + links: list[str] @classmethod - def getallmatchingheaders(cls, message: Message, name) -> List[str]: + def getallmatchingheaders(cls, message: Message, name) -> list[str]: # This is reimplemented here, because the method # getallmatchingheaders from HTTPMessage is broken since Python 3.0 name = name.lower() return [val for key, val in message.items() if key.lower() == name] @classmethod - def get_links(cls, response: addinfourl) -> List[str]: + def get_links(cls, response: addinfourl) -> list[str]: linkslines = cls.getallmatchingheaders(response.headers, "Link") - retarray: List[str] = [] + retarray: list[str] = [] for linksline in linkslines: links = [linkstr.strip() for linkstr in linksline.split(",")] for link in links: retarray.append(link) return retarray - def get_alternates(self, type_: Optional[str] = None) -> List[str]: - typestr: Optional[str] = f'type="{type_}"' if type_ else None + def get_alternates(self, type_: str | None = None) -> list[str]: + typestr: str | None = f'type="{type_}"' if type_ else None relstr = 'rel="alternate"' alts = [] for link in self.links: @@ -554,7 +549,7 @@ def get_alternates(self, type_: Optional[str] = None) -> List[str]: alts.append(parts[0].strip("<>")) return alts - def __init__(self, system_id: Optional[str] = None, format: Optional[str] = None): + def __init__(self, system_id: str | None = None, format: str | None = None): super(URLInputSource, self).__init__(system_id) self.url = system_id @@ -622,7 +617,7 @@ def __init__( self, file: Union[BinaryIO, TextIO, TextIOBase, RawIOBase, BufferedIOBase], /, - encoding: Optional[str] = None, + encoding: str | None = None, ): base = pathlib.Path.cwd().as_uri() system_id = URIRef(pathlib.Path(file.name).absolute().as_uri(), base=base) # type: ignore[union-attr] @@ -653,14 +648,14 @@ def __repr__(self) -> str: def create_input_source( - source: Optional[ - Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath] - ] = None, - publicID: Optional[str] = None, # noqa: N803 - location: Optional[str] = None, - file: Optional[Union[BinaryIO, TextIO]] = None, - data: Optional[Union[str, bytes, dict]] = None, - format: Optional[str] = None, + source: ( + Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath] | None + ) = None, + publicID: str | None = None, # noqa: N803 + location: str | None = None, + file: BinaryIO | TextIO | None = None, + data: str | bytes | dict | None = None, + format: str | None = None, ) -> InputSource: """ Return an appropriate InputSource instance for the given @@ -700,15 +695,15 @@ def create_input_source( f = source input_source = InputSource() if hasattr(source, "encoding"): - input_source.setCharacterStream(source) + input_source.setCharacterStream(source) # type: ignore[arg-type] input_source.setEncoding(source.encoding) try: b = source.buffer # type: ignore[union-attr] input_source.setByteStream(b) except (AttributeError, LookupError): - input_source.setByteStream(source) + input_source.setByteStream(source) # type: ignore[arg-type] else: - input_source.setByteStream(f) + input_source.setByteStream(f) # type: ignore[arg-type] if f is sys.stdin: input_source.setSystemId("file:///dev/stdin") elif hasattr(f, "name"): @@ -773,11 +768,11 @@ def create_input_source( def _create_input_source_from_location( - file: Optional[Union[BinaryIO, TextIO]], - format: Optional[str], - input_source: Optional[InputSource], + file: BinaryIO | TextIO | None, + format: str | None, + input_source: InputSource | None, location: str, -) -> Tuple[URIRef, 
bool, Optional[Union[BinaryIO, TextIO]], Optional[InputSource]]: +) -> tuple[URIRef, bool, BinaryIO | TextIO | None, InputSource | None]: # Fix for Windows problem https://github.com/RDFLib/rdflib/issues/145 and # https://github.com/RDFLib/rdflib/issues/1430 # NOTE: using pathlib.Path.exists on a URL fails on windows as it is not a diff --git a/rdflib/paths.py b/rdflib/paths.py index 3692bad45..34eed70ab 100644 --- a/rdflib/paths.py +++ b/rdflib/paths.py @@ -1,50 +1,23 @@ r""" - This module implements the SPARQL 1.1 Property path operators, as defined in: - -http://www.w3.org/TR/sparql11-query/#propertypaths +[http://www.w3.org/TR/sparql11-query/#propertypaths](http://www.w3.org/TR/sparql11-query/#propertypaths) In SPARQL the syntax is as follows: -+--------------------+-------------------------------------------------+ -|Syntax | Matches | -+====================+=================================================+ -|iri | An IRI. A path of length one. | -+--------------------+-------------------------------------------------+ -|^elt | Inverse path (object to subject). | -+--------------------+-------------------------------------------------+ -|elt1 / elt2 | A sequence path of elt1 followed by elt2. | -+--------------------+-------------------------------------------------+ -|elt1 | elt2 | A alternative path of elt1 or elt2 | -| | (all possibilities are tried). | -+--------------------+-------------------------------------------------+ -|elt* | A path that connects the subject and object | -| | of the path by zero or more matches of elt. | -+--------------------+-------------------------------------------------+ -|elt+ | A path that connects the subject and object | -| | of the path by one or more matches of elt. | -+--------------------+-------------------------------------------------+ -|elt? | A path that connects the subject and object | -| | of the path by zero or one matches of elt. | -+--------------------+-------------------------------------------------+ -|!iri or | Negated property set. An IRI which is not one of| -|!(iri\ :sub:`1`\ \| | iri\ :sub:`1`...iri\ :sub:`n`. | -|... \|iri\ :sub:`n`)| !iri is short for !(iri). | -+--------------------+-------------------------------------------------+ -|!^iri or | Negated property set where the excluded matches | -|!(^iri\ :sub:`1`\ \|| are based on reversed path. That is, not one of | -|...\|^iri\ :sub:`n`)| iri\ :sub:`1`...iri\ :sub:`n` as reverse paths. | -| | !^iri is short for !(^iri). | -+--------------------+-------------------------------------------------+ -|!(iri\ :sub:`1`\ \| | A combination of forward and reverse | -|...\|iri\ :sub:`j`\ | properties in a negated property set. | -|\|^iri\ :sub:`j+1`\ | | -|\|... \|^iri\ | | -|:sub:`n`)| | | -+--------------------+-------------------------------------------------+ -|(elt) | A group path elt, brackets control precedence. | -+--------------------+-------------------------------------------------+ +| Syntax | Matches | +|---------------------|-------------------------------------------------------------------------| +| `iri` | An IRI. A path of length one. | +| `^elt` | Inverse path (object to subject). | +| `elt1 / elt2` | A sequence path of `elt1` followed by `elt2`. | +| `elt1 \| elt2` | An alternative path of `elt1` or `elt2` (all possibilities are tried). 
| +| `elt*` | A path that connects subject and object by zero or more matches of `elt`. |
+| `elt+` | A path that connects subject and object by one or more matches of `elt`. |
+| `elt?` | A path that connects subject and object by zero or one matches of `elt`. |
+| `!iri` or `!(iri1 \| ... \| irin)` | Negated property set. An IRI not among `iri1` to `irin`. `!iri` is short for `!(iri)`. |
+| `!^iri` or `!(^iri1 \| ... \| ^irin)` | Negated reverse property set. Excludes `^iri1` to `^irin` as reverse paths. `!^iri` is short for `!(^iri)`. |
+| `!(iri1 \| ... \| irij \| ^irij+1 \| ... \| ^irin)` | A combination of forward and reverse properties in a negated property set. |
+| `(elt)` | A grouped path `elt`, where parentheses control precedence. |

This module is used internally by the SPARQL engine, but the property paths can also be used to query RDFLib Graphs directly. @@ -52,6 +25,7 @@ Where possible the SPARQL syntax is mapped to Python operators, and property path objects can be constructed from existing URIRefs. +```python >>> from rdflib import Graph, Namespace >>> from rdflib.namespace import FOAF
-def _n3( - arg: Union[URIRef, Path], namespace_manager: Optional[NamespaceManager] = None -) -> str: +def _n3(arg: URIRef | Path, namespace_manager: NamespaceManager | None = None) -> str: if isinstance(arg, (SequencePath, AlternativePath)) and len(arg.args) > 1: return "(%s)" % arg.n3(namespace_manager) return arg.n3(namespace_manager) @@ -223,22 +211,24 @@ def _n3( @total_ordering class Path(ABC): - __or__: Callable[[Path, Union[URIRef, Path]], AlternativePath] + """Base class for all property paths.""" + + __or__: Callable[[Path, URIRef | Path], AlternativePath] __invert__: Callable[[Path], InvPath] __neg__: Callable[[Path], NegatedPath] - __truediv__: Callable[[Path, Union[URIRef, Path]], SequencePath] + __truediv__: Callable[[Path, URIRef | Path], SequencePath] __mul__: Callable[[Path, str], MulPath] @abstractmethod def eval( self, graph: Graph, - subj: Optional[_SubjectType] = None, - obj: Optional[_ObjectType] = None, - ) -> Iterator[Tuple[_SubjectType, _ObjectType]]: ... + subj: SubjectType | None = None, + obj: ObjectType | None = None, + ) -> Iterator[tuple[SubjectType, ObjectType]]: ... @abstractmethod - def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> str: ... + def n3(self, namespace_manager: NamespaceManager | None = None) -> str: ... def __hash__(self): return hash(repr(self)) @@ -255,28 +245,28 @@ def __lt__(self, other: Any) -> bool: class InvPath(Path): - def __init__(self, arg: Union[Path, URIRef]): + def __init__(self, arg: Path | URIRef): self.arg = arg def eval( self, graph: Graph, - subj: Optional[_SubjectType] = None, - obj: Optional[_ObjectType] = None, - ) -> Generator[Tuple[_ObjectType, _SubjectType], None, None]: + subj: SubjectType | None = None, + obj: ObjectType | None = None, + ) -> Generator[tuple[ObjectType, SubjectType], None, None]: for s, o in eval_path(graph, (obj, self.arg, subj)): yield o, s def __repr__(self) -> str: return "Path(~%s)" % (self.arg,) - def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> str: + def n3(self, namespace_manager: NamespaceManager | None = None) -> str: return "^%s" % _n3(self.arg, namespace_manager) class SequencePath(Path): - def __init__(self, *args: Union[Path, URIRef]): - self.args: List[Union[Path, URIRef]] = [] + def __init__(self, *args: Path | URIRef): + self.args: list[Path | URIRef] = [] for a in args: if isinstance(a, SequencePath): self.args += a.args @@ -286,14 +276,14 @@ def __init__(self, *args: Union[Path, URIRef]): def eval( self, graph: Graph, - subj: Optional[_SubjectType] = None, - obj: Optional[_ObjectType] = None, - ) -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: + subj: SubjectType | None = None, + obj: ObjectType | None = None, + ) -> Generator[tuple[SubjectType, ObjectType], None, None]: def _eval_seq( - paths: List[Union[Path, URIRef]], - subj: Optional[_SubjectType], - obj: Optional[_ObjectType], - ) -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: + paths: list[Path | URIRef], + subj: SubjectType | None, + obj: ObjectType | None, + ) -> Generator[tuple[SubjectType, ObjectType], None, None]: if paths[1:]: for s, o in eval_path(graph, (subj, paths[0], None)): for r in _eval_seq(paths[1:], o, obj): @@ -304,10 +294,10 @@ def _eval_seq( yield s, o def _eval_seq_bw( - paths: List[Union[Path, URIRef]], - subj: Optional[_SubjectType], - obj: _ObjectType, - ) -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: + paths: list[Path | URIRef], + subj: SubjectType | None, + obj: ObjectType, + ) -> Generator[tuple[SubjectType, 
ObjectType], None, None]: if paths[:-1]: for s, o in eval_path(graph, (None, paths[-1], obj)): for r in _eval_seq(paths[:-1], subj, s): @@ -327,13 +317,13 @@ def _eval_seq_bw( def __repr__(self) -> str: return "Path(%s)" % " / ".join(str(x) for x in self.args) - def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> str: + def n3(self, namespace_manager: NamespaceManager | None = None) -> str: return "/".join(_n3(a, namespace_manager) for a in self.args) class AlternativePath(Path): - def __init__(self, *args: Union[Path, URIRef]): - self.args: List[Union[Path, URIRef]] = [] + def __init__(self, *args: Path | URIRef): + self.args: list[Path | URIRef] = [] for a in args: if isinstance(a, AlternativePath): self.args += a.args @@ -343,9 +333,9 @@ def __init__(self, *args: Union[Path, URIRef]): def eval( self, graph: Graph, - subj: Optional[_SubjectType] = None, - obj: Optional[_ObjectType] = None, - ) -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: + subj: SubjectType | None = None, + obj: ObjectType | None = None, + ) -> Generator[tuple[SubjectType, ObjectType], None, None]: for x in self.args: for y in eval_path(graph, (subj, x, obj)): yield y @@ -353,12 +343,12 @@ def eval( def __repr__(self) -> str: return "Path(%s)" % " | ".join(str(x) for x in self.args) - def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> str: + def n3(self, namespace_manager: NamespaceManager | None = None) -> str: return "|".join(_n3(a, namespace_manager) for a in self.args) class MulPath(Path): - def __init__(self, path: Union[Path, URIRef], mod: _MulPathMod): + def __init__(self, path: Path | URIRef, mod: _MulPathMod): self.path = path self.mod = mod @@ -377,10 +367,10 @@ def __init__(self, path: Union[Path, URIRef], mod: _MulPathMod): def eval( self, graph: Graph, - subj: Optional[_SubjectType] = None, - obj: Optional[_ObjectType] = None, + subj: SubjectType | None = None, + obj: ObjectType | None = None, first: bool = True, - ) -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: + ) -> Generator[tuple[SubjectType, ObjectType], None, None]: if self.zero and first: if subj and obj: if subj == obj: @@ -391,45 +381,39 @@ def eval( yield obj, obj def _fwd( - subj: Optional[_SubjectType] = None, - obj: Optional[_ObjectType] = None, - seen: Optional[Set[_SubjectType]] = None, - ) -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: - # type error: Item "None" of "Optional[Set[Node]]" has no attribute "add" - # type error: Argument 1 to "add" of "set" has incompatible type "Optional[Node]"; expected "Node" - seen.add(subj) # type: ignore[union-attr, arg-type] + subj: SubjectType, + obj: ObjectType | None, + seen: set[SubjectType], + ) -> Generator[tuple[SubjectType, ObjectType], None, None]: + seen.add(subj) for s, o in eval_path(graph, (subj, self.path, None)): if not obj or o == obj: yield s, o if self.more: - # type error: Unsupported right operand type for in ("Optional[Set[Node]]") - if o in seen: # type: ignore[operator] + if o in seen: continue for s2, o2 in _fwd(o, obj, seen): yield s, o2 def _bwd( - subj: Optional[_SubjectType] = None, - obj: Optional[_ObjectType] = None, - seen: Optional[Set[_ObjectType]] = None, - ) -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: - # type error: Item "None" of "Optional[Set[Node]]" has no attribute "add" - # type error: Argument 1 to "add" of "set" has incompatible type "Optional[Node]"; expected "Node" - seen.add(obj) # type: ignore[union-attr, arg-type] + subj: SubjectType | None, + obj: ObjectType, + 
seen: set[ObjectType], + ) -> Generator[tuple[SubjectType, ObjectType], None, None]: + seen.add(obj) for s, o in eval_path(graph, (None, self.path, obj)): if not subj or subj == s: yield s, o if self.more: - # type error: Unsupported right operand type for in ("Optional[Set[Node]]") - if s in seen: # type: ignore[operator] + if s in seen: continue for s2, o2 in _bwd(None, s, seen): yield s2, o - def _all_fwd_paths() -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: + def _all_fwd_paths() -> Generator[tuple[SubjectType, ObjectType], None, None]: if self.zero: seen1 = set() # According to the spec, ALL nodes are possible solutions @@ -458,12 +442,12 @@ def _all_fwd_paths() -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: yield s1, o1 done = set() # the spec does, by defn, not allow duplicates - if subj: + if subj is not None: for x in _fwd(subj, obj, set()): if x not in done: done.add(x) yield x - elif obj: + elif obj is not None: for x in _bwd(subj, obj, set()): if x not in done: done.add(x) @@ -477,13 +461,13 @@ def _all_fwd_paths() -> Generator[Tuple[_SubjectType, _ObjectType], None, None]: def __repr__(self) -> str: return "Path(%s%s)" % (self.path, self.mod) - def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> str: + def n3(self, namespace_manager: NamespaceManager | None = None) -> str: return "%s%s" % (_n3(self.path, namespace_manager), self.mod) class NegatedPath(Path): - def __init__(self, arg: Union[AlternativePath, InvPath, URIRef]): - self.args: List[Union[URIRef, Path]] + def __init__(self, arg: AlternativePath | InvPath | URIRef): + self.args: list[URIRef | Path] if isinstance(arg, (URIRef, InvPath)): self.args = [arg] elif isinstance(arg, AlternativePath): @@ -511,7 +495,7 @@ def eval(self, graph, subj=None, obj=None): def __repr__(self) -> str: return "Path(! %s)" % ",".join(str(x) for x in self.args) - def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> str: + def n3(self, namespace_manager: NamespaceManager | None = None) -> str: return "!(%s)" % ("|".join(_n3(arg, namespace_manager) for arg in self.args)) @@ -519,7 +503,7 @@ class PathList(list): pass -def path_alternative(self: Union[URIRef, Path], other: Union[URIRef, Path]): +def path_alternative(self: URIRef | Path, other: URIRef | Path): """ alternative path """ @@ -528,7 +512,7 @@ def path_alternative(self: Union[URIRef, Path], other: Union[URIRef, Path]): return AlternativePath(self, other) -def path_sequence(self: Union[URIRef, Path], other: Union[URIRef, Path]): +def path_sequence(self: URIRef | Path, other: URIRef | Path): """ sequence path """ @@ -539,12 +523,12 @@ def path_sequence(self: Union[URIRef, Path], other: Union[URIRef, Path]): def evalPath( # noqa: N802 graph: Graph, - t: Tuple[ - Optional[_SubjectType], - Union[None, Path, _PredicateType], - Optional[_ObjectType], + t: tuple[ + SubjectType | None, + Path | PredicateType | None, + ObjectType | None, ], -) -> Iterator[Tuple[_SubjectType, _ObjectType]]: +) -> Iterator[tuple[SubjectType, ObjectType]]: warnings.warn( DeprecationWarning( "rdflib.path.evalPath() is deprecated, use the (snake-cased) eval_path(). 
" @@ -557,30 +541,30 @@ def evalPath( # noqa: N802 def eval_path( graph: Graph, - t: Tuple[ - Optional[_SubjectType], - Union[None, Path, _PredicateType], - Optional[_ObjectType], + t: tuple[ + SubjectType | None, + Path | PredicateType | None, + ObjectType | None, ], -) -> Iterator[Tuple[_SubjectType, _ObjectType]]: +) -> Iterator[tuple[SubjectType, ObjectType]]: return ((s, o) for s, p, o in graph.triples(t)) -def mul_path(p: Union[URIRef, Path], mul: _MulPathMod) -> MulPath: +def mul_path(p: URIRef | Path, mul: _MulPathMod) -> MulPath: """ cardinality path """ return MulPath(p, mul) -def inv_path(p: Union[URIRef, Path]) -> InvPath: +def inv_path(p: URIRef | Path) -> InvPath: """ inverse path """ return InvPath(p) -def neg_path(p: Union[URIRef, AlternativePath, InvPath]) -> NegatedPath: +def neg_path(p: URIRef | AlternativePath | InvPath) -> NegatedPath: """ negated path """ @@ -605,7 +589,7 @@ def neg_path(p: Union[URIRef, AlternativePath, InvPath]) -> NegatedPath: Path.__invert__ = inv_path # type error: Incompatible types in assignment (expression has type "Callable[[Union[URIRef, AlternativePath, InvPath]], NegatedPath]", variable has type "Callable[[Path], NegatedPath]") Path.__neg__ = neg_path # type: ignore[assignment] - # type error: Incompatible types in assignment (expression has type "Callable[[Union[URIRef, Path], Literal['*', '+', '?']], MulPath]", variable has type "Callable[[Path, str], MulPath]") + # type error: Incompatible types in assignment (expression has type "Callable[[URIRef|Path, Literal['*', '+', '?']], MulPath]", variable has type "Callable[[Path, str], MulPath]") Path.__mul__ = mul_path # type: ignore[assignment] Path.__or__ = path_alternative Path.__truediv__ = path_sequence diff --git a/rdflib/plugin.py b/rdflib/plugin.py index 23699e68d..97284b7d8 100644 --- a/rdflib/plugin.py +++ b/rdflib/plugin.py @@ -1,42 +1,19 @@ -""" -Plugin support for rdf. - -There are a number of plugin points for rdf: parser, serializer, +"""There are a number of plugin points for rdf: parser, serializer, store, query processor, and query result. Plugins can be registered -either through setuptools entry_points or by calling +either automatically through entry points or by calling rdf.plugin.register directly. -If you have a package that uses a setuptools based setup.py you can add the -following to your setup:: - - entry_points = { - 'rdf.plugins.parser': [ - 'nt = rdf.plugins.parsers.ntriples:NTParser', - ], - 'rdf.plugins.serializer': [ - 'nt = rdf.plugins.serializers.NTSerializer:NTSerializer', - ], - } - -See the `setuptools dynamic discovery of services and plugins`__ for more -information. - -.. __: http://peak.telecommunity.com/DevCenter/setuptools#dynamic-discovery-of-services-and-plugins - +For more details, see the [Plugins Usage Documentation](../plugins.md). 
""" from __future__ import annotations +from collections.abc import Iterator from importlib.metadata import EntryPoint, entry_points from typing import ( TYPE_CHECKING, Any, - Dict, Generic, - Iterator, - Optional, - Tuple, - Type, TypeVar, overload, ) @@ -75,7 +52,7 @@ "rdf.plugins.updateprocessor": UpdateProcessor, } -_plugins: Dict[Tuple[str, Type[Any]], Plugin] = {} +_plugins: dict[tuple[str, type[Any]], Plugin] = {} class PluginException(Error): # noqa: N818 @@ -88,15 +65,15 @@ class PluginException(Error): # noqa: N818 class Plugin(Generic[PluginT]): def __init__( - self, name: str, kind: Type[PluginT], module_path: str, class_name: str + self, name: str, kind: type[PluginT], module_path: str, class_name: str ): self.name = name self.kind = kind self.module_path = module_path self.class_name = class_name - self._class: Optional[Type[PluginT]] = None + self._class: type[PluginT] | None = None - def getClass(self) -> Type[PluginT]: # noqa: N802 + def getClass(self) -> type[PluginT]: # noqa: N802 if self._class is None: module = __import__(self.module_path, globals(), locals(), [""]) self._class = getattr(module, self.class_name) @@ -104,19 +81,19 @@ def getClass(self) -> Type[PluginT]: # noqa: N802 class PKGPlugin(Plugin[PluginT]): - def __init__(self, name: str, kind: Type[PluginT], ep: EntryPoint): + def __init__(self, name: str, kind: type[PluginT], ep: EntryPoint): self.name = name self.kind = kind self.ep = ep - self._class: Optional[Type[PluginT]] = None + self._class: type[PluginT] | None = None - def getClass(self) -> Type[PluginT]: # noqa: N802 + def getClass(self) -> type[PluginT]: # noqa: N802 if self._class is None: self._class = self.ep.load() return self._class -def register(name: str, kind: Type[Any], module_path, class_name): +def register(name: str, kind: type[Any], module_path, class_name): """ Register the plugin for (name, kind). The module_path and class_name should be the path to a plugin class. @@ -125,7 +102,7 @@ def register(name: str, kind: Type[Any], module_path, class_name): _plugins[(name, kind)] = p -def get(name: str, kind: Type[PluginT]) -> Type[PluginT]: +def get(name: str, kind: type[PluginT]) -> type[PluginT]: """ Return the class for the specified (name, kind). Raises a PluginException if unable to do so. @@ -153,16 +130,16 @@ def get(name: str, kind: Type[PluginT]) -> Type[PluginT]: @overload def plugins( - name: Optional[str] = ..., kind: Type[PluginT] = ... + name: str | None = ..., kind: type[PluginT] = ... ) -> Iterator[Plugin[PluginT]]: ... @overload -def plugins(name: Optional[str] = ..., kind: None = ...) -> Iterator[Plugin]: ... +def plugins(name: str | None = ..., kind: None = ...) -> Iterator[Plugin]: ... def plugins( - name: Optional[str] = None, kind: Optional[Type[PluginT]] = None + name: str | None = None, kind: type[PluginT] | None = None ) -> Iterator[Plugin[PluginT]]: """ A generator of the plugins. diff --git a/rdflib/plugins/__init__.py b/rdflib/plugins/__init__.py index 4622bb0a1..90c50d5d0 100644 --- a/rdflib/plugins/__init__.py +++ b/rdflib/plugins/__init__.py @@ -1,7 +1,5 @@ """ -Default plugins for rdflib. - -This is a namespace package and contains the default plugins for -rdflib. +Default plugins for RDFLib. +This is a namespace package and contains the default plugins for RDFLib. 
""" diff --git a/rdflib/plugins/parsers/__init__.py b/rdflib/plugins/parsers/__init__.py index 8062daa4a..8748bfce7 100644 --- a/rdflib/plugins/parsers/__init__.py +++ b/rdflib/plugins/parsers/__init__.py @@ -1,3 +1,2 @@ -""" - +"""Modules for parsing serialized RDF such as Turtle, JSON-LD etc. """ diff --git a/rdflib/plugins/parsers/hext.py b/rdflib/plugins/parsers/hext.py index 99aa47698..27543315f 100644 --- a/rdflib/plugins/parsers/hext.py +++ b/rdflib/plugins/parsers/hext.py @@ -9,7 +9,7 @@ import json import warnings from io import TextIOWrapper -from typing import TYPE_CHECKING, Any, BinaryIO, List, Optional, TextIO, Union +from typing import TYPE_CHECKING, Any, BinaryIO, TextIO, Union from rdflib.graph import ConjunctiveGraph, Dataset, Graph from rdflib.parser import InputSource, Parser @@ -36,11 +36,11 @@ class HextuplesParser(Parser): def __init__(self): super(HextuplesParser, self).__init__() - self.default_context: Optional[Graph] = None + self.default_context: Graph | None = None self.skolemize = False def _parse_hextuple( - self, ds: Union[Dataset, ConjunctiveGraph], tup: List[Union[str, None]] + self, ds: Union[Dataset, ConjunctiveGraph], tup: list[Union[str, None]] ) -> None: # all values check # subject, predicate, value, datatype cannot be None @@ -124,11 +124,11 @@ def parse(self, source: InputSource, graph: Graph, skolemize: bool = False, **kw ds.remove_graph(ds_default) # remove the original unused default graph try: - text_stream: Optional[TextIO] = source.getCharacterStream() + text_stream: TextIO | None = source.getCharacterStream() # type: ignore[assignment] except (AttributeError, LookupError): text_stream = None try: - binary_stream: Optional[BinaryIO] = source.getByteStream() + binary_stream: BinaryIO | None = source.getByteStream() # type: ignore[assignment] except (AttributeError, LookupError): binary_stream = None @@ -156,7 +156,7 @@ def parse(self, source: InputSource, graph: Graph, skolemize: bool = False, **kw use_stream = TextIOWrapper(binary_stream, encoding="utf-8") loads = json.loads - for line in use_stream: # type: Union[str, bytes] + for line in use_stream: # type: str|bytes if len(line) == 0 or line.isspace(): # Skipping empty lines because this is what was being done before for the first and last lines, albeit in an rather indirect way. # The result is that we accept input that would otherwise be invalid. @@ -165,7 +165,7 @@ def parse(self, source: InputSource, graph: Graph, skolemize: bool = False, **kw # this complex handing is because the 'value' component is # allowed to be "" but not None # all other "" values are treated as None - raw_line: List[str] = loads(line) + raw_line: list[str] = loads(line) hex_tuple_line = [x if x != "" else None for x in raw_line] if raw_line[2] == "": hex_tuple_line[2] = "" diff --git a/rdflib/plugins/parsers/jsonld.py b/rdflib/plugins/parsers/jsonld.py index 295a97126..2079bbbfd 100644 --- a/rdflib/plugins/parsers/jsonld.py +++ b/rdflib/plugins/parsers/jsonld.py @@ -1,10 +1,8 @@ """ -This parser will interpret a JSON-LD document as an RDF Graph. See: - - http://json-ld.org/ - -Example usage:: +This parser will interpret a JSON-LD document as an RDF Graph. See http://json-ld.org/ +Example: + ```python >>> from rdflib import Graph, URIRef, Literal >>> test_json = ''' ... { @@ -26,6 +24,7 @@ ... 
Literal("Someone's Homepage", lang='en'))] True + ``` """ # From: https://github.com/RDFLib/rdflib-jsonld/blob/feature/json-ld-1.1/rdflib_jsonld/parser.py @@ -34,14 +33,16 @@ # we should consider streaming the input to deal with arbitrarily large graphs. from __future__ import annotations +import secrets import warnings -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union +from collections.abc import Iterable +from typing import TYPE_CHECKING, Any, Union import rdflib.parser from rdflib.graph import ConjunctiveGraph, Graph from rdflib.namespace import RDF, XSD from rdflib.parser import InputSource, URLInputSource -from rdflib.term import BNode, IdentifiedNode, Literal, Node, URIRef +from rdflib.term import BNode, IdentifiedNode, Literal, URIRef from ..shared.jsonld.context import UNDEF, Context, Term from ..shared.jsonld.keys import ( @@ -70,6 +71,9 @@ source_to_json, ) +if TYPE_CHECKING: + from rdflib.graph import _ObjectType + __all__ = ["JsonLDParser", "to_rdf"] TYPE_TERM = Term(str(RDF.type), TYPE, VOCAB) # type: ignore[call-arg] @@ -87,47 +91,33 @@ def parse( sink: Graph, version: float = 1.1, skolemize: bool = False, - encoding: Optional[str] = "utf-8", - base: Optional[str] = None, - context: Optional[ - Union[ - List[Union[Dict[str, Any], str, None]], - Dict[str, Any], - str, - ] - ] = None, - generalized_rdf: Optional[bool] = False, - extract_all_scripts: Optional[bool] = False, + encoding: str | None = "utf-8", + base: str | None = None, + context: list[dict[str, Any] | str | None] | dict[str, Any] | str | None = None, + generalized_rdf: bool | None = False, + extract_all_scripts: bool | None = False, **kwargs: Any, ) -> None: """Parse JSON-LD from a source document. The source document can be JSON or HTML with embedded JSON script - elements (type attribute = "application/ld+json"). To process as HTML - ``source.content_type`` must be set to "text/html" or - "application/xhtml+xml". - - :param source: InputSource with JSON-formatted data (JSON or HTML) - - :param sink: Graph to receive the parsed triples - - :param version: parse as JSON-LD version, defaults to 1.1 - - :param encoding: character encoding of the JSON (should be "utf-8" - or "utf-16"), defaults to "utf-8" - - :param base: JSON-LD `Base IRI `_, defaults to None - - :param context: JSON-LD `Context `_, defaults to None - - :param generalized_rdf: parse as `Generalized RDF `_, defaults to False - - :param extract_all_scripts: if source is an HTML document then extract - all script elements, defaults to False (extract only the first - script element). This is ignored if ``source.system_id`` contains - a fragment identifier, in which case only the script element with - matching id attribute is extracted. - + elements (type attribute = `application/ld+json`). To process as HTML + `source.content_type` must be set to "text/html" or + `application/xhtml+xml. 
+ + Args: + source: InputSource with JSON-formatted data (JSON or HTML) + sink: Graph to receive the parsed triples + version: parse as JSON-LD version, defaults to 1.1 + skolemize: whether to skolemize blank nodes, defaults to False + encoding: character encoding of the JSON (should be "utf-8" + base: JSON-LD [Base IRI](https://www.w3.org/TR/json-ld/#base-iri), defaults to None + context: JSON-LD [Context](https://www.w3.org/TR/json-ld/#the-context), defaults to None + generalized_rdf: parse as [Generalized RDF](https://www.w3.org/TR/json-ld/#relationship-to-rdf), defaults to False + extract_all_scripts: if source is an HTML document then extract + script element). This is ignored if `source.system_id` contains + a fragment identifier, in which case only the script element with + matching id attribute is extracted. """ if encoding not in ("utf-8", "utf-16"): warnings.warn( @@ -151,7 +141,7 @@ def parse( # Get the optional fragment identifier try: - fragment_id = URIRef(source.getSystemId()).fragment + fragment_id = URIRef(source.getSystemId()).fragment # type: ignore[arg-type] except Exception: fragment_id = None @@ -182,17 +172,13 @@ def parse( def to_rdf( data: Any, dataset: Graph, - base: Optional[str] = None, - context_data: Optional[ - Union[ - List[Union[Dict[str, Any], str, None]], - Dict[str, Any], - str, - ] - ] = None, - version: Optional[float] = None, + base: str | None = None, + context_data: ( + list[dict[str, Any] | str | None] | dict[str, Any] | str | None + ) = None, + version: float | None = None, generalized_rdf: bool = False, - allow_lists_of_lists: Optional[bool] = None, + allow_lists_of_lists: bool | None = None, skolemize: bool = False, ): # TODO: docstring w. args and return value @@ -211,7 +197,7 @@ class Parser: def __init__( self, generalized_rdf: bool = False, - allow_lists_of_lists: Optional[bool] = None, + allow_lists_of_lists: bool | None = None, skolemize: bool = False, ): self.skolemize = skolemize @@ -221,10 +207,11 @@ def __init__( if allow_lists_of_lists is not None else ALLOW_LISTS_OF_LISTS ) + self.invalid_uri_to_bnode: dict[str, BNode] = {} def parse(self, data: Any, context: Context, dataset: Graph) -> Graph: topcontext = False - resources: Union[Dict[str, Any], List[Any]] + resources: Union[dict[str, Any], list[Any]] if isinstance(data, list): resources = data elif isinstance(data, dict): @@ -258,7 +245,7 @@ def _add_to_graph( context: Context, node: Any, topcontext: bool = False, - ) -> Optional[Node]: + ) -> IdentifiedNode | None: if not isinstance(node, dict) or context.get_value(node): # type error: Return value expected return # type: ignore[return-value] @@ -280,12 +267,13 @@ def _add_to_graph( if nested_id is not None and len(nested_id) > 0: id_val = nested_id + subj: IdentifiedNode | None + if isinstance(id_val, str): subj = self._to_rdf_id(context, id_val) else: - subj = BNode() - if self.skolemize: - subj = subj.skolemize() + _bn = BNode() + subj = _bn if not self.skolemize else _bn.skolemize() if subj is None: return None @@ -315,7 +303,7 @@ def _add_to_graph( return subj # type error: Missing return statement - def _get_nested_id(self, context: Context, node: Dict[str, Any]) -> Optional[str]: # type: ignore[return] + def _get_nested_id(self, context: Context, node: dict[str, Any]) -> str | None: # type: ignore[return] for key, obj in node.items(): if context.version >= 1.1 and key in context.get_keys(NEST): term = context.terms.get(key) @@ -339,7 +327,7 @@ def _key_to_graph( dataset: Graph, graph: Graph, context: Context, - subj: Node, 
+ subj: IdentifiedNode, key: str, obj: Any, reverse: bool = False, @@ -369,8 +357,7 @@ def _key_to_graph( if dataset.context_aware and not no_id: if TYPE_CHECKING: assert isinstance(dataset, ConjunctiveGraph) - # type error: Argument 1 to "get_context" of "ConjunctiveGraph" has incompatible type "Node"; expected "Union[IdentifiedNode, str, None]" - subgraph = dataset.get_context(subj) # type: ignore[arg-type] + subgraph = dataset.get_context(subj) else: subgraph = graph for onode in obj_nodes: @@ -409,7 +396,7 @@ def _key_to_graph( context = context.get_context_for_term(term) # Flatten deep nested lists - def flatten(n: Iterable[Any]) -> List[Any]: + def flatten(n: Iterable[Any]) -> list[Any]: flattened = [] for obj in n: if isinstance(obj, dict): @@ -451,8 +438,8 @@ def flatten(n: Iterable[Any]) -> List[Any]: graph.add((subj, pred, obj)) def _parse_container( - self, context: Context, term: Term, obj: Dict[str, Any] - ) -> List[Any]: + self, context: Context, term: Term, obj: dict[str, Any] + ) -> list[Any]: if LANG in term.container: obj_nodes = [] for lang, values in obj.items(): @@ -531,7 +518,7 @@ def _parse_container( return [obj] @staticmethod - def _add_type(context: Context, o: Dict[str, Any], k: str) -> Dict[str, Any]: + def _add_type(context: Context, o: dict[str, Any], k: str) -> dict[str, Any]: otype = context.get_type(o) or [] if otype and not isinstance(otype, list): otype = [otype] @@ -544,10 +531,10 @@ def _to_object( dataset: Graph, graph: Graph, context: Context, - term: Optional[Term], + term: Term | None, node: Any, inlist: bool = False, - ) -> Optional[Node]: + ) -> _ObjectType | None: if isinstance(node, tuple): value, lang = node if value is None: @@ -618,7 +605,7 @@ def _to_object( else: return self._add_to_graph(dataset, graph, context, node) - def _to_rdf_id(self, context: Context, id_val: str) -> Optional[IdentifiedNode]: + def _to_rdf_id(self, context: Context, id_val: str) -> IdentifiedNode | None: bid = self._get_bnodeid(id_val) if bid: b = BNode(bid) @@ -629,9 +616,14 @@ def _to_rdf_id(self, context: Context, id_val: str) -> Optional[IdentifiedNode]: uri = context.resolve(id_val) if not self.generalized_rdf and ":" not in uri: return None - return URIRef(uri) - - def _get_bnodeid(self, ref: str) -> Optional[str]: + node: IdentifiedNode = URIRef(uri) + if not str(node): + if id_val not in self.invalid_uri_to_bnode: + self.invalid_uri_to_bnode[id_val] = BNode(secrets.token_urlsafe(20)) + node = self.invalid_uri_to_bnode[id_val] + return node + + def _get_bnodeid(self, ref: str) -> str | None: if not ref.startswith("_:"): # type error: Return value expected return # type: ignore[return-value] @@ -643,7 +635,7 @@ def _add_list( dataset: Graph, graph: Graph, context: Context, - term: Optional[Term], + term: Term | None, node_list: Any, ) -> IdentifiedNode: if not isinstance(node_list, list): @@ -662,7 +654,7 @@ def _add_list( if rest: # type error: Statement is unreachable - graph.add((subj, RDF.rest, rest)) # type: ignore[unreachable] + graph.add((subj, RDF.rest, rest)) subj = rest obj = self._to_object(dataset, graph, context, term, node, inlist=True) @@ -682,7 +674,7 @@ def _add_list( return RDF.nil @staticmethod - def _to_typed_json_value(value: Any) -> Dict[str, str]: + def _to_typed_json_value(value: Any) -> dict[str, str]: if _HAS_ORJSON: val_string: str = orjson.dumps( value, @@ -698,7 +690,7 @@ def _to_typed_json_value(value: Any) -> Dict[str, str]: } @classmethod - def _expand_nested_list(cls, obj_nodes: List[Any]) -> Dict[str, List[Any]]: + def 
_expand_nested_list(cls, obj_nodes: list[Any]) -> dict[str, list[Any]]: result = [ cls._expand_nested_list(o) if isinstance(o, list) else o for o in obj_nodes ] diff --git a/rdflib/plugins/parsers/notation3.py b/rdflib/plugins/parsers/notation3.py index da71405e0..56ce90462 100755 --- a/rdflib/plugins/parsers/notation3.py +++ b/rdflib/plugins/parsers/notation3.py @@ -33,25 +33,17 @@ import os import re import sys - -# importing typing for `typing.List` because `List`` is used for something else -import typing from decimal import Decimal +from re import Pattern from typing import ( IO, TYPE_CHECKING, Any, - Callable, - Dict, - Match, - MutableSequence, NoReturn, - Optional, - Pattern, - Set, - Tuple, TypeVar, Union, + cast, + overload, ) from uuid import uuid4 @@ -88,24 +80,28 @@ from rdflib.parser import Parser if TYPE_CHECKING: + from collections.abc import Callable, MutableSequence + from re import Match # Replaces typing.Match in Python 3.9+ + + from rdflib.graph import _ObjectType, _PredicateType, _SubjectType from rdflib.parser import InputSource _AnyT = TypeVar("_AnyT") -def splitFragP(uriref: str, punc: int = 0) -> Tuple[str, str]: - """split a URI reference before the fragment - - Punctuation is kept. +def splitFragP(uriref: str, punc: int = 0) -> tuple[str, str]: + """Split a URI reference before the fragment - e.g. + Punctuation is kept. e.g. + ```python >>> splitFragP("abc#def") ('abc', '#def') >>> splitFragP("abcdef") ('abcdef', '') + ``` """ i = uriref.rfind("#") @@ -123,15 +119,19 @@ def join(here: str, there: str) -> str: (non-ascii characters are supported/doctested; haven't checked the details of the IRI spec though) - ``here`` is assumed to be absolute. - ``there`` is URI reference. + `here` is assumed to be absolute. + `there` is URI reference. + ```python >>> join('http://example/x/y/z', '../abc') 'http://example/x/abc' + ``` + Raise ValueError if there uses relative path syntax but here has no hierarchical path. + ```python >>> join('mid:foo@example', '../foo') # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): raise ValueError(here) @@ -144,13 +144,18 @@ def join(here: str, there: str) -> str: >>> join('mid:foo@example', '#foo') 'mid:foo@example#foo' + ``` + We grok IRIs + ```python >>> len('Andr\\xe9') 5 >>> join('http://example.org/', '#Andr\\xe9') 'http://example.org/#Andr\\xe9' + + ``` """ # assert(here.find("#") < 0), \ @@ -223,7 +228,6 @@ def base() -> str: this yield the URI of the file. 
If we had a reliable way of getting a computer name, we should put it in the hostname just to prevent ambiguity - """ # return "file://" + hostname + os.getcwd() + "/" return "file://" + _fixslash(os.getcwd()) + "/" @@ -276,7 +280,7 @@ def _fixslash(s: str) -> str: N3_Empty = (SYMBOL, List_NS + "Empty") -runNamespaceValue: Optional[str] = None +runNamespaceValue: str | None = None def runNamespace() -> str: @@ -384,11 +388,11 @@ class SinkParser: def __init__( self, store: RDFSink, - openFormula: Optional[Formula] = None, + openFormula: Formula | None = None, thisDoc: str = "", - baseURI: Optional[str] = None, + baseURI: str | None = None, genPrefix: str = "", - why: Optional[Callable[[], None]] = None, + why: Callable[[], None] | None = None, turtle: bool = False, ): """note: namespace names should *not* end in # ; @@ -410,10 +414,10 @@ def __init__( self._genPrefix = genPrefix self.keywords = ["a", "this", "bind", "has", "is", "of", "true", "false"] self.keywordsSet = 0 # Then only can others be considered qnames - self._anonymousNodes: Dict[str, BNode] = {} + self._anonymousNodes: dict[str, BNode] = {} # Dict of anon nodes already declared ln: Term - self._variables: Dict[str, Variable] = {} - self._parentVariables: Dict[str, Variable] = {} + self._variables: dict[str, Variable] = {} + self._parentVariables: dict[str, Variable] = {} self._reason = why # Why the parser was asked to parse this self.turtle = turtle # raise exception when encountering N3 extensions @@ -421,7 +425,7 @@ def __init__( # only allows double quotes. self.string_delimiters = ('"', "'") if turtle else ('"',) - self._reason2: Optional[Callable[..., None]] = None # Why these triples + self._reason2: Callable[..., None] | None = None # Why these triples # was: diag.tracking if tracking: # type error: "BecauseOfData" does not return a value @@ -429,7 +433,7 @@ def __init__( store.newSymbol(thisDoc), because=self._reason ) - self._baseURI: Optional[str] + self._baseURI: str | None if baseURI: self._baseURI = baseURI else: @@ -446,7 +450,7 @@ def __init__( else: self._genPrefix = uniqueURI() - self._formula: Optional[Formula] + self._formula: Formula | None if openFormula is None and not turtle: if self._thisDoc: # TODO FIXME: store.newFormula does not take any arguments @@ -456,8 +460,8 @@ def __init__( else: self._formula = openFormula - self._context: Optional[Formula] = self._formula - self._parentContext: Optional[Formula] = None + self._context: Formula | None = self._formula + self._parentContext: Formula | None = None def here(self, i: int) -> str: """String generated from position in file @@ -472,20 +476,20 @@ def here(self, i: int) -> str: return "%s_L%iC%i" % (self._genPrefix, self.lines, i - self.startOfLine + 1) - def formula(self) -> Optional[Formula]: + def formula(self) -> Formula | None: return self._formula - def loadStream(self, stream: Union[IO[str], IO[bytes]]) -> Optional[Formula]: + def loadStream(self, stream: Union[IO[str], IO[bytes]]) -> Formula | None: return self.loadBuf(stream.read()) # Not ideal - def loadBuf(self, buf: Union[str, bytes]) -> Optional[Formula]: + def loadBuf(self, buf: str | bytes) -> Formula | None: """Parses a buffer and returns its top level formula""" self.startDoc() self.feed(buf) return self.endDoc() # self._formula - def feed(self, octets: Union[str, bytes]) -> None: + def feed(self, octets: str | bytes) -> None: """Feed an octet stream to the parser if BadSyntax is raised, the string @@ -541,7 +545,7 @@ def tok(self, tok: str, argstr: str, i: int, colon: bool = False) 
-> int: we must not be at end of file. if colon, then keyword followed by colon is ok - (@prefix: is ok, rdf:type shortcut a must be followed by ws) + (`@prefix:` is ok, rdf:type shortcut a must be followed by ws) """ assert tok[0] not in _notNameChars # not for punctuation @@ -582,7 +586,7 @@ def directive(self, argstr: str, i: int) -> int: j = self.skipSpace(argstr, i) if j < 0: return j # eof - res: typing.List[str] = [] + res: list[str] = [] j = self.tok("bind", argstr, i) # implied "#". Obsolete. if j > 0: @@ -631,7 +635,7 @@ def directive(self, argstr: str, i: int) -> int: j = self.tok("prefix", argstr, i, colon=True) # no implied "#" if j >= 0: - t: typing.List[Union[Identifier, Tuple[str, str]]] = [] + t: list[Union[Identifier, tuple[str, str]]] = [] i = self.qname(argstr, j, t) if i < 0: self.BadSyntax(argstr, j, "expected qname after @prefix") @@ -690,7 +694,7 @@ def sparqlDirective(self, argstr: str, i: int) -> int: j = self.sparqlTok("PREFIX", argstr, i) if j >= 0: - t: typing.List[Any] = [] + t: list[Any] = [] i = self.qname(argstr, j, t) if i < 0: self.BadSyntax(argstr, j, "expected qname after @prefix") @@ -747,7 +751,7 @@ def bind(self, qn: str, uri: bytes) -> None: else: self._store.bind(qn, uri) - def setKeywords(self, k: Optional[typing.List[str]]) -> None: + def setKeywords(self, k: list[str] | None) -> None: """Takes a list of strings""" if k is None: self.keywordsSet = 0 @@ -759,7 +763,7 @@ def startDoc(self) -> None: # was: self._store.startDoc() self._store.startDoc(self._formula) - def endDoc(self) -> Optional[Formula]: + def endDoc(self) -> Formula | None: """Signal end of document and stop parsing. returns formula""" self._store.endDoc(self._formula) # don't canonicalize yet return self._formula @@ -770,7 +774,7 @@ def makeStatement(self, quadruple) -> None: self._store.makeStatement(quadruple, why=self._reason2) def statement(self, argstr: str, i: int) -> int: - r: typing.List[Any] = [] + r: list[Any] = [] i = self.object(argstr, i, r) # Allow literal for subject - extends RDF if i < 0: return i @@ -798,7 +802,7 @@ def verb(self, argstr: str, i: int, res: MutableSequence[Any]) -> int: if j < 0: return j # eof - r: typing.List[Any] = [] + r: list[Any] = [] j = self.tok("has", argstr, i) if j >= 0: @@ -876,7 +880,7 @@ def prop(self, argstr: str, i: int, res: MutableSequence[Any]) -> int: def item(self, argstr: str, i, res: MutableSequence[Any]) -> int: return self.path(argstr, i, res) - def blankNode(self, uri: Optional[str] = None) -> BNode: + def blankNode(self, uri: str | None = None) -> BNode: return self._store.newBlankNode(self._context, uri, why=self._reason2) def path(self, argstr: str, i: int, res: MutableSequence[Any]) -> int: @@ -914,13 +918,13 @@ def node( argstr: str, i: int, res: MutableSequence[Any], - subjectAlready: Optional[Node] = None, + subjectAlready: Node | None = None, ) -> int: """Parse the production. Space is now skipped once at the beginning instead of in multiple calls to self.skipSpace(). """ - subj: Optional[Node] = subjectAlready + subj: Node | None = subjectAlready j = self.skipSpace(argstr, i) if j < 0: @@ -940,7 +944,7 @@ def node( argstr, j, "Found '[=' or '[ =' when in turtle mode." 
) i = j + 1 - objs: typing.List[Node] = [] + objs: list[Node] = [] j = self.objectList(argstr, i, objs) if j >= 0: subj = objs[0] @@ -1001,7 +1005,7 @@ def node( else: first_run = False - item: typing.List[Any] = [] + item: list[Any] = [] j = self.item(argstr, i, item) # @@@@@ should be path, was object if j < 0: self.BadSyntax(argstr, i, "expected item in set or '$}'") @@ -1052,9 +1056,7 @@ def node( return j if ch == "(": - thing_type: Callable[ - [typing.List[Any], Optional[Formula]], Union[Set[Any], IdentifiedNode] - ] + thing_type: Callable[[list[Any], Formula | None], set[Any] | IdentifiedNode] thing_type = self._store.newList ch2 = argstr[i + 1] if ch2 == "$": @@ -1124,19 +1126,19 @@ def property_list(self, argstr: str, i: int, subj: Node) -> int: if self.turtle: self.BadSyntax(argstr, j, "Found in ':-' in Turtle mode") i = j + 2 - res: typing.List[Any] = [] + res: list[Any] = [] j = self.node(argstr, i, res, subj) if j < 0: self.BadSyntax(argstr, i, "bad {} or () or [] node after :- ") i = j continue i = j - v: typing.List[Any] = [] + v: list[Any] = [] j = self.verb(argstr, i, v) if j <= 0: return i # void but valid - objs: typing.List[Any] = [] + objs: list[Any] = [] i = self.objectList(argstr, j, objs) if i < 0: self.BadSyntax(argstr, j, "objectList expected") @@ -1220,7 +1222,7 @@ def uri_ref2(self, argstr: str, i: int, res: MutableSequence[Any]) -> int: NS and local name is now used though I prefer inserting a '#' to make the namesapces look more like what XML folks expect. """ - qn: typing.List[Any] = [] + qn: list[Any] = [] j = self.qname(argstr, i, qn) if j >= 0: pfx, ln = qn[0] @@ -1247,7 +1249,7 @@ def uri_ref2(self, argstr: str, i: int, res: MutableSequence[Any]) -> int: return -1 if argstr[i] == "?": - v: typing.List[Any] = [] + v: list[Any] = [] j = self.variable(argstr, i, v) if j > 0: # Forget variables as a class, only in context. res.append(v[0]) @@ -1374,7 +1376,7 @@ def qname( self, argstr: str, i: int, - res: MutableSequence[Union[Identifier, Tuple[str, str]]], + res: MutableSequence[Union[Identifier, tuple[str, str]]], ) -> int: """ xyz:def -> ('xyz', 'def') @@ -1571,7 +1573,7 @@ def nodeOrLiteral(self, argstr: str, i: int, res: MutableSequence[Any]) -> int: lang = argstr[j + 1 : i] j = i if argstr[j : j + 2] == "^^": - res2: typing.List[Any] = [] + res2: list[Any] = [] j = self.uri_ref2(argstr, j + 2, res2) # Read datatype URI dt = res2[0] res.append(self._store.newLiteral(s, dt, lang)) @@ -1579,13 +1581,13 @@ def nodeOrLiteral(self, argstr: str, i: int, res: MutableSequence[Any]) -> int: else: return -1 - def uriOf(self, sym: Union[Identifier, Tuple[str, str]]) -> str: + def uriOf(self, sym: Union[Identifier, tuple[str, str]]) -> str: if isinstance(sym, tuple): return sym[1] # old system for --pipe # return sym.uriref() # cwm api return sym - def strconst(self, argstr: str, i: int, delim: str) -> Tuple[int, str]: + def strconst(self, argstr: str, i: int, delim: str) -> tuple[int, str]: """parse an N3 string constant delimited by delim. 
return index, val """ @@ -1704,7 +1706,7 @@ def _unicodeEscape( reg: Pattern[str], n: int, prefix: str, - ) -> Tuple[int, str]: + ) -> tuple[int, str]: if len(argstr) < i + n: raise BadSyntax( self._thisDoc, startline, argstr, i, "unterminated string literal(3)" @@ -1720,10 +1722,10 @@ def _unicodeEscape( "bad string literal hex escape: " + argstr[i : i + n], ) - def uEscape(self, argstr: str, i: int, startline: int) -> Tuple[int, str]: + def uEscape(self, argstr: str, i: int, startline: int) -> tuple[int, str]: return self._unicodeEscape(argstr, i, startline, unicodeEscape4, 4, "u") - def UEscape(self, argstr: str, i: int, startline: int) -> Tuple[int, str]: + def UEscape(self, argstr: str, i: int, startline: int) -> tuple[int, str]: return self._unicodeEscape(argstr, i, startline, unicodeEscape8, 8, "U") def BadSyntax(self, argstr: str, i: int, msg: str) -> NoReturn: @@ -1781,8 +1783,8 @@ def __init__(self, parent: Graph): self.counter = 0 Formula.number += 1 self.number = Formula.number - self.existentials: Dict[str, BNode] = {} - self.universals: Dict[str, BNode] = {} + self.existentials: dict[str, BNode] = {} + self.universals: dict[str, BNode] = {} self.quotedgraph = QuotedGraph(store=parent.store, identifier=self.id()) @@ -1792,9 +1794,7 @@ def __str__(self) -> str: def id(self) -> BNode: return BNode("_:Formula%s" % self.number) - def newBlankNode( - self, uri: Optional[str] = None, why: Optional[Any] = None - ) -> BNode: + def newBlankNode(self, uri: str | None = None, why: Any | None = None) -> BNode: if uri is None: self.counter += 1 bn = BNode("f%sb%s" % (self.uuid, self.counter)) @@ -1802,7 +1802,7 @@ def newBlankNode( bn = BNode(uri.split("#").pop().replace("_", "b")) return bn - def newUniversal(self, uri: str, why: Optional[Any] = None) -> Variable: + def newUniversal(self, uri: str, why: Any | None = None) -> Variable: return Variable(uri.split("#").pop()) def declareExistential(self, x: str) -> None: @@ -1817,7 +1817,7 @@ def close(self) -> QuotedGraph: class RDFSink: def __init__(self, graph: Graph): - self.rootFormula: Optional[Formula] = None + self.rootFormula: Formula | None = None self.uuid = uuid4().hex self.counter = 0 self.graph = graph @@ -1839,9 +1839,9 @@ def newSymbol(self, *args: str) -> URIRef: def newBlankNode( self, - arg: Optional[Union[Formula, Graph, Any]] = None, - uri: Optional[str] = None, - why: Optional[Callable[[], None]] = None, + arg: Formula | Graph | Any | None = None, + uri: str | None = None, + why: Callable[[], None] | None = None, ) -> BNode: if isinstance(arg, Formula): return arg.newBlankNode(uri) @@ -1852,13 +1852,13 @@ def newBlankNode( bn = BNode(str(arg[0]).split("#").pop().replace("_", "b")) return bn - def newLiteral(self, s: str, dt: Optional[URIRef], lang: Optional[str]) -> Literal: + def newLiteral(self, s: str, dt: URIRef | None, lang: str | None) -> Literal: if dt: return Literal(s, datatype=dt) else: return Literal(s, lang=lang) - def newList(self, n: typing.List[Any], f: Optional[Formula]) -> IdentifiedNode: + def newList(self, n: list[Any], f: Formula | None) -> IdentifiedNode: nil = self.newSymbol("http://www.w3.org/1999/02/22-rdf-syntax-ns#nil") if not n: return nil @@ -1876,7 +1876,7 @@ def newList(self, n: typing.List[Any], f: Optional[Formula]) -> IdentifiedNode: self.makeStatement((f, rest, a, nil)) return af - def newSet(self, *args: _AnyT) -> Set[_AnyT]: + def newSet(self, *args: _AnyT) -> set[_AnyT]: return set(args) def setDefaultNamespace(self, *args: bytes) -> str: @@ -1884,8 +1884,8 @@ def 
setDefaultNamespace(self, *args: bytes) -> str: def makeStatement( self, - quadruple: Tuple[Optional[Union[Formula, Graph]], Node, Node, Node], - why: Optional[Any] = None, + quadruple: tuple[Formula | Graph | None, Node, Node, Node], + why: Any | None = None, ) -> None: f, p, s, o = quadruple @@ -1893,26 +1893,44 @@ def makeStatement( raise ParserError("Formula used as predicate") # type error: Argument 1 to "normalise" of "RDFSink" has incompatible type "Union[Formula, Graph, None]"; expected "Optional[Formula]" - s = self.normalise(f, s) # type: ignore[arg-type] - p = self.normalise(f, p) # type: ignore[arg-type] - o = self.normalise(f, o) # type: ignore[arg-type] + s_normal: _SubjectType = cast("_SubjectType", self.normalise(f, s)) + p_normal: _PredicateType = cast("_PredicateType", self.normalise(f, p)) + o_normal: _ObjectType = cast("_ObjectType", self.normalise(f, o)) if f == self.rootFormula: # print s, p, o, '.' - self.graph.add((s, p, o)) + self.graph.add((s_normal, p_normal, o_normal)) elif isinstance(f, Formula): - f.quotedgraph.add((s, p, o)) + f.quotedgraph.add((s_normal, p_normal, o_normal)) else: # type error: Item "None" of "Optional[Graph]" has no attribute "add" - f.add((s, p, o)) # type: ignore[union-attr] + f.add((s_normal, p_normal, o_normal)) # type: ignore[union-attr] # return str(quadruple) + @overload + def normalise(self, f: Formula | Graph | None, n: tuple[int, str]) -> URIRef: ... + + @overload + def normalise(self, f: Formula | Graph | None, n: bool) -> Literal: ... + + @overload + def normalise(self, f: Formula | Graph | None, n: int) -> Literal: ... + + @overload + def normalise(self, f: Formula | Graph | None, n: Decimal) -> Literal: ... + + @overload + def normalise(self, f: Formula | Graph | None, n: float) -> Literal: ... + + @overload + def normalise(self, f: Formula | Graph | None, n: Node) -> Node: ... 
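
The stacked `@overload` declarations above exist so a type-checker can narrow `normalise()`'s return type from its argument; a small, hedged illustration (the runtime values follow the implementation that continues below):

```python
# Illustration only: with the overloads above, mypy narrows these returns;
# the bool-to-Literal conversion is inferred from notation3's implementation.
from rdflib import Graph
from rdflib.plugins.parsers.notation3 import RDFSink

sink = RDFSink(Graph())
sink.normalise(None, (1, "http://example.org/x"))  # narrowed to URIRef
sink.normalise(None, True)                         # narrowed to Literal
```
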
+ def normalise( self, - f: Optional[Formula], - n: Union[Tuple[int, str], bool, int, Decimal, float, _AnyT], - ) -> Union[URIRef, Literal, BNode, _AnyT]: + f: Formula | Graph | None, + n: Union[tuple[int, str], bool, int, Decimal, float, Node, _AnyT], + ) -> Union[URIRef, Literal, BNode, Node, _AnyT]: if isinstance(n, tuple): return URIRef(str(n[1])) @@ -1955,10 +1973,10 @@ def intern(self, something: _AnyT) -> _AnyT: def bind(self, pfx, uri) -> None: pass # print pfx, ':', uri - def startDoc(self, formula: Optional[Formula]) -> None: + def startDoc(self, formula: Formula | None) -> None: self.rootFormula = formula - def endDoc(self, formula: Optional[Formula]) -> None: + def endDoc(self, formula: Formula | None) -> None: pass @@ -1972,9 +1990,11 @@ def hexify(ustr: str) -> bytes: """Use URL encoding to return an ASCII string corresponding to the given UTF8 string + ```python >>> hexify("http://example/a b") b'http://example/a%20b' + ``` """ # s1=ustr.encode('utf-8') s = "" @@ -1988,8 +2008,7 @@ def hexify(ustr: str) -> bytes: class TurtleParser(Parser): - """ - An RDFLib parser for Turtle + """An RDFLib parser for Turtle See http://www.w3.org/TR/turtle/ """ @@ -2001,7 +2020,7 @@ def parse( self, source: InputSource, graph: Graph, - encoding: Optional[str] = "utf-8", + encoding: str | None = "utf-8", turtle: bool = True, ) -> None: if encoding not in [None, "utf-8"]: @@ -2016,19 +2035,17 @@ def parse( # N3 parser prefers str stream stream = source.getCharacterStream() if not stream: - stream = source.getByteStream() - p.loadStream(stream) + stream = source.getByteStream() # type: ignore[assignment] + p.loadStream(stream) # type: ignore[arg-type] for prefix, namespace in p._bindings.items(): graph.bind(prefix, namespace) class N3Parser(TurtleParser): - """ - An RDFLib parser for Notation3 + """An RDFLib parser for Notation3 See http://www.w3.org/DesignIssues/Notation3.html - """ def __init__(self): @@ -2036,7 +2053,7 @@ def __init__(self): # type error: Signature of "parse" incompatible with supertype "TurtleParser" def parse( # type: ignore[override] - self, source: InputSource, graph: Graph, encoding: Optional[str] = "utf-8" + self, source: InputSource, graph: Graph, encoding: str | None = "utf-8" ) -> None: # we're currently being handed a Graph, not a ConjunctiveGraph # context-aware is this implied by formula_aware diff --git a/rdflib/plugins/parsers/nquads.py b/rdflib/plugins/parsers/nquads.py index 60b793b65..275810d22 100644 --- a/rdflib/plugins/parsers/nquads.py +++ b/rdflib/plugins/parsers/nquads.py @@ -3,6 +3,7 @@ graphs that can be used and queried. The store that backs the graph *must* be able to handle contexts. 
+```python >>> from rdflib import ConjunctiveGraph, URIRef, Namespace >>> g = ConjunctiveGraph() >>> data = open("test/data/nquads.rdflib/example.nquads", "rb") @@ -21,12 +22,15 @@ >>> s = URIRef("http://bibliographica.org/entity/E10009") >>> FOAF = Namespace("http://xmlns.com/foaf/0.1/") >>> assert(g.value(s, FOAF.name).eq("Arco Publications")) + +``` """ from __future__ import annotations from codecs import getreader -from typing import Any, MutableMapping, Optional +from collections.abc import MutableMapping +from typing import Any from rdflib.exceptions import ParserError as ParseError from rdflib.graph import ConjunctiveGraph, Dataset, Graph @@ -48,20 +52,26 @@ def parse( # type: ignore[override] self, inputsource: InputSource, sink: Graph, - bnode_context: Optional[_BNodeContextType] = None, + bnode_context: _BNodeContextType | None = None, skolemize: bool = False, **kwargs: Any, ): - """ - Parse inputsource as an N-Quads file. - - :type inputsource: `rdflib.parser.InputSource` - :param inputsource: the source of N-Quads-formatted data - :type sink: `rdflib.graph.Graph` - :param sink: where to send parsed triples - :type bnode_context: `dict`, optional - :param bnode_context: a dict mapping blank node identifiers to `~rdflib.term.BNode` instances. - See `.W3CNTriplesParser.parse` + """Parse inputsource as an N-Quads file. + + Args: + inputsource: The source of N-Quads-formatted data. + sink: The graph where parsed quads will be stored. + bnode_context: Optional dictionary mapping blank node identifiers to + [`BNode`][rdflib.term.BNode] instances. + See `.W3CNTriplesParser.parse` for more details. + skolemize: Whether to skolemize blank nodes. + + Returns: + The Dataset containing the parsed quads. + + Raises: + AssertionError: If the sink store is not context-aware. + ParseError: If the input is not a file-like object or contains invalid lines. """ assert ( sink.store.context_aware @@ -87,13 +97,13 @@ def parse( # type: ignore[override] source = inputsource.getCharacterStream() if not source: - source = inputsource.getByteStream() - source = getreader("utf-8")(source) + source = inputsource.getByteStream() # type: ignore[assignment] + source = getreader("utf-8")(source) # type: ignore[arg-type] if not hasattr(source, "read"): raise ParseError("Item to parse must be a file-like object.") - self.file = source + self.file = source # type: ignore[assignment] self.buffer = "" while True: self.line = __line = self.readline() @@ -106,7 +116,7 @@ def parse( # type: ignore[override] return self.sink - def parseline(self, bnode_context: Optional[_BNodeContextType] = None) -> None: + def parseline(self, bnode_context: _BNodeContextType | None = None) -> None: self.eat(r_wspace) if (not self.line) or self.line.startswith("#"): return # The line is empty or a comment diff --git a/rdflib/plugins/parsers/ntriples.py b/rdflib/plugins/parsers/ntriples.py index 933e99f3f..1def54a83 100644 --- a/rdflib/plugins/parsers/ntriples.py +++ b/rdflib/plugins/parsers/ntriples.py @@ -1,4 +1,4 @@ -"""\ +""" N-Triples Parser License: GPL 2, W3C, BSD, or MIT Author: Sean B. Palmer, inamidst.com @@ -8,15 +8,13 @@ import codecs import re +from collections.abc import MutableMapping from io import BytesIO, StringIO, TextIOBase +from re import Match, Pattern from typing import ( IO, TYPE_CHECKING, Any, - Match, - MutableMapping, - Optional, - Pattern, TextIO, Union, ) @@ -126,14 +124,17 @@ def uriquote(uri: str) -> str: class W3CNTriplesParser: """An N-Triples Parser. 
+ This is a legacy-style Triples parser for NTriples provided by W3C - Usage:: - p = W3CNTriplesParser(sink=MySink()) - sink = p.parse(f) # file; use parsestring for a string + Example: + ```python + p = W3CNTriplesParser(sink=MySink()) + sink = p.parse(f) # file; use parsestring for a string + ``` To define a context in which blank node identifiers refer to the same blank node - across instances of NTriplesParser, pass the same dict as ``bnode_context`` to each + across instances of NTriplesParser, pass the same dict as `bnode_context` to each instance. By default, a new blank node context is created for each instance of `W3CNTriplesParser`. """ @@ -142,8 +143,8 @@ class W3CNTriplesParser: def __init__( self, - sink: Optional[Union[DummySink, NTGraphSink]] = None, - bnode_context: Optional[_BNodeContextType] = None, + sink: DummySink | NTGraphSink | None = None, + bnode_context: _BNodeContextType | None = None, ): self.skolemize = False @@ -158,26 +159,28 @@ def __init__( else: self.sink = DummySink() - self.buffer: Optional[str] = None - self.file: Optional[Union[TextIO, codecs.StreamReader]] = None - self.line: Optional[str] = "" + self.buffer: str | None = None + self.file: TextIO | codecs.StreamReader | None = None + self.line: str | None = "" def parse( self, - f: Union[TextIO, IO[bytes], codecs.StreamReader], - bnode_context: Optional[_BNodeContextType] = None, + f: TextIO | IO[bytes] | codecs.StreamReader, + bnode_context: _BNodeContextType | None = None, skolemize: bool = False, - ) -> Union[DummySink, NTGraphSink]: - """ - Parse f as an N-Triples file. - - :type f: :term:`file object` - :param f: the N-Triples source - :type bnode_context: `dict`, optional - :param bnode_context: a dict mapping blank node identifiers (e.g., ``a`` in ``_:a``) - to `~rdflib.term.BNode` instances. An empty dict can be - passed in to define a distinct context for a given call to - `parse`. + ) -> DummySink | NTGraphSink: + """Parse f as an N-Triples file. + + Args: + f: The N-Triples source + bnode_context: A dict mapping blank node identifiers (e.g., `a` in `_:a`) + to [`BNode`][rdflib.term.BNode] instances. An empty dict can be + passed in to define a distinct context for a given call to + `parse`. 
+ skolemize: Whether to skolemize blank nodes + + Returns: + The sink containing the parsed triples """ if not hasattr(f, "read"): @@ -211,7 +214,7 @@ def parsestring(self, s: Union[bytes, bytearray, str], **kwargs) -> None: f = StringIO(s) self.parse(f, **kwargs) - def readline(self) -> Optional[str]: + def readline(self) -> str | None: """Read an N-Triples line from buffered input.""" # N-Triples lines end in either CRLF, CR, or LF # Therefore, we can't just use f.readline() @@ -237,7 +240,7 @@ def readline(self) -> Optional[str]: return None self.buffer += buffer - def parseline(self, bnode_context: Optional[_BNodeContextType] = None) -> None: + def parseline(self, bnode_context: _BNodeContextType | None = None) -> None: self.eat(r_wspace) if (not self.line) or self.line.startswith("#"): return # The line is empty or a comment @@ -281,7 +284,7 @@ def predicate(self) -> Union[bNode, URIRef]: return pred def object( - self, bnode_context: Optional[_BNodeContextType] = None + self, bnode_context: _BNodeContextType | None = None ) -> Union[URI, bNode, Literal]: objt = self.uriref() or self.nodeid(bnode_context) or self.literal() if objt is False: @@ -297,7 +300,7 @@ def uriref(self) -> Union[te.Literal[False], URI]: return False def nodeid( - self, bnode_context: Optional[_BNodeContextType] = None + self, bnode_context: _BNodeContextType | None = None ) -> Union[te.Literal[False], bNode, URI]: if self.peek("_"): if self.skolemize: @@ -352,34 +355,33 @@ def triple(self, s: _SubjectType, p: _PredicateType, o: _ObjectType) -> None: class NTParser(Parser): - """parser for the ntriples format, often stored with the .nt extension + """Parser for the N-Triples format, often stored with the .nt extension. - See http://www.w3.org/TR/rdf-testcases/#ntriples""" + See http://www.w3.org/TR/rdf-testcases/#ntriples + """ __slots__ = () @classmethod def parse(cls, source: InputSource, sink: Graph, **kwargs: Any) -> None: - """ - Parse the NT format + """Parse the NT format. 
- :type source: `rdflib.parser.InputSource` - :param source: the source of NT-formatted data - :type sink: `rdflib.graph.Graph` - :param sink: where to send parsed triples - :param kwargs: Additional arguments to pass to `.W3CNTriplesParser.parse` + Args: + source: The source of NT-formatted data + sink: Where to send parsed triples + **kwargs: Additional arguments to pass to `W3CNTriplesParser.parse` """ f: Union[TextIO, IO[bytes], codecs.StreamReader] - f = source.getCharacterStream() + f = source.getCharacterStream() # type: ignore[assignment] if not f: b = source.getByteStream() # TextIOBase includes: StringIO and TextIOWrapper if isinstance(b, TextIOBase): # f is not really a ByteStream, but a CharacterStream - f = b # type: ignore[assignment] + f = b # type: ignore[unreachable] else: # since N-Triples 1.1 files can and should be utf-8 encoded - f = codecs.getreader("utf-8")(b) + f = codecs.getreader("utf-8")(b) # type: ignore[arg-type] parser = W3CNTriplesParser(NTGraphSink(sink)) parser.parse(f, **kwargs) f.close() diff --git a/rdflib/plugins/parsers/patch.py b/rdflib/plugins/parsers/patch.py index 5e8f12d1f..862d21874 100644 --- a/rdflib/plugins/parsers/patch.py +++ b/rdflib/plugins/parsers/patch.py @@ -1,8 +1,9 @@ from __future__ import annotations from codecs import getreader +from collections.abc import MutableMapping from enum import Enum -from typing import TYPE_CHECKING, Any, MutableMapping, Optional, Union +from typing import TYPE_CHECKING, Any, Union from rdflib.exceptions import ParserError as ParseError from rdflib.graph import Dataset @@ -22,8 +23,7 @@ class Operation(Enum): - """ - Enum of RDF Patch operations. + """Enum of RDF Patch operations. Operations: - `AddTripleOrQuad` (A): Adds a triple or quad. @@ -51,20 +51,17 @@ def parse( # type: ignore[override] self, inputsource: InputSource, sink: Dataset, - bnode_context: Optional[_BNodeContextType] = None, + bnode_context: _BNodeContextType | None = None, skolemize: bool = False, **kwargs: Any, ) -> Dataset: - """ - Parse inputsource as an RDF Patch file. - - :type inputsource: `rdflib.parser.InputSource` - :param inputsource: the source of RDF Patch formatted data - :type sink: `rdflib.graph.Dataset` - :param sink: where to send parsed data - :type bnode_context: `dict`, optional - :param bnode_context: a dict mapping blank node identifiers to `~rdflib.term.BNode` instances. - See `.W3CNTriplesParser.parse` + """Parse inputsource as an RDF Patch file. + + Args: + inputsource: the source of RDF Patch formatted data + sink: where to send parsed data + bnode_context: a dict mapping blank node identifiers to [`BNode`][rdflib.term.BNode] + instances. See `.W3CNTriplesParser.parse` """ assert sink.store.context_aware, ( "RDFPatchParser must be given" " a context aware store." 
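
For orientation, the `NTParser` reworked above is normally reached through `Graph.parse` rather than called directly; a minimal sketch using rdflib's standard `"nt"` plugin name:

```python
from rdflib import Graph

g = Graph()
g.parse(
    data='<http://example/s> <http://example/p> "An object" .',
    format="nt",  # dispatches to NTParser via the plugin registry
)
assert len(g) == 1
```
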
@@ -75,13 +72,13 @@ def parse( # type: ignore[override] source = inputsource.getCharacterStream() if not source: - source = inputsource.getByteStream() - source = getreader("utf-8")(source) + source = inputsource.getByteStream() # type: ignore[assignment] + source = getreader("utf-8")(source) # type: ignore[arg-type] if not hasattr(source, "read"): raise ParseError("Item to parse must be a file-like object.") - self.file = source + self.file = source # type: ignore[assignment] self.buffer = "" while True: self.line = __line = self.readline() @@ -93,7 +90,7 @@ def parse( # type: ignore[override] raise ParseError("Invalid line (%s):\n%r" % (msg, __line)) return self.sink - def parsepatch(self, bnode_context: Optional[_BNodeContextType] = None) -> None: + def parsepatch(self, bnode_context: _BNodeContextType | None = None) -> None: self.eat(r_wspace) # From spec: "No comments should be included (comments start # and run to end # of line)." @@ -112,7 +109,7 @@ def parsepatch(self, bnode_context: Optional[_BNodeContextType] = None) -> None: self.delete_prefix() def add_or_remove_triple_or_quad( - self, operation, bnode_context: Optional[_BNodeContextType] = None + self, operation, bnode_context: _BNodeContextType | None = None ) -> None: self.eat(r_wspace) if (not self.line) or self.line.startswith("#"): @@ -169,7 +166,7 @@ def eat_op(self, op: str) -> None: self.line = self.line.lstrip(op) # type: ignore[union-attr] def nodeid( - self, bnode_context: Optional[_BNodeContextType] = None + self, bnode_context: _BNodeContextType | None = None ) -> Union[te.Literal[False], BNode, URIRef]: if self.peek("_"): return BNode(self.eat(r_nodeid).group(1)) diff --git a/rdflib/plugins/parsers/rdfxml.py b/rdflib/plugins/parsers/rdfxml.py index 54fc69567..9ea1fceca 100644 --- a/rdflib/plugins/parsers/rdfxml.py +++ b/rdflib/plugins/parsers/rdfxml.py @@ -4,7 +4,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Dict, List, NoReturn, Optional, Tuple +from typing import TYPE_CHECKING, Any, NoReturn from urllib.parse import urldefrag, urljoin from xml.sax import handler, make_parser, xmlreader from xml.sax.handler import ErrorHandler @@ -15,7 +15,7 @@ from rdflib.namespace import RDF, is_ncname from rdflib.parser import InputSource, Parser from rdflib.plugins.parsers.RDFVOC import RDFVOC -from rdflib.term import BNode, Identifier, Literal, URIRef +from rdflib.term import BNode, IdentifiedNode, Literal, URIRef if TYPE_CHECKING: # from xml.sax.expatreader import ExpatLocator @@ -146,16 +146,16 @@ def reset(self) -> None: document_element = ElementHandler() document_element.start = self.document_element_start document_element.end = lambda name, qname: None - self.stack: List[Optional[ElementHandler]] = [ + self.stack: list[ElementHandler | None] = [ None, document_element, ] - self.ids: Dict[str, int] = {} # remember IDs we have already seen - self.bnode: Dict[str, Identifier] = {} - self._ns_contexts: List[Dict[str, Optional[str]]] = [ + self.ids: dict[str, int] = {} # remember IDs we have already seen + self.bnode: dict[str, IdentifiedNode] = {} + self._ns_contexts: list[dict[str, str | None]] = [ {} ] # contains uri -> prefix dicts - self._current_context: Dict[str, Optional[str]] = self._ns_contexts[-1] + self._current_context: dict[str, str | None] = self._ns_contexts[-1] # ContentHandler methods @@ -165,17 +165,17 @@ def setDocumentLocator(self, locator: Locator): def startDocument(self) -> None: pass - def startPrefixMapping(self, prefix: Optional[str], namespace: str) -> None: + 
def startPrefixMapping(self, prefix: str | None, namespace: str) -> None: self._ns_contexts.append(self._current_context.copy()) self._current_context[namespace] = prefix self.store.bind(prefix, namespace or "", override=False) - def endPrefixMapping(self, prefix: Optional[str]) -> None: + def endPrefixMapping(self, prefix: str | None) -> None: self._current_context = self._ns_contexts[-1] del self._ns_contexts[-1] def startElementNS( - self, name: Tuple[Optional[str], str], qname, attrs: AttributesImpl + self, name: tuple[str | None, str], qname, attrs: AttributesImpl # type: ignore[override] ) -> None: stack = self.stack stack.append(ElementHandler()) @@ -207,7 +207,7 @@ def startElementNS( current.language = language current.start(name, qname, attrs) - def endElementNS(self, name: Tuple[Optional[str], str], qname) -> None: + def endElementNS(self, name: tuple[str | None, str], qname) -> None: self.current.end(name, qname) self.stack.pop() @@ -222,7 +222,7 @@ def ignorableWhitespace(self, content) -> None: def processingInstruction(self, target, data) -> None: pass - def add_reified(self, sid: Identifier, spo: _TripleType): + def add_reified(self, sid: IdentifiedNode, spo: _TripleType): s, p, o = spo self.store.add((sid, RDF.type, RDF.Statement)) self.store.add((sid, RDF.subject, s)) @@ -238,21 +238,21 @@ def error(self, message: str) -> NoReturn: ) raise ParserError(info + message) - def get_current(self) -> Optional[ElementHandler]: + def get_current(self) -> ElementHandler | None: return self.stack[-2] # Create a read only property called current so that self.current # give the current element handler. current = property(get_current) - def get_next(self) -> Optional[ElementHandler]: + def get_next(self) -> ElementHandler | None: return self.stack[-1] # Create a read only property that gives the element handler to be # used for the next element. 
next = property(get_next) - def get_parent(self) -> Optional[ElementHandler]: + def get_parent(self) -> ElementHandler | None: return self.stack[-3] # Create a read only property that gives the current parent @@ -267,8 +267,8 @@ def absolutize(self, uri: str) -> URIRef: return URIRef(result) def convert( - self, name: Tuple[Optional[str], str], qname, attrs: AttributesImpl - ) -> Tuple[URIRef, Dict[URIRef, str]]: + self, name: tuple[str | None, str], qname, attrs: AttributesImpl + ) -> tuple[URIRef, dict[URIRef, str]]: if name[0] is None: # type error: Incompatible types in assignment (expression has type "URIRef", variable has type "Tuple[Optional[str], str]") name = URIRef(name[1]) # type: ignore[assignment] @@ -291,15 +291,14 @@ def convert( atts[RDFNS[att]] = v # type: ignore[misc, valid-type] else: atts[URIRef(att)] = v - # type error: Incompatible return value type (got "Tuple[Tuple[Optional[str], str], Dict[Any, Any]]", expected "Tuple[URIRef, Dict[URIRef, str]]") + # type error: Incompatible return value type (got "Tuple[Tuple[Optional[str], str], dict[Any, Any]]", expected "Tuple[URIRef, dict[URIRef, str]]") return name, atts # type: ignore[return-value] def document_element_start( - self, name: Tuple[str, str], qname, attrs: AttributesImpl + self, name: tuple[str, str], qname, attrs: AttributesImpl ) -> None: if name[0] and URIRef("".join(name)) == RDFVOC.RDF: - # Cheap hack so 2to3 doesn't turn it into __next__ - next = getattr(self, "next") + next = self.next next.start = self.node_element_start next.end = self.node_element_end else: @@ -309,15 +308,14 @@ def document_element_start( # another element will cause error def node_element_start( - self, name: Tuple[str, str], qname, attrs: AttributesImpl + self, name: tuple[str, str], qname, attrs: AttributesImpl ) -> None: # type error: Incompatible types in assignment (expression has type "URIRef", variable has type "Tuple[str, str]") name, atts = self.convert(name, qname, attrs) # type: ignore[assignment] current = self.current absolutize = self.absolutize - # Cheap hack so 2to3 doesn't turn it into __next__ - next = getattr(self, "next") + next = self.next next.start = self.property_element_start next.end = self.property_element_end @@ -391,7 +389,7 @@ def node_element_start( current.subject = subject - def node_element_end(self, name: Tuple[str, str], qname) -> None: + def node_element_end(self, name: tuple[str, str], qname) -> None: # repeat node-elements are only allowed # at at top-level @@ -403,16 +401,15 @@ def node_element_end(self, name: Tuple[str, str], qname) -> None: self.parent.object = self.current.subject def property_element_start( - self, name: Tuple[str, str], qname, attrs: AttributesImpl + self, name: tuple[str, str], qname, attrs: AttributesImpl ) -> None: # type error: Incompatible types in assignment (expression has type "URIRef", variable has type "Tuple[str, str]") name, atts = self.convert(name, qname, attrs) # type: ignore[assignment] current = self.current absolutize = self.absolutize - # Cheap hack so 2to3 doesn't turn it into __next__ - next = getattr(self, "next") - object: Optional[_ObjectType] = None + next = self.next + object: _ObjectType | None = None current.data = None current.list = None @@ -533,7 +530,7 @@ def property_element_char(self, data: str) -> None: if current.data is not None: current.data += data - def property_element_end(self, name: Tuple[str, str], qname) -> None: + def property_element_end(self, name: tuple[str, str], qname) -> None: current = self.current if current.data is 
not None and current.object is None: literalLang = current.language @@ -552,7 +549,7 @@ def property_element_end(self, name: Tuple[str, str], qname) -> None: ) current.subject = None - def list_node_element_end(self, name: Tuple[str, str], qname) -> None: + def list_node_element_end(self, name: tuple[str, str], qname) -> None: current = self.current if self.parent.list == RDF.nil: list = BNode() @@ -571,7 +568,7 @@ def list_node_element_end(self, name: Tuple[str, str], qname) -> None: self.parent.list = list def literal_element_start( - self, name: Tuple[str, str], qname, attrs: AttributesImpl + self, name: tuple[str, str], qname, attrs: AttributesImpl ) -> None: current = self.current self.next.start = self.literal_element_start @@ -607,7 +604,7 @@ def literal_element_start( def literal_element_char(self, data: str) -> None: self.current.object += escape(data) - def literal_element_end(self, name: Tuple[str, str], qname) -> None: + def literal_element_end(self, name: tuple[str, str], qname) -> None: if name[0]: prefix = self._current_context[name[0]] if prefix: @@ -632,12 +629,14 @@ def create_parser(target: InputSource, store: Graph) -> xmlreader.XMLReader: # type error: Argument 1 to "setDocumentLocator" of "RDFXMLHandler" has incompatible type "InputSource"; expected "Locator" rdfxml.setDocumentLocator(target) # type: ignore[arg-type] # rdfxml.setDocumentLocator(_Locator(self.url, self.parser)) - parser.setContentHandler(rdfxml) + parser.setContentHandler(rdfxml) # type: ignore[arg-type] parser.setErrorHandler(ErrorHandler()) return parser class RDFXMLParser(Parser): + """An RDF/XML parser.""" + def __init__(self): pass diff --git a/rdflib/plugins/parsers/trig.py b/rdflib/plugins/parsers/trig.py index 9ed6e8bbc..fcad253dc 100644 --- a/rdflib/plugins/parsers/trig.py +++ b/rdflib/plugins/parsers/trig.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import Any, MutableSequence +from collections.abc import MutableSequence +from typing import Any from rdflib.graph import ConjunctiveGraph, Graph from rdflib.parser import InputSource, Parser @@ -168,8 +169,8 @@ def parse(self, source: InputSource, graph: Graph, encoding: str = "utf-8") -> N stream = source.getCharacterStream() # try to get str stream first if not stream: # fallback to get the bytes stream - stream = source.getByteStream() - p.loadStream(stream) + stream = source.getByteStream() # type: ignore[assignment] + p.loadStream(stream) # type: ignore[arg-type] for prefix, namespace in p._bindings.items(): conj_graph.bind(prefix, namespace) diff --git a/rdflib/plugins/parsers/trix.py b/rdflib/plugins/parsers/trix.py index 833e18568..d4a83bfb8 100644 --- a/rdflib/plugins/parsers/trix.py +++ b/rdflib/plugins/parsers/trix.py @@ -4,7 +4,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Dict, List, NoReturn, Optional, Tuple +from typing import TYPE_CHECKING, Any, NoReturn from xml.sax import handler, make_parser from xml.sax.handler import ErrorHandler @@ -29,8 +29,8 @@ class TriXHandler(handler.ContentHandler): """An Sax Handler for TriX. 
See http://sw.nokia.com/trix/""" - lang: Optional[str] - datatype: Optional[str] + lang: str | None + datatype: str | None def __init__(self, store: Store): self.store = store @@ -38,9 +38,9 @@ def __init__(self, store: Store): self.reset() def reset(self) -> None: - self.bnode: Dict[str, BNode] = {} - self.graph: Optional[Graph] = None - self.triple: Optional[List[Identifier]] = None + self.bnode: dict[str, BNode] = {} + self.graph: Graph | None = None + self.triple: list[Identifier] | None = None self.state = 0 self.lang = None self.datatype = None @@ -53,14 +53,14 @@ def setDocumentLocator(self, locator: Locator): def startDocument(self) -> None: pass - def startPrefixMapping(self, prefix: Optional[str], namespace: str) -> None: + def startPrefixMapping(self, prefix: str | None, namespace: str) -> None: pass - def endPrefixMapping(self, prefix: Optional[str]) -> None: + def endPrefixMapping(self, prefix: str | None) -> None: pass def startElementNS( - self, name: Tuple[Optional[str], str], qname, attrs: AttributesImpl + self, name: tuple[str | None, str], qname, attrs: AttributesImpl # type: ignore[override] ) -> None: if name[0] != str(TRIXNS): self.error( @@ -150,7 +150,7 @@ def startElementNS( self.chars = "" - def endElementNS(self, name: Tuple[Optional[str], str], qname) -> None: + def endElementNS(self, name: tuple[str | None, str], qname) -> None: if TYPE_CHECKING: assert self.triple is not None if name[0] != str(TRIXNS): @@ -268,7 +268,7 @@ def create_parser(store: Store) -> XMLReader: pass # Not present in Jython (at least) parser.setFeature(handler.feature_namespaces, 1) trix = TriXHandler(store) - parser.setContentHandler(trix) + parser.setContentHandler(trix) # type: ignore[arg-type] parser.setErrorHandler(ErrorHandler()) return parser diff --git a/rdflib/plugins/serializers/__init__.py b/rdflib/plugins/serializers/__init__.py index e69de29bb..036686ace 100644 --- a/rdflib/plugins/serializers/__init__.py +++ b/rdflib/plugins/serializers/__init__.py @@ -0,0 +1,2 @@ +"""Modules for serializing RDFLib graphs into XML, HexTuples, N3, Turtle etc. +""" diff --git a/rdflib/plugins/serializers/hext.py b/rdflib/plugins/serializers/hext.py index 9a8187c76..17e99ce4a 100644 --- a/rdflib/plugins/serializers/hext.py +++ b/rdflib/plugins/serializers/hext.py @@ -7,7 +7,8 @@ import json import warnings -from typing import IO, Any, Callable, List, Optional, Type, Union, cast +from collections.abc import Callable +from typing import IO, Any, Union, cast from rdflib.graph import DATASET_DEFAULT_GRAPH_ID, ConjunctiveGraph, Dataset, Graph from rdflib.namespace import RDF, XSD @@ -30,18 +31,18 @@ class HextuplesSerializer(Serializer): Serializes RDF graphs to NTriples format. 
""" - contexts: List[Union[Graph, IdentifiedNode]] + contexts: list[Graph | IdentifiedNode] dumps: Callable - def __new__(cls, store: Union[Graph, Dataset, ConjunctiveGraph]): + def __new__(cls, store: Graph | Dataset | ConjunctiveGraph): if _HAS_ORJSON: - cls.str_local_id: Union[str, Any] = orjson.Fragment(b'"localId"') - cls.str_global_id: Union[str, Any] = orjson.Fragment(b'"globalId"') - cls.empty: Union[str, Any] = orjson.Fragment(b'""') - cls.lang_str: Union[str, Any] = orjson.Fragment( + cls.str_local_id: str | Any = orjson.Fragment(b'"localId"') + cls.str_global_id: str | Any = orjson.Fragment(b'"globalId"') + cls.empty: str | Any = orjson.Fragment(b'""') + cls.lang_str: str | Any = orjson.Fragment( b'"' + RDF.langString.encode("utf-8") + b'"' ) - cls.xsd_string: Union[str, Any] = orjson.Fragment( + cls.xsd_string: str | Any = orjson.Fragment( b'"' + XSD.string.encode("utf-8") + b'"' ) else: @@ -52,9 +53,9 @@ def __new__(cls, store: Union[Graph, Dataset, ConjunctiveGraph]): cls.xsd_string = f"{XSD.string}" return super(cls, cls).__new__(cls) - def __init__(self, store: Union[Graph, Dataset, ConjunctiveGraph]): - self.default_context: Optional[Union[Graph, IdentifiedNode]] - self.graph_type: Union[Type[Graph], Type[Dataset], Type[ConjunctiveGraph]] + def __init__(self, store: Graph | Dataset | ConjunctiveGraph): + self.default_context: Graph | IdentifiedNode | None + self.graph_type: type[Graph] | type[Dataset] | type[ConjunctiveGraph] if isinstance(store, (Dataset, ConjunctiveGraph)): self.graph_type = ( Dataset if isinstance(store, Dataset) else ConjunctiveGraph @@ -75,10 +76,10 @@ def __init__(self, store: Union[Graph, Dataset, ConjunctiveGraph]): def serialize( self, stream: IO[bytes], - base: Optional[str] = None, - encoding: Optional[str] = "utf-8", - **kwargs, - ): + base: str | None = None, + encoding: str | None = "utf-8", + **kwargs: Any, + ) -> None: if base is not None: warnings.warn( "base has no meaning for Hextuples serialization. " @@ -96,8 +97,8 @@ def serialize( raise Exception( "Hextuple serialization can't (yet) handle formula-aware stores" ) - context: Union[Graph, IdentifiedNode] - context_str: Union[bytes, str] + context: Graph | IdentifiedNode + context_str: bytes | str for context in self.contexts: for triple in context: # Generate context string just once, because it doesn't change @@ -118,7 +119,7 @@ def serialize( if hl is not None: stream.write(hl if _HAS_ORJSON else hl.encode()) - def _hex_line(self, triple, context_str: Union[bytes, str]): + def _hex_line(self, triple, context_str: bytes | str): if isinstance( triple[0], (URIRef, BNode) ): # exclude QuotedGraph and other objects @@ -163,7 +164,7 @@ def _hex_line(self, triple, context_str: Union[bytes, str]): language, context_str, ] - outline: Union[str, bytes] + outline: str | bytes if _HAS_ORJSON: outline = orjson.dumps(line_list, option=orjson.OPT_APPEND_NEWLINE) else: @@ -180,7 +181,7 @@ def _iri_or_bn(self, i_): else: return None - def _context_str(self, context: Union[Graph, IdentifiedNode]) -> str: + def _context_str(self, context: Graph | IdentifiedNode) -> str: context_identifier: IdentifiedNode = ( context.identifier if isinstance(context, Graph) else context ) diff --git a/rdflib/plugins/serializers/jsonld.py b/rdflib/plugins/serializers/jsonld.py index 15f307edf..68ee6b9b7 100644 --- a/rdflib/plugins/serializers/jsonld.py +++ b/rdflib/plugins/serializers/jsonld.py @@ -1,10 +1,8 @@ """ -This serialiser will output an RDF Graph as a JSON-LD formatted document. 
See: - - http://json-ld.org/ - -Example usage::
+This serialiser will output an RDF Graph as a JSON-LD formatted document. See http://json-ld.org/
+Example:
+ ```python
 >>> from rdflib import Graph
 >>> testrdf = '''
 ... @prefix dc: <http://purl.org/dc/terms/> .
@@ -27,6 +25,7 @@
 }
 ]
+ ```
 """
# From: https://github.com/RDFLib/rdflib-jsonld/blob/feature/json-ld-1.1/rdflib_jsonld/serializer.py
@@ -38,17 +37,23 @@
 from __future__ import annotations
 import warnings
-from typing import IO, Any, Dict, List, Optional
+from typing import IO, TYPE_CHECKING, Any, Union, cast
-from rdflib.graph import DATASET_DEFAULT_GRAPH_ID, Graph, _ObjectType
+from rdflib.graph import DATASET_DEFAULT_GRAPH_ID, Graph
 from rdflib.namespace import RDF, XSD
 from rdflib.serializer import Serializer
-from rdflib.term import BNode, IdentifiedNode, Identifier, Literal, URIRef
+from rdflib.term import BNode, IdentifiedNode, Literal, URIRef
 from ..shared.jsonld.context import UNDEF, Context
 from ..shared.jsonld.keys import CONTEXT, GRAPH, ID, LANG, LIST, SET, VOCAB
 from ..shared.jsonld.util import _HAS_ORJSON, json, orjson
+if TYPE_CHECKING:
+ from rdflib.graph import _ObjectType
+
+ # In JSON-LD, a Literal cannot be a subject, so define a new type
+ from ..shared.jsonld.context import JSONLDSubjectType, Term
+
 __all__ = ["JsonLDSerializer", "from_rdf"]
@@ -56,16 +61,18 @@
 class JsonLDSerializer(Serializer):
+ """JSON-LD RDF graph serializer."""
+
 def __init__(self, store: Graph):
 super(JsonLDSerializer, self).__init__(store)
 def serialize(
 self,
 stream: IO[bytes],
- base: Optional[str] = None,
- encoding: Optional[str] = None,
- **kwargs,
- ):
+ base: str | None = None,
+ encoding: str | None = None,
+ **kwargs: Any,
+ ) -> None:
 # TODO: docstring w. args and return value
 encoding = encoding or "utf-8"
 if encoding not in ("utf-8", "utf-16"):
@@ -188,7 +195,7 @@ def convert(self, graph: Graph):
 context = self.context
- objs: List[Any] = []
+ objs: list[Any] = []
 for g in graphs:
 obj = {}
 graphname = None
@@ -223,7 +230,7 @@ def convert(self, graph: Graph):
 return objs
 def from_graph(self, graph: Graph):
- nodemap: Dict[Any, Any] = {}
+ nodemap: dict[Any, Any] = {}
 for s in set(graph.subjects()):
 ## only iri:s and unreferenced (rest will be promoted to top if needed)
@@ -252,9 +259,11 @@ def process_subject(self, graph: Graph, s: IdentifiedNode, nodemap):
 nodemap[node_id] = node
 for p, o in graph.predicate_objects(s):
- # type error: Argument 3 to "add_to_node" of "Converter" has incompatible type "Node"; expected "IdentifiedNode"
- # type error: Argument 4 to "add_to_node" of "Converter" has incompatible type "Node"; expected "Identifier"
- self.add_to_node(graph, s, p, o, node, nodemap) # type: ignore[arg-type]
+ # predicate_objects can return a lot of different types,
+ # but we only need to treat it as a JSON-LD-compatible type.
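+ # Note: typing.cast() only narrows the static type for the type checker;
+ # it performs no runtime conversion or validation of the value.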
+ object_val = cast(Union[IdentifiedNode, Literal], o)
+ pred_val = cast(IdentifiedNode, p)
+ self.add_to_node(graph, s, pred_val, object_val, node, nodemap)
 return node
@@ -263,49 +272,62 @@
 def add_to_node(
 graph: Graph,
 s: IdentifiedNode,
 p: IdentifiedNode,
- o: Identifier,
- s_node: Dict[str, Any],
+ o: IdentifiedNode | Literal,
+ s_node: dict[str, Any],
 nodemap,
 ):
 context = self.context
-
+ term: Term | None = None
 if isinstance(o, Literal):
- datatype = str(o.datatype) if o.datatype else None
+ _datatype = str(o.datatype) if o.datatype else None
 language = o.language
- term = context.find_term(str(p), datatype, language=language)
+ term = context.find_term(str(p), _datatype, language=language)
 else:
 containers = [LIST, None] if graph.value(o, RDF.first) else [None]
 for container in containers:
 for coercion in (ID, VOCAB, UNDEF):
- # type error: Argument 2 to "find_term" of "Context" has incompatible type "object"; expected "Union[str, Defined, None]"
- # type error: Argument 3 to "find_term" of "Context" has incompatible type "Optional[str]"; expected "Union[Defined, str]"
- term = context.find_term(str(p), coercion, container) # type: ignore[arg-type]
+ term = context.find_term(str(p), coercion, container)
 if term:
 break
 if term:
 break
+
 language = None if term is None else term.language
- node = None
+ node: str | list[Any] | dict[str, Any] | None = None
 use_set = not context.active
- if term:
+ if term is not None:
 p_key = term.name
 if term.type:
 node = self.type_coerce(o, term.type)
- # type error: "Identifier" has no attribute "language"
- elif term.language and o.language == term.language: # type: ignore[attr-defined]
+ elif (
+ term.language and isinstance(o, Literal) and o.language == term.language
+ ):
 node = str(o)
- # type error: Right operand of "and" is never evaluated
- elif context.language and (term.language is None and o.language is None): # type: ignore[unreachable]
- node = str(o) # type: ignore[unreachable]
+ elif context.language:
+ # TODO: MyPy thinks this is unreachable?
+ if isinstance(o, Literal) and ( # type: ignore[unreachable]
+ term.language is None and o.language is None
+ ):
+ node = str(o)
 if LIST in term.container:
- node = [
- self.type_coerce(v, term.type)
- or self.to_raw_value(graph, s, v, nodemap)
- for v in self.to_collection(graph, o)
- ]
+ assert isinstance(
+ o, IdentifiedNode
+ ), "Subject of a @list container must be a URI or BNode"
+ _col = self.to_collection(graph, o)
+ if _col is not None:
+ node = []
+ for v in _col:
+ if isinstance(v, (IdentifiedNode, Literal)):
+ coerced = self.type_coerce(v, term.type)
+ else:
+ coerced = None
+ if coerced is not None:
+ node.append(coerced)
+ else:
+ node.append(self.to_raw_value(graph, s, v, nodemap))
 elif LANG in term.container and language:
 value = s_node.setdefault(p_key, {})
 values = value.get(language)
@@ -323,7 +345,7 @@ def add_to_node(
 else:
 p_key = context.to_symbol(p)
 # TODO: for coercing curies - quite clumsy; unify to_symbol and find_term?
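 # (to_symbol shrinks a full IRI to a term name or CURIE, while find_term
 # looks a Term up by its expanded IRI, hence the duplication noted above.)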
- key_term = context.terms.get(p_key)
+ key_term = context.terms.get(p_key) # type: ignore[arg-type]
 if key_term and (key_term.type or key_term.container):
 p_key = p
 if not term and p == RDF.type and not self.use_rdf_type:
@@ -345,7 +367,9 @@ def add_to_node(
 value = node
 s_node[p_key] = value
- def type_coerce(self, o: Identifier, coerce_type: str):
+ def type_coerce(
+ self, o: IdentifiedNode | Literal, coerce_type: str
+ ) -> str | IdentifiedNode | Literal | None:
 if coerce_type == ID:
 if isinstance(o, URIRef):
 return self.context.shrink_iri(o)
@@ -361,15 +385,19 @@ def type_coerce(
 return None
 def to_raw_value(
- self, graph: Graph, s: IdentifiedNode, o: Identifier, nodemap: Dict[str, Any]
+ self,
+ graph: Graph,
+ s: JSONLDSubjectType,
+ o: _ObjectType,
+ nodemap: dict[str, Any],
 ):
 context = self.context
- coll = self.to_collection(graph, o)
+ if isinstance(o, (URIRef, BNode)):
+ coll: list[Any] | None = self.to_collection(graph, o)
+ else:
+ coll = None
 if coll is not None:
- coll = [
- self.to_raw_value(graph, s, lo, nodemap)
- for lo in self.to_collection(graph, o)
- ]
+ coll = [self.to_raw_value(graph, s, lo, nodemap) for lo in coll]
 return {context.list_key: coll}
 elif isinstance(o, BNode):
 embed = (
@@ -407,27 +435,36 @@ def to_raw_value(
 else:
 return v
- def to_collection(self, graph: Graph, l_: Identifier):
+ def to_collection(
+ self, graph: Graph, l_: JSONLDSubjectType
+ ) -> list[_ObjectType] | None:
 if l_ != RDF.nil and not graph.value(l_, RDF.first):
 return None
- list_nodes: List[Optional[_ObjectType]] = []
- chain = set([l_])
- while l_:
- if l_ == RDF.nil:
+ list_nodes: list[_ObjectType] = []
+ chain: set[_ObjectType] = set([l_])
+ list_head: _ObjectType | None = l_
+ while list_head:
+ if list_head == RDF.nil:
+ # The only way to return a real result is to reach
+ # an rdf:nil node at the end of an RDF list.
 return list_nodes
- if isinstance(l_, URIRef):
+ if isinstance(list_head, URIRef):
 return None
 first, rest = None, None
- for p, o in graph.predicate_objects(l_):
+ for p, o in graph.predicate_objects(list_head):
 if not first and p == RDF.first:
 first = o
 elif not rest and p == RDF.rest:
 rest = o
 elif p != RDF.type or o != RDF.List:
 return None
- list_nodes.append(first)
- # type error: Incompatible types in assignment (expression has type "Optional[Node]", variable has type "Identifier")
- l_ = rest # type: ignore[assignment]
- if l_ in chain:
+ if first is not None:
+ list_nodes.append(first)
+ if rest is None:
+ # TODO: If no rdf:rest is found, should we return the current list_nodes?
 return None
- chain.add(l_)
+ list_head = rest
+ if list_head in chain:
+ return None # TODO: Should this just return the current list_nodes?
+ chain.add(list_head)
+ return None
diff --git a/rdflib/plugins/serializers/longturtle.py b/rdflib/plugins/serializers/longturtle.py
index e886574f3..fdd7b77af 100644
--- a/rdflib/plugins/serializers/longturtle.py
+++ b/rdflib/plugins/serializers/longturtle.py
@@ -1,6 +1,6 @@
 """
 LongTurtle RDF graph serializer for RDFLib.
-See <http://www.w3.org/TeamSubmission/turtle/> for syntax specification.
+See http://www.w3.org/TeamSubmission/turtle/ for syntax specification.
 This variant, longturtle as opposed to just turtle, makes some small format changes to turtle - the original turtle serializer.
It: @@ -16,7 +16,13 @@ - Nicholas Car, 2023 """ +from __future__ import annotations + +from typing import IO, Any, Optional + +from rdflib.compare import to_canonical_graph from rdflib.exceptions import Error +from rdflib.graph import Graph from rdflib.namespace import RDF from rdflib.term import BNode, Literal, URIRef @@ -33,16 +39,29 @@ class LongTurtleSerializer(RecursiveSerializer): + """LongTurtle RDF graph serializer.""" + short_name = "longturtle" indentString = " " def __init__(self, store): self._ns_rewrite = {} - super(LongTurtleSerializer, self).__init__(store) + namespace_manager = store.namespace_manager + store = to_canonical_graph(store) + content = store.serialize(format="application/n-triples") + lines = content.split("\n") + lines.sort() + graph = Graph() + graph.parse( + data="\n".join(lines), format="application/n-triples", skolemize=True + ) + graph = graph.de_skolemize() + graph.namespace_manager = namespace_manager + super(LongTurtleSerializer, self).__init__(graph) self.keywords = {RDF.type: "a"} self.reset() self.stream = None - self._spacious = _SPACIOUS_OUTPUT + self._spacious: bool = _SPACIOUS_OUTPUT def addNamespace(self, prefix, namespace): # Turtle does not support prefixes that start with _ @@ -74,7 +93,14 @@ def reset(self): self._started = False self._ns_rewrite = {} - def serialize(self, stream, base=None, encoding=None, spacious=None, **args): + def serialize( + self, + stream: IO[bytes], + base: Optional[str] = None, + encoding: Optional[str] = None, + spacious: Optional[bool] = None, + **kwargs: Any, + ) -> None: self.reset() self.stream = stream # if base is given here, use, if not and a base is set for the graph use that @@ -175,7 +201,7 @@ def s_squared(self, subject): return False self.write("\n" + self.indent() + "[]") self.predicateList(subject, newline=False) - self.write(" ;\n.") + self.write("\n.") return True def path(self, node, position, newline=False): @@ -292,6 +318,8 @@ def objectList(self, objects): if count > 1: if not isinstance(objects[0], BNode): self.write("\n" + self.indent(1)) + else: + self.write(" ") first_nl = True self.path(objects[0], OBJECT, newline=first_nl) for obj in objects[1:]: diff --git a/rdflib/plugins/serializers/n3.py b/rdflib/plugins/serializers/n3.py index d8036bba0..0f224197c 100644 --- a/rdflib/plugins/serializers/n3.py +++ b/rdflib/plugins/serializers/n3.py @@ -2,7 +2,9 @@ Notation 3 (N3) RDF graph serializer for RDFLib. 
""" -from rdflib.graph import Graph +from typing import cast + +from rdflib.graph import Graph, QuotedGraph from rdflib.namespace import OWL, Namespace from rdflib.plugins.serializers.turtle import OBJECT, SUBJECT, TurtleSerializer @@ -12,6 +14,8 @@ class N3Serializer(TurtleSerializer): + """Notation 3 (N3) RDF graph serializer.""" + short_name = "n3" def __init__(self, store: Graph, parent=None): @@ -68,7 +72,7 @@ def s_clause(self, subject): if isinstance(subject, Graph): self.write("\n" + self.indent()) self.p_clause(subject, SUBJECT) - self.predicateList(subject) + self.predicateList(cast(QuotedGraph, subject)) self.write(" .") return True else: @@ -76,7 +80,7 @@ def s_clause(self, subject): def p_clause(self, node, position): if isinstance(node, Graph): - self.subjectDone(node) + self.subjectDone(cast(QuotedGraph, node)) if position is OBJECT: self.write(" ") self.write("{") diff --git a/rdflib/plugins/serializers/nquads.py b/rdflib/plugins/serializers/nquads.py index 3c8d02ccc..1f2de0b6d 100644 --- a/rdflib/plugins/serializers/nquads.py +++ b/rdflib/plugins/serializers/nquads.py @@ -1,7 +1,7 @@ from __future__ import annotations import warnings -from typing import IO, Optional +from typing import IO, Any from rdflib.graph import ConjunctiveGraph, Graph from rdflib.plugins.serializers.nt import _quoteLiteral @@ -12,6 +12,8 @@ class NQuadsSerializer(Serializer): + """NQuads RDF graph serializer.""" + def __init__(self, store: Graph): if not store.context_aware: raise Exception( @@ -24,10 +26,10 @@ def __init__(self, store: Graph): def serialize( self, stream: IO[bytes], - base: Optional[str] = None, - encoding: Optional[str] = None, - **args, - ): + base: str | None = None, + encoding: str | None = None, + **kwargs: Any, + ) -> None: if base is not None: warnings.warn("NQuadsSerializer does not support base.") if encoding is not None and encoding.lower() != self.encoding.lower(): diff --git a/rdflib/plugins/serializers/nt.py b/rdflib/plugins/serializers/nt.py index e87f949e3..7fb893f86 100644 --- a/rdflib/plugins/serializers/nt.py +++ b/rdflib/plugins/serializers/nt.py @@ -2,7 +2,7 @@ import codecs import warnings -from typing import IO, TYPE_CHECKING, Optional, Tuple, Union +from typing import IO, TYPE_CHECKING, Any from rdflib.graph import Graph from rdflib.serializer import Serializer @@ -21,9 +21,7 @@ class NTSerializer(Serializer): - """ - Serializes RDF graphs to NTriples format. - """ + """Serializes RDF graphs to NTriples format.""" def __init__(self, store: Graph): Serializer.__init__(self, store) @@ -31,9 +29,9 @@ def __init__(self, store: Graph): def serialize( self, stream: IO[bytes], - base: Optional[str] = None, - encoding: Optional[str] = "utf-8", - **args, + base: str | None = None, + encoding: str | None = "utf-8", + **kwargs: Any, ) -> None: if base is not None: warnings.warn("NTSerializer does not support base.") @@ -48,8 +46,7 @@ def serialize( class NT11Serializer(NTSerializer): - """ - Serializes RDF graphs to RDF 1.1 NTriples format. + """Serializes RDF graphs to RDF 1.1 NTriples format. Exactly like nt - only utf8 encoded. 
""" @@ -70,9 +67,7 @@ def _nt_row(triple: _TripleType) -> str: def _quoteLiteral(l_: Literal) -> str: # noqa: N802 - """ - a simpler version of term.Literal.n3() - """ + """A simpler version of term.Literal.n3()""" encoded = _quote_encode(l_) @@ -94,7 +89,7 @@ def _quote_encode(l_: str) -> str: def _nt_unicode_error_resolver( err: UnicodeError, -) -> Tuple[Union[str, bytes], int]: +) -> tuple[str | bytes, int]: """ Do unicode char replaces as defined in https://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#ntrip_strings """ diff --git a/rdflib/plugins/serializers/patch.py b/rdflib/plugins/serializers/patch.py index 3a5d37215..6eade915f 100644 --- a/rdflib/plugins/serializers/patch.py +++ b/rdflib/plugins/serializers/patch.py @@ -1,8 +1,7 @@ from __future__ import annotations import warnings -from typing import IO, Optional -from uuid import uuid4 +from typing import IO, Any from rdflib import Dataset from rdflib.plugins.serializers.nquads import _nq_row @@ -30,17 +29,20 @@ def __init__( def serialize( self, stream: IO[bytes], - base: Optional[str] = None, - encoding: Optional[str] = None, - **kwargs, - ): - """ - Serialize the store to the given stream. - :param stream: The stream to serialize to. - :param base: The base URI to use for the serialization. - :param encoding: The encoding to use for the serialization. - :param kwargs: Additional keyword arguments. + base: str | None = None, + encoding: str | None = None, + **kwargs: Any, + ) -> None: + """Serialize the store to the given stream. + + Args: + stream: The stream to serialize to. + base: The base URI to use for the serialization. + encoding: The encoding to use for the serialization. + kwargs: Additional keyword arguments. + Supported keyword arguments: + - operation: The operation to perform. Either 'add' or 'remove'. - target: The target Dataset to compare against. NB: Only one of 'operation' or 'target' should be provided. 
@@ -51,8 +53,6 @@ def serialize(
 target = kwargs.get("target")
 header_id = kwargs.get("header_id")
 header_prev = kwargs.get("header_prev")
- if not header_id:
- header_id = f"uuid:{uuid4()}"
 encoding = self.encoding
 if base is not None:
 warnings.warn("PatchSerializer does not support base.")
@@ -63,9 +63,10 @@
 )
 def write_header():
- stream.write(f"H id <{header_id}> .\n".encode(encoding, "replace"))
+ if header_id:
+ stream.write(f"H id <{header_id}> .\n".encode(encoding, "replace"))
 if header_prev:
- stream.write(f"H prev <{header_prev}>\n".encode(encoding, "replace"))
+ stream.write(f"H prev <{header_prev}> .\n".encode(encoding, "replace"))
 stream.write("TX .\n".encode(encoding, "replace"))
 def write_triples(contexts, op_code, use_passed_contexts=False):
diff --git a/rdflib/plugins/serializers/rdfxml.py b/rdflib/plugins/serializers/rdfxml.py
index d6a2f6abb..c48ad6542 100644
--- a/rdflib/plugins/serializers/rdfxml.py
+++ b/rdflib/plugins/serializers/rdfxml.py
@@ -1,7 +1,8 @@
 from __future__ import annotations
 import xml.dom.minidom
-from typing import IO, Dict, Generator, Optional, Set, Tuple
+from collections.abc import Generator
+from typing import IO, TYPE_CHECKING, Any
 from xml.sax.saxutils import escape, quoteattr
 from rdflib.collection import Collection
@@ -15,21 +16,25 @@
 from .xmlwriter import ESCAPE_ENTITIES
+if TYPE_CHECKING:
+ pass
+
 __all__ = ["fix", "XMLSerializer", "PrettyXMLSerializer"]
 class XMLSerializer(Serializer):
+ """RDF/XML RDF graph serializer."""
+
 def __init__(self, store: Graph):
 super(XMLSerializer, self).__init__(store)
- def __bindings(self) -> Generator[Tuple[str, URIRef], None, None]:
+ def __bindings(self) -> Generator[tuple[str, URIRef], None, None]:
 store = self.store
 nm = store.namespace_manager
- bindings: Dict[str, URIRef] = {}
+ bindings: dict[str, URIRef] = {}
 for predicate in set(store.predicates()):
- # type error: Argument 1 to "compute_qname_strict" of "NamespaceManager" has incompatible type "Node"; expected "str"
- prefix, namespace, name = nm.compute_qname_strict(predicate) # type: ignore[arg-type]
+ prefix, namespace, name = nm.compute_qname_strict(predicate)
 bindings[prefix] = URIRef(namespace)
 RDFNS = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#") # noqa: N806
@@ -45,9 +50,9 @@ def __bindings(self) -> Generator[Tuple[str, URIRef], None, None]:
 def serialize(
 self,
 stream: IO[bytes],
- base: Optional[str] = None,
- encoding: Optional[str] = None,
- **args,
+ base: str | None = None,
+ encoding: str | None = None,
+ **kwargs: Any,
 ) -> None:
 # if base is given here, use that, if not and a base is set for the graph use that
 if base is not None:
@@ -55,7 +60,7 @@ def serialize(
 elif self.store.base is not None:
 self.base = self.store.base
 self.__stream = stream
- self.__serialized: Dict[Identifier, int] = {}
+ self.__serialized: dict[Identifier, int] = {}
 encoding = self.encoding
 self.write = write = lambda uni: stream.write(uni.encode(encoding, "replace"))
@@ -66,8 +71,8 @@
 write("\n")
@@ -114,9 +118,7 @@ def subject(self, subject: Identifier, depth: int = 1) -> None:
 write(">\n")
 for predicate, object in self.store.predicate_objects(subject):
- # type error: Argument 1 to "predicate" of "XMLSerializer" has incompatible type "Node"; expected "Identifier"
- # type error: Argument 2 to "predicate" of "XMLSerializer" has incompatible type "Node"; expected "Identifier"
- self.predicate(predicate, object, depth + 1) # type: ignore[arg-type]
+ self.predicate(predicate, object, depth + 1)
 write("%s</%s>\n" % 
(indent, element_name)) else: @@ -167,32 +169,34 @@ def fix(val: str) -> str: class PrettyXMLSerializer(Serializer): + """Pretty RDF/XML RDF graph serializer.""" + def __init__(self, store: Graph, max_depth=3): super(PrettyXMLSerializer, self).__init__(store) - self.forceRDFAbout: Set[URIRef] = set() + self.forceRDFAbout: set[URIRef] = set() def serialize( self, stream: IO[bytes], - base: Optional[str] = None, - encoding: Optional[str] = None, - **args, + base: str | None = None, + encoding: str | None = None, + **kwargs: Any, ) -> None: - self.__serialized: Dict[Identifier, int] = {} + self.__serialized: dict[IdentifiedNode | Literal, int] = {} store = self.store # if base is given here, use that, if not and a base is set for the graph use that if base is not None: self.base = base elif store.base is not None: self.base = store.base - self.max_depth = args.get("max_depth", 3) + self.max_depth = kwargs.get("max_depth", 3) assert self.max_depth > 0, "max_depth must be greater than 0" self.nm = nm = store.namespace_manager self.writer = writer = XMLWriter(stream, nm, encoding) namespaces = {} - possible: Set[Node] = set(store.predicates()).union( + possible: set[Node] = set(store.predicates()).union( store.objects(None, RDF.type) ) @@ -205,14 +209,14 @@ def serialize( writer.push(RDFVOC.RDF) - if "xml_base" in args: - writer.attribute(XMLBASE, args["xml_base"]) + if "xml_base" in kwargs: + writer.attribute(XMLBASE, kwargs["xml_base"]) elif self.base: writer.attribute(XMLBASE, self.base) writer.namespaces(namespaces.items()) - subject: IdentifiedNode + subject: IdentifiedNode | Literal # Write out subjects that can not be inline # type error: Incompatible types in assignment (expression has type "Node", variable has type "IdentifiedNode") for subject in store.subjects(): # type: ignore[assignment] @@ -244,7 +248,7 @@ def serialize( # Set to None so that the memory can get garbage collected. 
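 # (The type-ignore is needed because __serialized is annotated as a plain
 # dict rather than Optional, so assigning None is a deliberate mismatch.)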
self.__serialized = None # type: ignore[assignment] - def subject(self, subject: Identifier, depth: int = 1): + def subject(self, subject: IdentifiedNode | Literal, depth: int = 1): store = self.store writer = self.writer @@ -265,8 +269,7 @@ def subject(self, subject: Identifier, depth: int = 1): type = None element = type or RDFVOC.Description - # type error: Argument 1 to "push" of "XMLWriter" has incompatible type "Node"; expected "str" - writer.push(element) # type: ignore[arg-type] + writer.push(element) if isinstance(subject, BNode): @@ -284,14 +287,13 @@ def subj_as_obj_more_than(ceil): writer.attribute(RDFVOC.about, self.relativize(subject)) if (subject, None, None) in store: - for predicate, object in store.predicate_objects(subject): - if not (predicate == RDF.type and object == type): - # type error: Argument 1 to "predicate" of "PrettyXMLSerializer" has incompatible type "Node"; expected "Identifier" - # type error: Argument 2 to "predicate" of "PrettyXMLSerializer" has incompatible type "Node"; expected "Identifier" - self.predicate(predicate, object, depth + 1) # type: ignore[arg-type] + for _predicate, _object in store.predicate_objects(subject): + object_: IdentifiedNode | Literal = _object # type: ignore[assignment] + predicate: IdentifiedNode = _predicate # type: ignore[assignment] + if not (predicate == RDF.type and object_ == type): + self.predicate(predicate, object_, depth + 1) - # type error: Argument 1 to "pop" of "XMLWriter" has incompatible type "Node"; expected "Optional[str]" - writer.pop(element) # type: ignore[arg-type] + writer.pop(element) elif subject in self.forceRDFAbout: # TODO FIXME?: this looks like a duplicate of first condition @@ -301,7 +303,10 @@ def subj_as_obj_more_than(ceil): self.forceRDFAbout.remove(subject) # type: ignore[arg-type] def predicate( - self, predicate: Identifier, object: Identifier, depth: int = 1 + self, + predicate: IdentifiedNode, + object: IdentifiedNode | Literal, + depth: int = 1, ) -> None: writer = self.writer store = self.store @@ -364,7 +369,7 @@ def predicate( else: if first( store.triples_choices( - # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "Tuple[Identifier, URIRef, List[URIRef]]"; expected "Union[Tuple[List[Node], Node, Node], Tuple[Node, List[Node], Node], Tuple[Node, Node, List[Node]]]" + # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "tuple[Identifier, URIRef, list[URIRef]]"; expected "Union[tuple[List[Node], Node, Node], tuple[Node, list[Node], Node], tuple[Node, Node, list[Node]]]" (object, RDF.type, [OWL_NS.Class, RDFS.Class]) # type: ignore[arg-type] ) ) and isinstance(object, URIRef): diff --git a/rdflib/plugins/serializers/trig.py b/rdflib/plugins/serializers/trig.py index 984f80c5a..012c4c9c2 100644 --- a/rdflib/plugins/serializers/trig.py +++ b/rdflib/plugins/serializers/trig.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing import IO, TYPE_CHECKING, Dict, List, Optional, Tuple, Union +from typing import IO, TYPE_CHECKING, Any, Union from rdflib.graph import ConjunctiveGraph, Graph from rdflib.plugins.serializers.turtle import TurtleSerializer @@ -18,11 +18,13 @@ class TrigSerializer(TurtleSerializer): + """TriG RDF graph serializer.""" + short_name = "trig" indentString = 4 * " " def __init__(self, store: Union[Graph, ConjunctiveGraph]): - self.default_context: Optional[Node] + self.default_context: Node | None if store.context_aware: if TYPE_CHECKING: assert isinstance(store, ConjunctiveGraph) @@ -56,18 +58,18 @@ def 
preprocess(self) -> None: def reset(self) -> None: super(TrigSerializer, self).reset() - self._contexts: Dict[ + self._contexts: dict[ _ContextType, - Tuple[List[_SubjectType], Dict[_SubjectType, bool]], + tuple[list[_SubjectType], dict[_SubjectType, bool]], ] = {} def serialize( self, stream: IO[bytes], - base: Optional[str] = None, - encoding: Optional[str] = None, - spacious: Optional[bool] = None, - **args, + base: str | None = None, + encoding: str | None = None, + spacious: bool | None = None, + **kwargs: Any, ): self.reset() self.stream = stream @@ -96,7 +98,7 @@ def serialize( if self.default_context and store.identifier == self.default_context: self.write(self.indent() + "\n{") else: - iri: Optional[str] + iri: str | None if isinstance(store.identifier, BNode): iri = store.identifier.n3() else: diff --git a/rdflib/plugins/serializers/trix.py b/rdflib/plugins/serializers/trix.py index 008360e6b..c0ddaad31 100644 --- a/rdflib/plugins/serializers/trix.py +++ b/rdflib/plugins/serializers/trix.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import IO, Optional +from typing import IO, Any from rdflib.graph import ConjunctiveGraph, Graph from rdflib.namespace import Namespace @@ -16,6 +16,8 @@ class TriXSerializer(Serializer): + """TriX RDF graph serializer.""" + def __init__(self, store: Graph): super(TriXSerializer, self).__init__(store) if not store.context_aware: @@ -26,9 +28,9 @@ def __init__(self, store: Graph): def serialize( self, stream: IO[bytes], - base: Optional[str] = None, - encoding: Optional[str] = None, - **args, + base: str | None = None, + encoding: str | None = None, + **kwargs: Any, ): nm = self.store.namespace_manager diff --git a/rdflib/plugins/serializers/turtle.py b/rdflib/plugins/serializers/turtle.py index a26df04a6..64f58d840 100644 --- a/rdflib/plugins/serializers/turtle.py +++ b/rdflib/plugins/serializers/turtle.py @@ -6,17 +6,11 @@ from __future__ import annotations from collections import defaultdict +from collections.abc import Mapping, Sequence from typing import ( IO, TYPE_CHECKING, Any, - DefaultDict, - Dict, - List, - Mapping, - Optional, - Sequence, - Tuple, ) from rdflib.exceptions import Error @@ -26,21 +20,23 @@ from rdflib.term import BNode, Literal, Node, URIRef if TYPE_CHECKING: - from rdflib.graph import _PredicateType, _SubjectType, _TripleType + from rdflib.graph import _ObjectType, _PredicateType, _SubjectType, _TripleType __all__ = ["RecursiveSerializer", "TurtleSerializer"] class RecursiveSerializer(Serializer): + """Base class for recursive serializers.""" + topClasses = [RDFS.Class] predicateOrder = [RDF.type, RDFS.label] maxDepth = 10 indentString = " " - roundtrip_prefixes: Tuple[Any, ...] = () + roundtrip_prefixes: tuple[Any, ...] 
= () def __init__(self, store: Graph): super(RecursiveSerializer, self).__init__(store) - self.stream: Optional[IO[bytes]] = None + self.stream: IO[bytes] | None = None self.reset() def addNamespace(self, prefix: str, uri: URIRef) -> None: @@ -66,13 +62,13 @@ def isDone(self, subject: _SubjectType) -> bool: """Return true if subject is serialized""" return subject in self._serialized - def orderSubjects(self) -> List[_SubjectType]: - seen: Dict[_SubjectType, bool] = {} - subjects: List[_SubjectType] = [] + def orderSubjects(self) -> list[_SubjectType]: + seen: dict[_SubjectType, bool] = {} + subjects: list[_SubjectType] = [] for classURI in self.topClasses: members = list(self.store.subjects(RDF.type, classURI)) - members.sort() + members = sorted(members) subjects.extend(members) for member in members: @@ -102,12 +98,12 @@ def preprocessTriple(self, spo: _TripleType) -> None: def reset(self) -> None: self.depth = 0 # Typed none because nothing is using it ... - self.lists: Dict[None, None] = {} - self.namespaces: Dict[str, URIRef] = {} - self._references: DefaultDict[Node, int] = defaultdict(int) - self._serialized: Dict[_SubjectType, bool] = {} - self._subjects: Dict[_SubjectType, bool] = {} - self._topLevels: Dict[_SubjectType, bool] = {} + self.lists: dict[None, None] = {} + self.namespaces: dict[str, URIRef] = {} + self._references: defaultdict[Node, int] = defaultdict(int) + self._serialized: dict[_SubjectType, bool] = {} + self._subjects: dict[_SubjectType, bool] = {} + self._topLevels: dict[_SubjectType, bool] = {} if self.roundtrip_prefixes: if hasattr(self.roundtrip_prefixes, "__iter__"): @@ -120,12 +116,12 @@ def reset(self) -> None: def buildPredicateHash( self, subject: _SubjectType - ) -> Mapping[_PredicateType, List[Node]]: + ) -> Mapping[_PredicateType, list[_ObjectType]]: """ Build a hash key by predicate to a list of objects for the given subject """ - properties: Dict[_PredicateType, List[Node]] = {} + properties: dict[_PredicateType, list[_ObjectType]] = {} for s, p, o in self.store.triples((subject, None, None)): oList = properties.get(p, []) oList.append(o) @@ -133,27 +129,27 @@ def buildPredicateHash( return properties def sortProperties( - self, properties: Mapping[_PredicateType, List[Node]] - ) -> List[_PredicateType]: + self, properties: Mapping[_PredicateType, list[_ObjectType]] + ) -> list[_PredicateType]: """Take a hash from predicate uris to lists of values. Sort the lists of values. 
Return a sorted list of properties.""" # Sort object lists - for prop, objects in properties.items(): - objects.sort() + property_keys = list(properties.keys()) + for prop in property_keys: + properties[prop].sort() # Make sorted list of properties - propList: List[_PredicateType] = [] - seen: Dict[_PredicateType, bool] = {} - for prop in self.predicateOrder: - if (prop in properties) and (prop not in seen): - propList.append(prop) - seen[prop] = True - props = list(properties.keys()) - props.sort() - for prop in props: - if prop not in seen: - propList.append(prop) - seen[prop] = True + propList: list[_PredicateType] = [] + seen: dict[_PredicateType, bool] = {} + for prop1 in self.predicateOrder: + if (prop1 in property_keys) and (prop1 not in seen): + propList.append(prop1) + seen[prop1] = True + props = sorted(property_keys) + for prop2 in props: + if prop2 not in seen: + propList.append(prop2) + seen[prop2] = True return propList def subjectDone(self, subject: _SubjectType) -> None: @@ -179,13 +175,15 @@ def write(self, text: str) -> None: class TurtleSerializer(RecursiveSerializer): + """Turtle RDF graph serializer.""" + short_name = "turtle" indentString = " " def __init__(self, store: Graph): - self._ns_rewrite: Dict[str, str] = {} + self._ns_rewrite: dict[str, str] = {} super(TurtleSerializer, self).__init__(store) - self.keywords: Dict[Node, str] = {RDF.type: "a"} + self.keywords: dict[Node, str] = {RDF.type: "a"} self.reset() self.stream = None self._spacious = _SPACIOUS_OUTPUT @@ -217,18 +215,18 @@ def addNamespace(self, prefix: str, namespace: URIRef) -> str: # type: ignore[o def reset(self) -> None: super(TurtleSerializer, self).reset() - # typing as Dict[None, None] because nothing seems to be using it - self._shortNames: Dict[None, None] = {} + # typing as dict[None, None] because nothing seems to be using it + self._shortNames: dict[None, None] = {} self._started = False self._ns_rewrite = {} def serialize( self, stream: IO[bytes], - base: Optional[str] = None, - encoding: Optional[str] = None, - spacious: Optional[bool] = None, - **args: Any, + base: str | None = None, + encoding: str | None = None, + spacious: bool | None = None, + **kwargs: Any, ) -> None: self.reset() self.stream = stream @@ -275,7 +273,7 @@ def preprocessTriple(self, triple: _TripleType) -> None: self._references[p] += 1 # TODO: Rename to get_pname - def getQName(self, uri: Node, gen_prefix: bool = True) -> Optional[str]: + def getQName(self, uri: Node, gen_prefix: bool = True) -> str | None: if not isinstance(uri, URIRef): return None @@ -400,7 +398,7 @@ def p_squared(self, node: Node, position: int, newline: bool = False) -> bool: return True - def isValidList(self, l_: Node) -> bool: + def isValidList(self, l_: _SubjectType) -> bool: """ Checks if l is a valid RDF list, i.e. no nodes have other properties. 
""" @@ -416,7 +414,7 @@ def isValidList(self, l_: Node) -> bool: l_ = self.store.value(l_, RDF.rest) # type: ignore[assignment] return True - def doList(self, l_: Node) -> None: + def doList(self, l_: _SubjectType) -> None: while l_: item = self.store.value(l_, RDF.first) if item is not None: @@ -425,7 +423,7 @@ def doList(self, l_: Node) -> None: # type error: Incompatible types in assignment (expression has type "Optional[Node]", variable has type "Node") l_ = self.store.value(l_, RDF.rest) # type: ignore[assignment] - def predicateList(self, subject: Node, newline: bool = False) -> None: + def predicateList(self, subject: _SubjectType, newline: bool = False) -> None: properties = self.buildPredicateHash(subject) propList = self.sortProperties(properties) if len(propList) == 0: diff --git a/rdflib/plugins/serializers/xmlwriter.py b/rdflib/plugins/serializers/xmlwriter.py index 8c00521ad..aaee0f50f 100644 --- a/rdflib/plugins/serializers/xmlwriter.py +++ b/rdflib/plugins/serializers/xmlwriter.py @@ -1,7 +1,8 @@ from __future__ import annotations import codecs -from typing import IO, TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple +from collections.abc import Iterable +from typing import IO, TYPE_CHECKING from xml.sax.saxutils import escape, quoteattr from rdflib.term import URIRef @@ -16,13 +17,15 @@ class XMLWriter: + """A simple XML writer that writes to a stream.""" + def __init__( self, stream: IO[bytes], namespace_manager: NamespaceManager, - encoding: Optional[str] = None, + encoding: str | None = None, decl: int = 1, - extra_ns: Optional[Dict[str, Namespace]] = None, + extra_ns: dict[str, Namespace] | None = None, ): encoding = encoding or "utf-8" encoder, decoder, stream_reader, stream_writer = codecs.lookup(encoding) @@ -32,7 +35,7 @@ def __init__( if decl: # type error: No overload variant of "write" of "IO" matches argument type "str" stream.write('' % encoding) # type: ignore[call-overload] - self.element_stack: List[str] = [] + self.element_stack: list[str] = [] self.nm = namespace_manager self.extra_ns = extra_ns or {} self.closed = True @@ -57,7 +60,7 @@ def push(self, uri: str) -> None: self.closed = False self.parent = False - def pop(self, uri: Optional[str] = None) -> None: + def pop(self, uri: str | None = None) -> None: top = self.element_stack.pop() if uri: assert uri == top @@ -73,7 +76,7 @@ def pop(self, uri: Optional[str] = None) -> None: self.parent = True def element( - self, uri: str, content: str, attributes: Dict[URIRef, str] = {} + self, uri: str, content: str, attributes: dict[URIRef, str] = {} ) -> None: """Utility method for adding a complete simple element""" self.push(uri) @@ -82,7 +85,7 @@ def element( self.text(content) self.pop() - def namespaces(self, namespaces: Iterable[Tuple[str, str]] = None) -> None: + def namespaces(self, namespaces: Iterable[tuple[str, str]] = None) -> None: if not namespaces: namespaces = self.nm.namespaces() diff --git a/rdflib/plugins/shared/__init__.py b/rdflib/plugins/shared/__init__.py index e69de29bb..98734b6fb 100644 --- a/rdflib/plugins/shared/__init__.py +++ b/rdflib/plugins/shared/__init__.py @@ -0,0 +1 @@ +"""Modules shared by serializers & parsers. Currently only JSON-LD code.""" diff --git a/rdflib/plugins/shared/jsonld/context.py b/rdflib/plugins/shared/jsonld/context.py index e6b668878..b3493fabe 100644 --- a/rdflib/plugins/shared/jsonld/context.py +++ b/rdflib/plugins/shared/jsonld/context.py @@ -1,24 +1,16 @@ """ -Implementation of the JSON-LD Context structure. 
See: - - http://json-ld.org/ - +Implementation of the JSON-LD Context structure. See: http://json-ld.org/ """ # https://github.com/RDFLib/rdflib-jsonld/blob/feature/json-ld-1.1/rdflib_jsonld/context.py from __future__ import annotations from collections import namedtuple +from collections.abc import Collection, Generator from typing import ( TYPE_CHECKING, Any, - Collection, - Dict, - Generator, - List, Optional, - Set, - Tuple, Union, ) from urllib.parse import urljoin, urlsplit @@ -56,6 +48,13 @@ ) from .util import norm_url, source_to_json, split_iri +if TYPE_CHECKING: + from typing_extensions import TypeAlias + + from rdflib.term import IdentifiedNode + + JSONLDSubjectType: TypeAlias = IdentifiedNode + NODE_KEYS = {GRAPH, ID, INCLUDED, JSON, LIST, NEST, NONE, REV, SET, TYPE, VALUE, LANG} @@ -69,7 +68,7 @@ class Defined(int): URI_GEN_DELIMS = (":", "/", "?", "#", "[", "]", "@") _ContextSourceType = Union[ - List[Union[Dict[str, Any], str, None]], Dict[str, Any], str, None + list[Union[dict[str, Any], str, None]], dict[str, Any], str, None ] @@ -77,33 +76,33 @@ class Context: def __init__( self, source: _ContextSourceType = None, - base: Optional[str] = None, - version: Optional[float] = 1.1, + base: str | None = None, + version: float | None = 1.1, ): self.version: float = version or 1.1 self.language = None - self.vocab: Optional[str] = None - self._base: Optional[str] + self.vocab: str | None = None + self._base: str | None self.base = base self.doc_base = base - self.terms: Dict[str, Any] = {} + self.terms: dict[str, Any] = {} # _alias maps NODE_KEY to list of aliases - self._alias: Dict[str, List[str]] = {} - self._lookup: Dict[Tuple[str, Any, Union[Defined, str], bool], Term] = {} - self._prefixes: Dict[str, Any] = {} + self._alias: dict[str, list[str]] = {} + self._lookup: dict[tuple[str, Any, Defined | str, bool], Term] = {} + self._prefixes: dict[str, Any] = {} self.active = False - self.parent: Optional[Context] = None + self.parent: Context | None = None self.propagate = True - self._context_cache: Dict[str, Any] = {} + self._context_cache: dict[str, Any] = {} if source: self.load(source) @property - def base(self) -> Optional[str]: + def base(self) -> str | None: return self._base @base.setter - def base(self, base: Optional[str]): + def base(self, base: str | None): if base: hash_index = base.find("#") if hash_index > -1: @@ -147,12 +146,12 @@ def _clear(self) -> None: self.active = False self.propagate = True - def get_context_for_term(self, term: Optional[Term]) -> Context: + def get_context_for_term(self, term: Term | None) -> Context: if term and term.context is not UNDEF: return self._subcontext(term.context, propagate=True) return self - def get_context_for_type(self, node: Any) -> Optional[Context]: + def get_context_for_type(self, node: Any) -> Context | None: if self.version >= 1.1: rtype = self.get_type(node) if isinstance(node, dict) else None if not isinstance(rtype, list): @@ -175,31 +174,31 @@ def get_context_for_type(self, node: Any) -> Optional[Context]: return self.parent if self.propagate is False else self - def get_id(self, obj: Dict[str, Any]) -> Any: + def get_id(self, obj: dict[str, Any]) -> Any: return self._get(obj, ID) - def get_type(self, obj: Dict[str, Any]) -> Any: + def get_type(self, obj: dict[str, Any]) -> Any: return self._get(obj, TYPE) - def get_language(self, obj: Dict[str, Any]) -> Any: + def get_language(self, obj: dict[str, Any]) -> Any: return self._get(obj, LANG) - def get_value(self, obj: Dict[str, Any]) -> Any: + def get_value(self, 
obj: dict[str, Any]) -> Any: return self._get(obj, VALUE) - def get_graph(self, obj: Dict[str, Any]) -> Any: + def get_graph(self, obj: dict[str, Any]) -> Any: return self._get(obj, GRAPH) - def get_list(self, obj: Dict[str, Any]) -> Any: + def get_list(self, obj: dict[str, Any]) -> Any: return self._get(obj, LIST) - def get_set(self, obj: Dict[str, Any]) -> Any: + def get_set(self, obj: dict[str, Any]) -> Any: return self._get(obj, SET) - def get_rev(self, obj: Dict[str, Any]) -> Any: + def get_rev(self, obj: dict[str, Any]) -> Any: return self._get(obj, REV) - def _get(self, obj: Dict[str, Any], key: str) -> Any: + def _get(self, obj: dict[str, Any], key: str) -> Any: for alias in self._alias.get(key, []): if alias in obj: return obj.get(alias) @@ -228,13 +227,13 @@ def add_term( self, name: str, idref: str, - coercion: Union[Defined, str] = UNDEF, - container: Union[Collection[Any], str, Defined] = UNDEF, - index: Optional[Union[str, Defined]] = None, - language: Optional[Union[str, Defined]] = UNDEF, + coercion: Defined | str = UNDEF, + container: Collection[Any] | str | Defined = UNDEF, + index: str | Defined | None = None, + language: str | Defined | None = UNDEF, reverse: bool = False, context: Any = UNDEF, - prefix: Optional[bool] = None, + prefix: bool | None = None, protected: bool = False, ): if self.version < 1.1 or prefix is None: @@ -270,7 +269,7 @@ def add_term( self.terms[name] = term - container_key: Union[Defined, str] + container_key: Defined | str for container_key in (LIST, LANG, SET): # , INDEX, ID, GRAPH): if container_key in container: break @@ -285,11 +284,11 @@ def add_term( def find_term( self, idref: str, - coercion: Optional[Union[str, Defined]] = None, - container: Union[Defined, str] = UNDEF, - language: Optional[str] = None, + coercion: str | Defined | None = None, + container: Defined | str | None = UNDEF, + language: str | None = None, reverse: bool = False, - ): + ) -> Term | None: lu = self._lookup if coercion is None: @@ -375,16 +374,16 @@ def shrink_iri(self, iri: str) -> str: elif self._base: if str(iri) == self._base: return "" - # type error: Argument 1 to "startswith" of "str" has incompatible type "Optional[str]"; expected "Union[str, Tuple[str, ...]]" + # type error: Argument 1 to "startswith" of "str" has incompatible type "Optional[str]"; expected "Union[str, tuple[str, ...]]" elif iri.startswith(self._basedomain): # type: ignore[arg-type] # type error: Argument 1 to "len" has incompatible type "Optional[str]"; expected "Sized" return iri[len(self._basedomain) :] # type: ignore[arg-type] return iri - def to_symbol(self, iri: str) -> Optional[str]: + def to_symbol(self, iri: str) -> str | None: iri = str(iri) - term = self.find_term(iri) - if term: + term: Term | None = self.find_term(iri) + if term is not None: return term.name ns, name = split_iri(iri) if ns == self.vocab: @@ -398,13 +397,13 @@ def to_symbol(self, iri: str) -> Optional[str]: def load( self, source: _ContextSourceType, - base: Optional[str] = None, - referenced_contexts: Set[Any] = None, + base: str | None = None, + referenced_contexts: set[Any] = None, ): self.active = True - sources: List[Tuple[Optional[str], Union[Dict[str, Any], str, None]]] = [] - # "Union[List[Union[Dict[str, Any], str]], List[Dict[str, Any]], List[str]]" : expression - # "Union[List[Dict[str, Any]], Dict[str, Any], List[str], str]" : variable + sources: list[tuple[str | None, dict[str, Any] | str | None]] = [] + # "Union[List[Union[Dict[str, Any], str]], list[Dict[str, Any]], list[str]]" : expression + # 
"Union[List[Dict[str, Any]], dict[str, Any], list[str], str]" : variable source = source if isinstance(source, list) else [source] referenced_contexts = referenced_contexts or set() self._prep_sources(base, source, sources, referenced_contexts) @@ -425,11 +424,11 @@ def _accept_term(self, key: str) -> bool: def _prep_sources( self, - base: Optional[str], - inputs: Union[List[Union[Dict[str, Any], str, None]], List[str]], - sources: List[Tuple[Optional[str], Union[Dict[str, Any], str, None]]], - referenced_contexts: Set[str], - in_source_url: Optional[str] = None, + base: str | None, + inputs: list[dict[str, Any] | str | None] | list[str], + sources: list[tuple[str | None, dict[str, Any] | str | None]], + referenced_contexts: set[str], + in_source_url: str | None = None, ): for source in inputs: source_url = in_source_url @@ -459,14 +458,14 @@ def _prep_sources( if isinstance(source, list): # type error: Statement is unreachable - self._prep_sources( # type: ignore[unreachable] + self._prep_sources( new_base, source, sources, referenced_contexts, source_url ) else: sources.append((source_url, source)) def _fetch_context( - self, source: str, base: Optional[str], referenced_contexts: Set[str] + self, source: str, base: str | None, referenced_contexts: set[str] ): # type error: Value of type variable "AnyStr" of "urljoin" cannot be "Optional[str]" source_url = urljoin(base, source) # type: ignore[type-var] @@ -492,9 +491,9 @@ def _fetch_context( def _read_source( self, - source: Dict[str, Any], - source_url: Optional[str] = None, - referenced_contexts: Optional[Set[str]] = None, + source: dict[str, Any], + source_url: str | None = None, + referenced_contexts: set[str] | None = None, ): imports = source.get(IMPORT) if imports: @@ -530,9 +529,9 @@ def _read_source( def _read_term( self, - source: Dict[str, Any], + source: dict[str, Any], name: str, - dfn: Union[Dict[str, Any], str], + dfn: dict[str, Any] | str, protected: bool = False, ) -> None: idref = None @@ -560,7 +559,7 @@ def _read_term( self.add_term( name, - idref, + idref, # type: ignore[arg-type] coercion, dfn.get(CONTAINER, UNDEF), dfn.get(INDEX, UNDEF), @@ -587,12 +586,12 @@ def _read_term( v.remove(name) def _rec_expand( - self, source: Dict[str, Any], expr: Optional[str], prev: Optional[str] = None - ) -> Optional[str]: + self, source: dict[str, Any], expr: str | None, prev: str | None = None + ) -> str | None: if expr == prev or expr in NODE_KEYS: return expr - nxt: Optional[str] + nxt: str | None # type error: Argument 1 to "_prep_expand" of "Context" has incompatible type "Optional[str]"; expected "str" is_term, pfx, nxt = self._prep_expand(expr) # type: ignore[arg-type] if pfx: @@ -616,7 +615,7 @@ def _rec_expand( return self._rec_expand(source, nxt, expr) - def _prep_expand(self, expr: str) -> Tuple[bool, Optional[str], str]: + def _prep_expand(self, expr: str) -> tuple[bool, str | None, str]: if ":" not in expr: return True, None, expr pfx, local = expr.split(":", 1) @@ -625,7 +624,7 @@ def _prep_expand(self, expr: str) -> Tuple[bool, Optional[str], str]: else: return False, None, expr - def _get_source_id(self, source: Dict[str, Any], key: str) -> Optional[str]: + def _get_source_id(self, source: dict[str, Any], key: str) -> str | None: # .. 
from source dict or if already defined term = source.get(key) if term is None: @@ -636,8 +635,8 @@ def _get_source_id(self, source: Dict[str, Any], key: str) -> Optional[str]: term = term.get(ID) return term - def _term_dict(self, term: Term) -> Union[Dict[str, Any], str]: - tdict: Dict[str, Any] = {} + def _term_dict(self, term: Term) -> dict[str, Any] | str: + tdict: dict[str, Any] = {} if term.type != UNDEF: tdict[TYPE] = self.shrink_iri(term.type) if term.container: @@ -652,19 +651,20 @@ def _term_dict(self, term: Term) -> Union[Dict[str, Any], str]: return tdict[ID] return tdict - def to_dict(self) -> Dict[str, Any]: + def to_dict(self) -> dict[str, Any]: """ Returns a dictionary representation of the context that can be serialized to JSON. - :return: a dictionary representation of the context. + Returns: + a dictionary representation of the context. """ r = {v: k for (k, v) in self._prefixes.items()} r.update({term.name: self._term_dict(term) for term in self._lookup.values()}) if self.base: r[BASE] = self.base if self.language: - r[LANG] = self.language + r[LANG] = self.language # type: ignore[unreachable] return r diff --git a/rdflib/plugins/shared/jsonld/util.py b/rdflib/plugins/shared/jsonld/util.py index 097a90b70..370f02055 100644 --- a/rdflib/plugins/shared/jsonld/util.py +++ b/rdflib/plugins/shared/jsonld/util.py @@ -5,7 +5,7 @@ import pathlib from html.parser import HTMLParser from io import StringIO, TextIOBase, TextIOWrapper -from typing import IO, TYPE_CHECKING, Any, Dict, List, Optional, TextIO, Tuple, Union +from typing import IO, TYPE_CHECKING, Any, TextIO, Union if TYPE_CHECKING: import json @@ -41,24 +41,22 @@ def source_to_json( - source: Optional[ - Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath] - ], - fragment_id: Optional[str] = None, - extract_all_scripts: Optional[bool] = False, -) -> Tuple[Union[Dict, List[Dict]], Any]: + source: IO[bytes] | TextIO | InputSource | str | bytes | pathlib.PurePath | None, + fragment_id: str | None = None, + extract_all_scripts: bool | None = False, +) -> tuple[Union[dict, list[dict]], Any]: """Extract JSON from a source document. The source document can be JSON or HTML with embedded JSON script elements (type attribute = "application/ld+json"). - To process as HTML ``source.content_type`` must be set to "text/html" or "application/xhtml+xml". + To process as HTML `source.content_type` must be set to "text/html" or "application/xhtml+xml". 
- :param source: the input source document (JSON or HTML) + Args: + source: the input source document (JSON or HTML) + fragment_id: if source is an HTML document then extract only the script element with matching id attribute, defaults to None + extract_all_scripts: if source is an HTML document then extract all script elements (unless fragment_id is provided), defaults to False (extract only the first script element) - :param fragment_id: if source is an HTML document then extract only the script element with matching id attribute, defaults to None - - :param extract_all_scripts: if source is an HTML document then extract all script elements (unless fragment_id is provided), defaults to False (extract only the first script element) - - :return: Tuple with the extracted JSON document and value of the HTML base element + Returns: + Tuple with the extracted JSON document and value of the HTML base element """ if isinstance(source, PythonInputSource): @@ -70,8 +68,8 @@ def source_to_json( # We can get the original string from the StringInputSource # It's hidden in the BytesIOWrapper 'wrapped' attribute b_stream = source.getByteStream() - original_string: Optional[str] = None - json_dict: Union[Dict, List[Dict]] + original_string: str | None = None + json_dict: dict | list[dict] if isinstance(b_stream, BytesIOWrapper): wrapped_inner = cast(Union[str, StringIO, TextIOBase], b_stream.wrapped) if isinstance(wrapped_inner, str): @@ -84,16 +82,16 @@ def source_to_json( elif isinstance(b_stream, BytesIOWrapper): # use the CharacterStream instead c_stream = source.getCharacterStream() - json_dict = orjson.loads(c_stream.read()) + json_dict = orjson.loads(c_stream.read()) # type: ignore[union-attr] else: # orjson assumes its in utf-8 encoding so # don't bother to check the source.getEncoding() - json_dict = orjson.loads(b_stream.read()) + json_dict = orjson.loads(b_stream.read()) # type: ignore[union-attr] else: if original_string is not None: json_dict = json.loads(original_string) else: - json_dict = json.load(source.getCharacterStream()) + json_dict = json.load(source.getCharacterStream()) # type: ignore[arg-type] return json_dict, html_base # TODO: conneg for JSON (fix support in rdflib's URLInputSource!) @@ -108,7 +106,7 @@ def source_to_json( "application/xhtml+xml", ) if is_html: - html_docparser: Optional[HTMLJSONParser] = HTMLJSONParser( + html_docparser: HTMLJSONParser | None = HTMLJSONParser( fragment_id=fragment_id, extract_all_scripts=extract_all_scripts ) else: @@ -126,10 +124,10 @@ def source_to_json( f"Source does not have a character stream or a byte stream and cannot be used {type(source)}" ) try: - b_encoding: Optional[str] = None if b_stream is None else source.getEncoding() + b_encoding: str | None = None if b_stream is None else source.getEncoding() except (AttributeError, LookupError): b_encoding = None - underlying_string: Optional[str] = None + underlying_string: str | None = None if b_stream is not None and isinstance(b_stream, BytesIOWrapper): # Try to find an underlying wrapped Unicode string to use? 
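 # (A BytesIOWrapper wraps an original str/StringIO source, so recovering it
 # here avoids decoding the byte stream a second time.)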
wrapped_inner = b_stream.wrapped @@ -149,7 +147,7 @@ def source_to_json( assert b_stream is not None if b_encoding is None: b_encoding = "utf-8" - html_string = TextIOWrapper(b_stream, encoding=b_encoding).read() + html_string = TextIOWrapper(b_stream, encoding=b_encoding).read() # type: ignore[type-var] html_docparser.feed(html_string) json_dict, html_base = html_docparser.get_json(), html_docparser.get_base() elif _HAS_ORJSON: @@ -179,7 +177,7 @@ def source_to_json( # b_stream is not None if b_encoding is None: b_encoding = "utf-8" - use_stream = TextIOWrapper(b_stream, encoding=b_encoding) + use_stream = TextIOWrapper(b_stream, encoding=b_encoding) # type: ignore[type-var] json_dict = json.load(use_stream) return json_dict, html_base finally: @@ -198,7 +196,7 @@ def source_to_json( VOCAB_DELIMS = ("#", "/", ":") -def split_iri(iri: str) -> Tuple[str, Optional[str]]: +def split_iri(iri: str) -> tuple[str, str | None]: for delim in VOCAB_DELIMS: at = iri.rfind(delim) if at > -1: @@ -208,6 +206,7 @@ def split_iri(iri: str) -> Tuple[str, Optional[str]]: def norm_url(base: str, url: str) -> str: """ + ```python >>> norm_url('http://example.org/', '/one') 'http://example.org/one' >>> norm_url('http://example.org/', '/one#') @@ -220,6 +219,8 @@ def norm_url(base: str, url: str) -> str: 'http://example.net/one' >>> norm_url('http://example.org/', 'http://example.org//one') 'http://example.org//one' + + ``` """ if "://" in url: return url @@ -251,9 +252,9 @@ def norm_url(base: str, url: str) -> str: # type error: Missing return statement -def context_from_urlinputsource(source: URLInputSource) -> Optional[str]: # type: ignore[return] +def context_from_urlinputsource(source: URLInputSource) -> str | None: # type: ignore[return] """ - Please note that JSON-LD documents served with the application/ld+json media type + Please note that JSON-LD documents served with the `application/ld+json` media type MUST have all context information, including references to external contexts, within the body of the document. Contexts linked via a http://www.w3.org/ns/json-ld#context HTTP Link Header MUST be @@ -288,12 +289,12 @@ def context_from_urlinputsource(source: URLInputSource) -> Optional[str]: # typ class HTMLJSONParser(HTMLParser): def __init__( self, - fragment_id: Optional[str] = None, - extract_all_scripts: Optional[bool] = False, + fragment_id: str | None = None, + extract_all_scripts: bool | None = False, ): super().__init__() self.fragment_id = fragment_id - self.json: List[Dict] = [] + self.json: list[dict] = [] self.contains_json = False self.fragment_id_does_not_match = False self.base = None @@ -348,7 +349,7 @@ def handle_data(self, data): self.script_count += 1 - def get_json(self) -> List[Dict]: + def get_json(self) -> list[dict]: return self.json def get_base(self): diff --git a/rdflib/plugins/sparql/__init__.py b/rdflib/plugins/sparql/__init__.py index 0ab7f80bf..8d29e8d07 100644 --- a/rdflib/plugins/sparql/__init__.py +++ b/rdflib/plugins/sparql/__init__.py @@ -1,7 +1,4 @@ -""" -SPARQL implementation for RDFLib - -.. 
versionadded:: 4.0
+"""SPARQL implementation for RDFLib
 """

 from importlib.metadata import entry_points
diff --git a/rdflib/plugins/sparql/aggregates.py b/rdflib/plugins/sparql/aggregates.py
index 12972e795..cac24059f 100644
--- a/rdflib/plugins/sparql/aggregates.py
+++ b/rdflib/plugins/sparql/aggregates.py
@@ -4,20 +4,11 @@

 from __future__ import annotations

+from collections.abc import Callable, Iterable, Mapping, MutableMapping
 from decimal import Decimal
 from typing import (
     Any,
-    Callable,
-    Dict,
-    Iterable,
-    List,
-    Mapping,
-    MutableMapping,
-    Optional,
-    Set,
-    Tuple,
     TypeVar,
-    Union,
     overload,
 )
@@ -34,7 +25,7 @@ class Accumulator:
     """abstract base class for different aggregation functions"""

     def __init__(self, aggregation: CompValue):
-        self.get_value: Callable[[], Optional[Literal]]
+        self.get_value: Callable[[], Literal | None]
         self.update: Callable[[FrozenBindings, Aggregator], None]
         self.var = aggregation.res
         self.expr = aggregation.vars
@@ -44,7 +35,7 @@ def __init__(self, aggregation: CompValue):
             self.distinct = False
         else:
             self.distinct = aggregation.distinct
-        self.seen: Set[Any] = set()
+        self.seen: set[Any] = set()

     def dont_care(self, row: FrozenBindings) -> bool:
         """skips distinct test"""
@@ -97,21 +88,19 @@ def use_row(self, row: FrozenBindings) -> bool:

 @overload
-def type_safe_numbers(*args: int) -> Tuple[int]: ...
+def type_safe_numbers(*args: int) -> tuple[int]: ...

 @overload
-def type_safe_numbers(
-    *args: Union[Decimal, float, int]
-) -> Tuple[Union[float, int]]: ...
+def type_safe_numbers(*args: Decimal | float | int) -> tuple[float | int, ...]: ...

-def type_safe_numbers(*args: Union[Decimal, float, int]) -> Iterable[Union[float, int]]:
+def type_safe_numbers(*args: Decimal | float | int) -> Iterable[float | int]:
     if any(isinstance(arg, float) for arg in args) and any(
         isinstance(arg, Decimal) for arg in args
     ):
         return map(float, args)
-    # type error: Incompatible return value type (got "Tuple[Union[Decimal, float, int], ...]", expected "Iterable[Union[float, int]]")
+    # type error: Incompatible return value type (got "tuple[Decimal | float | int, ...]", expected "Iterable[float | int]")
     # NOTE on type error: if args contains a Decimal it will not get here.
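    # Worked example (illustrative, not from the test suite): Decimal + float
    # would raise TypeError, so mixed Decimal/float arguments are coerced:
    #   type_safe_numbers(Decimal("1.5"), 2.0)  ->  map(float, ...)  ->  1.5, 2.0
    # Decimal mixed only with int is safe to add, so args passes through:
    #   type_safe_numbers(Decimal("1.5"), 2)    ->  (Decimal("1.5"), 2)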
return args # type: ignore[return-value] @@ -120,7 +109,7 @@ class Sum(Accumulator): def __init__(self, aggregation: CompValue): super(Sum, self).__init__(aggregation) self.value = 0 - self.datatype: Optional[str] = None + self.datatype: str | None = None def update(self, row: FrozenBindings, aggregator: Aggregator) -> None: try: @@ -148,7 +137,7 @@ def __init__(self, aggregation: CompValue): super(Average, self).__init__(aggregation) self.counter = 0 self.sum = 0 - self.datatype: Optional[str] = None + self.datatype: str | None = None def update(self, row: FrozenBindings, aggregator: Aggregator) -> None: try: @@ -246,7 +235,7 @@ def get_value(self) -> None: class GroupConcat(Accumulator): - value: List[Literal] + value: list[Literal] def __init__(self, aggregation: CompValue): super(GroupConcat, self).__init__(aggregation) @@ -291,9 +280,9 @@ class Aggregator: "Aggregate_GroupConcat": GroupConcat, } - def __init__(self, aggregations: List[CompValue]): - self.bindings: Dict[Variable, Identifier] = {} - self.accumulators: Dict[str, Accumulator] = {} + def __init__(self, aggregations: list[CompValue]): + self.bindings: dict[Variable, Identifier] = {} + self.accumulators: dict[str, Accumulator] = {} for a in aggregations: accumulator_class = self.accumulator_classes.get(a.name) if accumulator_class is None: diff --git a/rdflib/plugins/sparql/algebra.py b/rdflib/plugins/sparql/algebra.py index 5cb22d265..41cd13ae7 100644 --- a/rdflib/plugins/sparql/algebra.py +++ b/rdflib/plugins/sparql/algebra.py @@ -2,27 +2,18 @@ Converting the 'parse-tree' output of pyparsing to a SPARQL Algebra expression http://www.w3.org/TR/sparql11-query/#sparqlQuery - """ from __future__ import annotations -import collections import functools import operator import typing +from collections import defaultdict +from collections.abc import Callable, Iterable, Mapping from functools import reduce from typing import ( Any, - Callable, - DefaultDict, - Dict, - Iterable, - List, - Mapping, - Optional, - Set, - Tuple, overload, ) @@ -46,11 +37,11 @@ from rdflib.term import BNode, Identifier, Literal, URIRef, Variable -def OrderBy(p: CompValue, expr: List[CompValue]) -> CompValue: +def OrderBy(p: CompValue, expr: list[CompValue]) -> CompValue: return CompValue("OrderBy", p=p, expr=expr) -def ToMultiSet(p: typing.Union[List[Dict[Variable, str]], CompValue]) -> CompValue: +def ToMultiSet(p: typing.Union[list[dict[Variable, str]], CompValue]) -> CompValue: return CompValue("ToMultiSet", p=p) @@ -58,7 +49,7 @@ def Union(p1: CompValue, p2: CompValue) -> CompValue: return CompValue("Union", p1=p1, p2=p2) -def Join(p1: CompValue, p2: Optional[CompValue]) -> CompValue: +def Join(p1: CompValue, p2: CompValue | None) -> CompValue: return CompValue("Join", p1=p1, p2=p2) @@ -71,7 +62,7 @@ def Graph(term: Identifier, graph: CompValue) -> CompValue: def BGP( - triples: Optional[List[Tuple[Identifier, Identifier, Identifier]]] = None + triples: list[tuple[Identifier, Identifier, Identifier]] | None = None ) -> CompValue: return CompValue("BGP", triples=triples or []) @@ -90,23 +81,23 @@ def Extend( return CompValue("Extend", p=p, expr=expr, var=var) -def Values(res: List[Dict[Variable, str]]) -> CompValue: +def Values(res: list[dict[Variable, str]]) -> CompValue: return CompValue("values", res=res) -def Project(p: CompValue, PV: List[Variable]) -> CompValue: +def Project(p: CompValue, PV: list[Variable]) -> CompValue: return CompValue("Project", p=p, PV=PV) -def Group(p: CompValue, expr: Optional[List[Variable]] = None) -> CompValue: +def 
Group(p: CompValue, expr: list[Variable] | None = None) -> CompValue: return CompValue("Group", p=p, expr=expr) def _knownTerms( - triple: Tuple[Identifier, Identifier, Identifier], - varsknown: Set[typing.Union[BNode, Variable]], - varscount: Dict[Identifier, int], -) -> Tuple[int, int, bool]: + triple: tuple[Identifier, Identifier, Identifier], + varsknown: set[typing.Union[BNode, Variable]], + varscount: dict[Identifier, int], +) -> tuple[int, int, bool]: return ( len( [ @@ -121,24 +112,24 @@ def _knownTerms( def reorderTriples( - l_: Iterable[Tuple[Identifier, Identifier, Identifier]] -) -> List[Tuple[Identifier, Identifier, Identifier]]: + l_: Iterable[tuple[Identifier, Identifier, Identifier]] +) -> list[tuple[Identifier, Identifier, Identifier]]: """ Reorder triple patterns so that we execute the ones with most bindings first """ - def _addvar(term: str, varsknown: Set[typing.Union[Variable, BNode]]): + def _addvar(term: str, varsknown: set[typing.Union[Variable, BNode]]): if isinstance(term, (Variable, BNode)): varsknown.add(term) # NOTE on type errors: most of these are because the same variable is used # for different types. - # type error: List comprehension has incompatible type List[Tuple[None, Tuple[Identifier, Identifier, Identifier]]]; expected List[Tuple[Identifier, Identifier, Identifier]] + # type error: List comprehension has incompatible type list[tuple[None, tuple[Identifier, Identifier, Identifier]]]; expected list[tuple[Identifier, Identifier, Identifier]] l_ = [(None, x) for x in l_] # type: ignore[misc] - varsknown: Set[typing.Union[BNode, Variable]] = set() - varscount: Dict[Identifier, int] = collections.defaultdict(int) + varsknown: set[typing.Union[BNode, Variable]] = set() + varscount: dict[Identifier, int] = defaultdict(int) for t in l_: for c in t[1]: if isinstance(c, (Variable, BNode)): @@ -153,10 +144,10 @@ def _addvar(term: str, varsknown: Set[typing.Union[Variable, BNode]]): # we sort by decorate/undecorate, since we need the value of the sort keys while i < len(l_): - # type error: Generator has incompatible item type "Tuple[Any, Identifier]"; expected "Tuple[Identifier, Identifier, Identifier]" - # type error: Argument 1 to "_knownTerms" has incompatible type "Identifier"; expected "Tuple[Identifier, Identifier, Identifier]" + # type error: Generator has incompatible item type "tuple[Any, Identifier]"; expected "tuple[Identifier, Identifier, Identifier]" + # type error: Argument 1 to "_knownTerms" has incompatible type "Identifier"; expected "tuple[Identifier, Identifier, Identifier]" l_[i:] = sorted((_knownTerms(x[1], varsknown, varscount), x[1]) for x in l_[i:]) # type: ignore[misc,arg-type] - # type error: Incompatible types in assignment (expression has type "str", variable has type "Tuple[Identifier, Identifier, Identifier]") + # type error: Incompatible types in assignment (expression has type "str", variable has type "tuple[Identifier, Identifier, Identifier]") t = l_[i][0][0] # type: ignore[assignment] # top block has this many terms bound j = 0 while i + j < len(l_) and l_[i + j][0][0] == t: @@ -165,15 +156,15 @@ def _addvar(term: str, varsknown: Set[typing.Union[Variable, BNode]]): j += 1 i += 1 - # type error: List comprehension has incompatible type List[Identifier]; expected List[Tuple[Identifier, Identifier, Identifier]] + # type error: List comprehension has incompatible type list[Identifier]; expected list[tuple[Identifier, Identifier, Identifier]] return [x[1] for x in l_] # type: ignore[misc] def triples( l: typing.Union[ # noqa: 
E741 - List[List[Identifier]], List[Tuple[Identifier, Identifier, Identifier]] + list[list[Identifier]], list[tuple[Identifier, Identifier, Identifier]] ] -) -> List[Tuple[Identifier, Identifier, Identifier]]: +) -> list[tuple[Identifier, Identifier, Identifier]]: _l = reduce(lambda x, y: x + y, l) if (len(_l) % 3) != 0: raise Exception("these aint triples") @@ -183,7 +174,7 @@ def triples( # type error: Missing return statement def translatePName( # type: ignore[return] p: typing.Union[CompValue, str], prologue: Prologue -) -> Optional[Identifier]: +) -> Identifier | None: """ Expand prefixed/relative URIs """ @@ -210,7 +201,7 @@ def translatePath(p: CompValue) -> Path: ... # type error: Missing return statement -def translatePath(p: typing.Union[CompValue, URIRef]) -> Optional[Path]: # type: ignore[return] +def translatePath(p: CompValue | URIRef) -> Path | None: # type: ignore[return] """ Translate PropertyPath expressions """ @@ -276,10 +267,8 @@ def _c(n): return e -def collectAndRemoveFilters(parts: List[CompValue]) -> Optional[Expr]: - """ - - FILTER expressions apply to the whole group graph pattern in which +def collectAndRemoveFilters(parts: list[CompValue]) -> Expr | None: + """FILTER expressions apply to the whole group graph pattern in which they appear. http://www.w3.org/TR/sparql11-query/#sparqlCollectFilters @@ -297,14 +286,14 @@ def collectAndRemoveFilters(parts: List[CompValue]) -> Optional[Expr]: i += 1 if filters: - # type error: Argument 1 to "and_" has incompatible type "*List[Union[Expr, Literal, Variable]]"; expected "Expr" + # type error: Argument 1 to "and_" has incompatible type "*list[Union[Expr, Literal, Variable]]"; expected "Expr" return and_(*filters) # type: ignore[arg-type] return None -def translateGroupOrUnionGraphPattern(graphPattern: CompValue) -> Optional[CompValue]: - A: Optional[CompValue] = None +def translateGroupOrUnionGraphPattern(graphPattern: CompValue) -> CompValue | None: + A: CompValue | None = None for g in graphPattern.graph: g = translateGroupGraphPattern(g) @@ -337,7 +326,7 @@ def translateGroupGraphPattern(graphPattern: CompValue) -> CompValue: # The first output from translate cannot be None for a subselect query # as it can only be None for certain DESCRIBE queries. # type error: Argument 1 to "ToMultiSet" has incompatible type "Optional[CompValue]"; - # expected "Union[List[Dict[Variable, str]], CompValue]" + # expected "Union[list[dict[Variable, str]], CompValue]" return ToMultiSet(translate(graphPattern)[0]) # type: ignore[arg-type] if not graphPattern.part: @@ -345,7 +334,7 @@ def translateGroupGraphPattern(graphPattern: CompValue) -> CompValue: filters = collectAndRemoveFilters(graphPattern.part) - g: List[CompValue] = [] + g: list[CompValue] = [] for p in graphPattern.part: if p.name == "TriplesBlock": # merge adjacent TripleBlocks @@ -405,8 +394,7 @@ def _traverse( visitPre: Callable[[Any], Any] = lambda n: None, visitPost: Callable[[Any], Any] = lambda n: None, ): - """ - Traverse a parse-tree, visit each node + """Traverse a parse-tree, visit each node if visit functions return a value, replace current node """ @@ -419,21 +407,23 @@ def _traverse( if isinstance(e, (list, ParseResults)): return [_traverse(x, visitPre, visitPost) for x in e] - elif isinstance(e, tuple): + # MyPy on Python 3.9 thinks this part is unreachable, I don't know why. 
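+    # The extra "unused-ignore" error code is presumably there so that newer
+    # MyPy versions, which do consider this branch reachable, do not then
+    # flag the ignore itself as unused.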
+ elif isinstance(e, tuple): # type: ignore[unreachable, unused-ignore] return tuple([_traverse(x, visitPre, visitPost) for x in e]) elif isinstance(e, CompValue): for k, val in e.items(): e[k] = _traverse(val, visitPre, visitPost) - _e = visitPost(e) + # MyPy on Python 3.9 thinks this part is unreachable, I don't know why. + _e = visitPost(e) # type: ignore[unreachable, unused-ignore] if _e is not None: return _e return e -def _traverseAgg(e, visitor: Callable[[Any, Any], Any] = lambda n, v: None): +def _traverseAgg(e: Any, visitor: Callable[[Any, Any], Any] = lambda n, v: None): """ Traverse a parse-tree, visit each node @@ -444,7 +434,8 @@ def _traverseAgg(e, visitor: Callable[[Any, Any], Any] = lambda n, v: None): if isinstance(e, (list, ParseResults, tuple)): res = [_traverseAgg(x, visitor) for x in e] - elif isinstance(e, CompValue): + # MyPy on Python 3.9 thinks this part is unreachable, I don't know why. + elif isinstance(e, CompValue): # type: ignore[unreachable, unused-ignore] for k, val in e.items(): if val is not None: res.append(_traverseAgg(val, visitor)) @@ -456,7 +447,7 @@ def traverse( tree, visitPre: Callable[[Any], Any] = lambda n: None, visitPost: Callable[[Any], Any] = lambda n: None, - complete: Optional[bool] = None, + complete: bool | None = None, ) -> Any: """ Traverse tree, visit each node with visit function @@ -485,7 +476,7 @@ def _hasAggregate(x) -> None: # type error: Missing return statement -def _aggs(e, A) -> Optional[Variable]: # type: ignore[return] +def _aggs(e, A) -> Variable | None: # type: ignore[return] """ Collect Aggregates in A replaces aggregates with variable references @@ -501,7 +492,7 @@ def _aggs(e, A) -> Optional[Variable]: # type: ignore[return] # type error: Missing return statement -def _findVars(x, res: Set[Variable]) -> Optional[CompValue]: # type: ignore[return] +def _findVars(x, res: set[Variable]) -> CompValue | None: # type: ignore[return] """ Find all variables in a tree """ @@ -518,7 +509,7 @@ def _findVars(x, res: Set[Variable]) -> Optional[CompValue]: # type: ignore[ret return x -def _addVars(x, children: List[Set[Variable]]) -> Set[Variable]: +def _addVars(x, children: list[set[Variable]]) -> set[Variable]: """ find which variables may be bound by this part of the query """ @@ -552,7 +543,7 @@ def _addVars(x, children: List[Set[Variable]]) -> Set[Variable]: # type error: Missing return statement -def _sample(e: typing.Union[CompValue, List[Expr], Expr, List[str], Variable], v: Optional[Variable] = None) -> Optional[CompValue]: # type: ignore[return] +def _sample(e: typing.Union[CompValue, list[Expr], Expr, list[str], Variable], v: Variable | None = None) -> CompValue | None: # type: ignore[return] """ For each unaggregated variable V in expr Replace V with Sample(V) @@ -570,9 +561,9 @@ def _simplifyFilters(e: Any) -> Any: def translateAggregates( q: CompValue, M: CompValue -) -> Tuple[CompValue, List[Tuple[Variable, Variable]]]: - E: List[Tuple[Variable, Variable]] = [] - A: List[CompValue] = [] +) -> tuple[CompValue, list[tuple[Variable, Variable]]]: + E: list[tuple[Variable, Variable]] = [] + A: list[CompValue] = [] # collect/replace aggs in : # select expr as ?var @@ -606,11 +597,11 @@ def translateAggregates( def translateValues( v: CompValue, -) -> typing.Union[List[Dict[Variable, str]], CompValue]: +) -> typing.Union[list[dict[Variable, str]], CompValue]: # if len(v.var)!=len(v.value): # raise Exception("Unmatched vars and values in ValueClause: "+str(v)) - res: List[Dict[Variable, str]] = [] + res: list[dict[Variable, 
str]] = [] if not v.var: return res if not v.value: @@ -625,10 +616,9 @@ def translateValues( return Values(res) -def translate(q: CompValue) -> Tuple[Optional[CompValue], List[Variable]]: +def translate(q: CompValue) -> tuple[CompValue | None, list[Variable]]: """ http://www.w3.org/TR/sparql11-query/#convertSolMod - """ _traverse(q, _simplifyFilters) @@ -636,7 +626,7 @@ def translate(q: CompValue) -> Tuple[Optional[CompValue], List[Variable]]: q.where = traverse(q.where, visitPost=translatePath) # TODO: Var scope test - VS: Set[Variable] = set() + VS: set[Variable] = set() # All query types have a WHERE clause EXCEPT some DESCRIBE queries # where only explicit IRIs are provided. @@ -768,7 +758,7 @@ def translate(q: CompValue) -> Tuple[Optional[CompValue], List[Variable]]: def _find_first_child_projections(M: CompValue) -> Iterable[CompValue]: """ Recursively find the first child instance of a Projection operation in each of - the branches of the query execution plan/tree. + the branches of the query execution plan/tree. """ for child_op in M.values(): @@ -781,7 +771,7 @@ def _find_first_child_projections(M: CompValue) -> Iterable[CompValue]: # type error: Missing return statement -def simplify(n: Any) -> Optional[CompValue]: # type: ignore[return] +def simplify(n: Any) -> CompValue | None: # type: ignore[return] """Remove joins to empty BGPs""" if isinstance(n, CompValue): if n.name == "Join": @@ -815,9 +805,9 @@ def analyse(n: Any, children: Any) -> bool: def translatePrologue( p: ParseResults, - base: Optional[str], - initNs: Optional[Mapping[str, Any]] = None, - prologue: Optional[Prologue] = None, + base: str | None, + initNs: Mapping[str, Any] | None = None, + prologue: Prologue | None = None, ) -> Prologue: if prologue is None: prologue = Prologue() @@ -840,17 +830,17 @@ def translatePrologue( def translateQuads( quads: CompValue, -) -> Tuple[ - List[Tuple[Identifier, Identifier, Identifier]], - DefaultDict[str, List[Tuple[Identifier, Identifier, Identifier]]], +) -> tuple[ + list[tuple[Identifier, Identifier, Identifier]], + defaultdict[str, list[tuple[Identifier, Identifier, Identifier]]], ]: if quads.triples: alltriples = triples(quads.triples) else: alltriples = [] - allquads: DefaultDict[str, List[Tuple[Identifier, Identifier, Identifier]]] = ( - collections.defaultdict(list) + allquads: defaultdict[str, list[tuple[Identifier, Identifier, Identifier]]] = ( + defaultdict(list) ) if quads.quadsNotTriples: @@ -887,17 +877,17 @@ def translateUpdate1(u: CompValue, prologue: Prologue) -> CompValue: def translateUpdate( q: CompValue, - base: Optional[str] = None, - initNs: Optional[Mapping[str, Any]] = None, + base: str | None = None, + initNs: Mapping[str, Any] | None = None, ) -> Update: """ Returns a list of SPARQL Update Algebra expressions """ - res: List[CompValue] = [] + res: list[CompValue] = [] prologue = None if not q.request: - # type error: Incompatible return value type (got "List[CompValue]", expected "Update") + # type error: Incompatible return value type (got "list[CompValue]", expected "Update") return res # type: ignore[return-value] for p, u in zip(q.prologue, q.request): prologue = translatePrologue(p, base, initNs, prologue) @@ -916,8 +906,8 @@ def translateUpdate( def translateQuery( q: ParseResults, - base: Optional[str] = None, - initNs: Optional[Mapping[str, Any]] = None, + base: str | None = None, + initNs: Mapping[str, Any] | None = None, ) -> Query: """ Translate a query-parsetree to a SPARQL Algebra Expression @@ -955,17 +945,16 @@ class 
ExpressionNotCoveredException(Exception): # noqa: N818 class _AlgebraTranslator: - """ - Translator of a Query's algebra to its equivalent SPARQL (string). + """Translator of a Query's algebra to its equivalent SPARQL (string). Coded as a class to support storage of state during the translation process, without use of a file. Anticipated Usage: - .. code-block:: python - - translated_query = _AlgebraTranslator(query).translateAlgebra() + ```python + translated_query = _AlgebraTranslator(query).translateAlgebra() + ``` An external convenience function which wraps the above call, `translateAlgebra`, is supplied, so this class does not need to be @@ -974,9 +963,7 @@ class _AlgebraTranslator: def __init__(self, query_algebra: Query): self.query_algebra = query_algebra - self.aggr_vars: DefaultDict[Identifier, List[Identifier]] = ( - collections.defaultdict(list) - ) + self.aggr_vars: defaultdict[Identifier, list[Identifier]] = defaultdict(list) self._alg_translation: str = "" def _replace( @@ -1023,12 +1010,7 @@ def convert_node_arg( ) def sparql_query_text(self, node): - """ - https://www.w3.org/TR/sparql11-query/#sparqlSyntax - - :param node: - :return: - """ + """""" if isinstance(node, CompValue): # 18.2 Query Forms @@ -1655,9 +1637,12 @@ def translateAlgebra(query_algebra: Query) -> str: """ Translates a SPARQL 1.1 algebra tree into the corresponding query string. - :param query_algebra: An algebra returned by `translateQuery`. - :return: The query form generated from the SPARQL 1.1 algebra tree for - SELECT queries. + Args: + query_algebra: An algebra returned by `translateQuery`. + + Returns: + The query form generated from the SPARQL 1.1 algebra tree for + SELECT queries. """ query_from_algebra = _AlgebraTranslator( query_algebra=query_algebra diff --git a/rdflib/plugins/sparql/datatypes.py b/rdflib/plugins/sparql/datatypes.py index bc06525a0..5541cd720 100644 --- a/rdflib/plugins/sparql/datatypes.py +++ b/rdflib/plugins/sparql/datatypes.py @@ -4,7 +4,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Dict, List, Optional, Set +from typing import TYPE_CHECKING from rdflib.namespace import XSD @@ -12,7 +12,7 @@ from rdflib.term import URIRef -XSD_DTs: Set[URIRef] = set( +XSD_DTs: set[URIRef] = set( ( XSD.integer, XSD.decimal, @@ -43,7 +43,7 @@ XSD_Duration_DTs = set((XSD.duration, XSD.dayTimeDuration, XSD.yearMonthDuration)) -_sub_types: Dict[URIRef, List[URIRef]] = { +_sub_types: dict[URIRef, list[URIRef]] = { XSD.integer: [ XSD.nonPositiveInteger, XSD.negativeInteger, @@ -60,13 +60,13 @@ ], } -_super_types: Dict[URIRef, URIRef] = {} +_super_types: dict[URIRef, URIRef] = {} for superdt in XSD_DTs: for subdt in _sub_types.get(superdt, []): _super_types[subdt] = superdt # we only care about float, double, integer, decimal -_typePromotionMap: Dict[URIRef, Dict[URIRef, URIRef]] = { +_typePromotionMap: dict[URIRef, dict[URIRef, URIRef]] = { XSD.float: {XSD.integer: XSD.float, XSD.decimal: XSD.float, XSD.double: XSD.double}, XSD.double: { XSD.integer: XSD.double, @@ -86,7 +86,7 @@ } -def type_promotion(t1: URIRef, t2: Optional[URIRef]) -> URIRef: +def type_promotion(t1: URIRef, t2: URIRef | None) -> URIRef: if t2 is None: return t1 t1 = _super_types.get(t1, t1) diff --git a/rdflib/plugins/sparql/evaluate.py b/rdflib/plugins/sparql/evaluate.py index 82fe8034f..5a6b2cc59 100644 --- a/rdflib/plugins/sparql/evaluate.py +++ b/rdflib/plugins/sparql/evaluate.py @@ -6,30 +6,22 @@ evalPart is called on each level and will delegate to the right method -A 
rdflib.plugins.sparql.sparql.QueryContext is passed along, keeping +A `rdflib.plugins.sparql.sparql.QueryContext` is passed along, keeping information needed for evaluation A list of dicts (solution mappings) is returned, apart from GroupBy which may also return a dict of list of dicts - """ from __future__ import annotations -import collections import itertools import re +from collections import defaultdict, deque +from collections.abc import Generator, Iterable, Mapping from typing import ( TYPE_CHECKING, Any, - Deque, - Dict, - Generator, - Iterable, - List, - Mapping, - Optional, - Tuple, Union, ) from urllib.parse import urlencode @@ -72,11 +64,11 @@ orjson = None # type: ignore[assignment, unused-ignore] _HAS_ORJSON = False -_Triple = Tuple[Identifier, Identifier, Identifier] +_Triple = tuple[Identifier, Identifier, Identifier] def evalBGP( - ctx: QueryContext, bgp: List[_Triple] + ctx: QueryContext, bgp: list[_Triple] ) -> Generator[FrozenBindings, None, None]: """ A basic graph pattern @@ -93,7 +85,7 @@ def evalBGP( _o = ctx[o] # type error: Item "None" of "Optional[Graph]" has no attribute "triples" - # type Argument 1 to "triples" of "Graph" has incompatible type "Tuple[Union[str, Path, None], Union[str, Path, None], Union[str, Path, None]]"; expected "Tuple[Optional[Node], Optional[Node], Optional[Node]]" + # Argument 1 to "triples" of "Graph" has incompatible type "tuple[Union[str, Path, None], Union[str, Path, None], Union[str, Path, None]]"; expected "tuple[Optional[Union[IdentifiedNode, Literal, QuotedGraph, Variable]], Optional[IdentifiedNode], Optional[Union[IdentifiedNode, Literal, QuotedGraph, Variable]]]" [arg-type] for ss, sp, so in ctx.graph.triples((_s, _p, _o)): # type: ignore[union-attr, arg-type] if None in (_s, _p, _o): c = ctx.push() @@ -101,20 +93,17 @@ def evalBGP( c = ctx if _s is None: - # type error: Incompatible types in assignment (expression has type "Union[Node, Any]", target has type "Identifier") - c[s] = ss # type: ignore[assignment] + c[s] = ss try: if _p is None: - # type error: Incompatible types in assignment (expression has type "Union[Node, Any]", target has type "Identifier") - c[p] = sp # type: ignore[assignment] + c[p] = sp except AlreadyBound: continue try: if _o is None: - # type error: Incompatible types in assignment (expression has type "Union[Node, Any]", target has type "Identifier") - c[o] = so # type: ignore[assignment] + c[o] = so except AlreadyBound: continue @@ -166,7 +155,7 @@ def evalJoin(ctx: QueryContext, join: CompValue) -> Generator[FrozenDict, None, return _join(a, b) -def evalUnion(ctx: QueryContext, union: CompValue) -> List[Any]: +def evalUnion(ctx: QueryContext, union: CompValue) -> list[Any]: branch1_branch2 = [] for x in evalPart(ctx, union.p1): branch1_branch2.append(x) @@ -382,8 +371,8 @@ def evalServiceQuery(ctx: QueryContext, part: CompValue): res = json_dict["results"]["bindings"] if len(res) > 0: for r in res: - # type error: Argument 2 to "_yieldBindingsFromServiceCallResult" has incompatible type "str"; expected "Dict[str, Dict[str, str]]" - for bound in _yieldBindingsFromServiceCallResult(ctx, r, variables): # type: ignore[arg-type] + # type error: Argument 2 to "_yieldBindingsFromServiceCallResult" has incompatible type "str"; expected "Dict[str, dict[str, str]]" + for bound in _yieldBindingsFromServiceCallResult(ctx, r, variables): yield bound else: raise Exception( @@ -424,9 +413,9 @@ def _buildQueryStringForServiceCall(ctx: QueryContext, service_query: str) -> st def _yieldBindingsFromServiceCallResult( 
- ctx: QueryContext, r: Dict[str, Dict[str, str]], variables: List[str] + ctx: QueryContext, r: dict[str, dict[str, str]], variables: list[str] ) -> Generator[FrozenBindings, None, None]: - res_dict: Dict[Variable, Identifier] = {} + res_dict: dict[Variable, Identifier] = {} for var in variables: if var in r and r[var]: var_binding = r[var] @@ -468,9 +457,7 @@ def evalAggregateJoin( # p is always a Group, we always get a dict back group_expr = agg.p.expr - res: Dict[Any, Any] = collections.defaultdict( - lambda: Aggregator(aggregations=agg.A) - ) + res: dict[Any, Any] = defaultdict(lambda: Aggregator(aggregations=agg.A)) if group_expr is None: # no grouping, just COUNT in SELECT clause @@ -540,7 +527,7 @@ def evalReduced( # mixed data structure: set for lookup, deque for append/pop/remove mru_set = set() - mru_queue: Deque[Any] = collections.deque() + mru_queue: deque[Any] = deque() for row in evalPart(ctx, part.p): if row in mru_set: @@ -576,8 +563,8 @@ def evalProject(ctx: QueryContext, project: CompValue): def evalSelectQuery( ctx: QueryContext, query: CompValue -) -> Mapping[str, Union[str, List[Variable], Iterable[FrozenDict]]]: - res: Dict[str, Union[str, List[Variable], Iterable[FrozenDict]]] = {} +) -> Mapping[str, Union[str, list[Variable], Iterable[FrozenDict]]]: + res: dict[str, Union[str, list[Variable], Iterable[FrozenDict]]] = {} res["type_"] = "SELECT" res["bindings"] = evalPart(ctx, query.p) res["vars_"] = query.PV @@ -585,7 +572,7 @@ def evalSelectQuery( def evalAskQuery(ctx: QueryContext, query: CompValue) -> Mapping[str, Union[str, bool]]: - res: Dict[str, Union[bool, str]] = {} + res: dict[str, Union[bool, str]] = {} res["type_"] = "ASK" res["askAnswer"] = False for x in evalPart(ctx, query.p): @@ -609,14 +596,14 @@ def evalConstructQuery( for c in evalPart(ctx, query.p): graph += _fillTemplate(template, c) - res: Dict[str, Union[str, Graph]] = {} + res: dict[str, Union[str, Graph]] = {} res["type_"] = "CONSTRUCT" res["graph"] = graph return res -def evalDescribeQuery(ctx: QueryContext, query) -> Dict[str, Union[str, Graph]]: +def evalDescribeQuery(ctx: QueryContext, query) -> dict[str, Union[str, Graph]]: # Create a result graph and bind namespaces from the graph being queried graph = Graph() # type error: Item "None" of "Optional[Graph]" has no attribute "namespaces" @@ -644,7 +631,7 @@ def evalDescribeQuery(ctx: QueryContext, query) -> Dict[str, Union[str, Graph]]: # type error: Item "None" of "Optional[Graph]" has no attribute "cbd" ctx.graph.cbd(resource, target_graph=graph) # type: ignore[union-attr] - res: Dict[str, Union[str, Graph]] = {} + res: dict[str, Union[str, Graph]] = {} res["type_"] = "DESCRIBE" res["graph"] = graph @@ -654,22 +641,22 @@ def evalDescribeQuery(ctx: QueryContext, query) -> Dict[str, Union[str, Graph]]: def evalQuery( graph: Graph, query: Query, - initBindings: Optional[Mapping[str, Identifier]] = None, - base: Optional[str] = None, + initBindings: Mapping[str, Identifier] | None = None, + base: str | None = None, ) -> Mapping[Any, Any]: - """ + """Evaluate a SPARQL query against a graph. - .. caution:: + !!! warning "Caution" This method can access indirectly requested network endpoints, for example, query processing will attempt to access network endpoints - specified in ``SERVICE`` directives. + specified in `SERVICE` directives. When processing untrusted or potentially malicious queries, measures should be taken to restrict network and file access. 
For information on available security measures, see the RDFLib - :doc:`Security Considerations ` + [Security Considerations](../security_considerations.md) documentation. """ main = query.algebra diff --git a/rdflib/plugins/sparql/evalutils.py b/rdflib/plugins/sparql/evalutils.py index 1f737e469..b042ab1d7 100644 --- a/rdflib/plugins/sparql/evalutils.py +++ b/rdflib/plugins/sparql/evalutils.py @@ -1,18 +1,7 @@ from __future__ import annotations -import collections -from typing import ( - Any, - DefaultDict, - Generator, - Iterable, - Mapping, - Set, - Tuple, - TypeVar, - Union, - overload, -) +from collections import defaultdict +from typing import TYPE_CHECKING, Any, TypeVar, Union, overload from rdflib.plugins.sparql.operators import EBV from rdflib.plugins.sparql.parserutils import CompValue, Expr @@ -25,13 +14,20 @@ ) from rdflib.term import BNode, Identifier, Literal, URIRef, Variable -_ContextType = Union[FrozenBindings, QueryContext] +if TYPE_CHECKING: + from collections.abc import Generator, Iterable, Mapping + + from typing_extensions import TypeAlias + + from rdflib.graph import _TripleType + +_ContextType: TypeAlias = Union[FrozenBindings, QueryContext] _FrozenDictT = TypeVar("_FrozenDictT", bound=FrozenDict) def _diff( a: Iterable[_FrozenDictT], b: Iterable[_FrozenDictT], expr -) -> Set[_FrozenDictT]: +) -> set[_FrozenDictT]: res = set() for x in a: @@ -70,7 +66,7 @@ def _join( yield x.merge(y) -def _ebv(expr: Union[Literal, Variable, Expr], ctx: FrozenDict) -> bool: +def _ebv(expr: Literal | Variable | Expr, ctx: FrozenDict) -> bool: """ Return true/false for the given expr Either the expr is itself true/false @@ -101,22 +97,22 @@ def _ebv(expr: Union[Literal, Variable, Expr], ctx: FrozenDict) -> bool: @overload def _eval( - expr: Union[Literal, URIRef], + expr: Literal | URIRef, ctx: FrozenBindings, raise_not_bound_error: bool = ..., -) -> Union[Literal, URIRef]: ... +) -> Literal | URIRef: ... @overload def _eval( - expr: Union[Variable, Expr], + expr: Variable | Expr, ctx: FrozenBindings, raise_not_bound_error: bool = ..., -) -> Union[Any, SPARQLError]: ... +) -> Any | SPARQLError: ... 
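+# The overloads above encode that constant terms (Literal, URIRef) evaluate
+# to themselves, while a Variable or Expr may produce any value, or a
+# SPARQLError for a failed evaluation.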
def _eval( - expr: Union[Literal, URIRef, Variable, Expr], + expr: Literal | URIRef | Variable | Expr, ctx: FrozenBindings, raise_not_bound_error: bool = True, ) -> Any: @@ -139,7 +135,7 @@ def _eval( def _filter( - a: Iterable[FrozenDict], expr: Union[Literal, Variable, Expr] + a: Iterable[FrozenDict], expr: Literal | Variable | Expr ) -> Generator[FrozenDict, None, None]: for c in a: if _ebv(expr, c): @@ -147,16 +143,16 @@ def _filter( def _fillTemplate( - template: Iterable[Tuple[Identifier, Identifier, Identifier]], + template: Iterable[tuple[Identifier, Identifier, Identifier]], solution: _ContextType, -) -> Generator[Tuple[Identifier, Identifier, Identifier], None, None]: +) -> Generator[_TripleType, None, None]: """ For construct/deleteWhere and friends Fill a triple template with instantiated variables """ - bnodeMap: DefaultDict[BNode, BNode] = collections.defaultdict(BNode) + bnodeMap: defaultdict[BNode, BNode] = defaultdict(BNode) for t in template: s, p, o = t @@ -176,7 +172,7 @@ def _fillTemplate( _ValueT = TypeVar("_ValueT", Variable, BNode, URIRef, Literal) -def _val(v: _ValueT) -> Tuple[int, _ValueT]: +def _val(v: _ValueT) -> tuple[int, _ValueT]: """utilitity for ordering things""" if isinstance(v, Variable): return (0, v) diff --git a/rdflib/plugins/sparql/operators.py b/rdflib/plugins/sparql/operators.py index e4d19f664..ec819a585 100644 --- a/rdflib/plugins/sparql/operators.py +++ b/rdflib/plugins/sparql/operators.py @@ -3,7 +3,6 @@ They get bound as instances-methods to the CompValue objects from parserutils using setEvalFn - """ from __future__ import annotations @@ -18,7 +17,7 @@ import warnings from decimal import ROUND_HALF_DOWN, ROUND_HALF_UP, Decimal, InvalidOperation from functools import reduce -from typing import Any, Callable, Dict, NoReturn, Optional, Tuple, Union, overload +from typing import Any, Callable, NoReturn, Union, overload from urllib.parse import quote from pyparsing import ParseResults @@ -481,8 +480,11 @@ def Builtin_TIMEZONE(e: Expr, ctx) -> Literal: """ http://www.w3.org/TR/sparql11-query/#func-timezone - :returns: the timezone part of arg as an xsd:dayTimeDuration. - :raises: an error if there is no timezone. + Returns: + The timezone part of arg as an xsd:dayTimeDuration. + + Raises: + An error if there is no timezone. """ dt = datetime(e.arg) if not dt.tzinfo: @@ -538,8 +540,7 @@ def Builtin_UCASE(e: Expr, ctx) -> Literal: def Builtin_LANG(e: Expr, ctx) -> Literal: - """ - http://www.w3.org/TR/sparql11-query/#func-lang + """http://www.w3.org/TR/sparql11-query/#func-lang Returns the language tag of ltrl, if it has one. It returns "" if ltrl has no language tag. Note that the RDF data model does not include literals @@ -550,7 +551,7 @@ def Builtin_LANG(e: Expr, ctx) -> Literal: return Literal(l_.language or "") -def Builtin_DATATYPE(e: Expr, ctx) -> Optional[str]: +def Builtin_DATATYPE(e: Expr, ctx) -> str | None: l_ = e.arg if not isinstance(l_, Literal): raise SPARQLError("Can only get datatype of literal: %r" % l_) @@ -592,14 +593,13 @@ def Builtin_EXISTS(e: Expr, ctx: FrozenBindings) -> Literal: _CustomFunction = Callable[[Expr, FrozenBindings], Node] -_CUSTOM_FUNCTIONS: Dict[URIRef, Tuple[_CustomFunction, bool]] = {} +_CUSTOM_FUNCTIONS: dict[URIRef, tuple[_CustomFunction, bool]] = {} def register_custom_function( uri: URIRef, func: _CustomFunction, override: bool = False, raw: bool = False ) -> None: - """ - Register a custom SPARQL function. + """Register a custom SPARQL function. 
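+
+    For example (the URI and the function below are illustrative; how
+    arguments are passed is described next):
+
+    ```python
+    from rdflib import Literal, URIRef
+
+    def reverse(s: Literal) -> Literal:
+        return Literal(str(s)[::-1])
+
+    register_custom_function(URIRef("http://example.com/reverse"), reverse)
+    ```
+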
By default, the function will be passed the RDF terms in the argument list. If raw is True, the function will be passed an Expression and a Context. @@ -626,7 +626,7 @@ def decorator(func: _CustomFunction) -> _CustomFunction: def unregister_custom_function( - uri: URIRef, func: Optional[Callable[..., Any]] = None + uri: URIRef, func: Callable[..., Any] | None = None ) -> None: """ The 'func' argument is included for compatibility with existing code. @@ -993,7 +993,9 @@ def simplify(expr: Any) -> Any: if isinstance(expr, (list, ParseResults)): return list(map(simplify, expr)) - if not isinstance(expr, CompValue): + # I don't know why MyPy thinks this is unreachable + # Something to do with the Any type and the isinstance calls above. + if not isinstance(expr, CompValue): # type: ignore[unreachable, unused-ignore] return expr if expr.name.endswith("Expression"): if expr.other is None: @@ -1081,7 +1083,6 @@ def numeric(expr: Literal) -> Any: def dateTimeObjects(expr: Literal) -> Any: """ return a dataTime/date/time/duration/dayTimeDuration/yearMonthDuration python objects from a literal - """ return expr.toPython() @@ -1096,7 +1097,6 @@ def isCompatibleDateTimeDatatype( # type: ignore[return] """ Returns a boolean indicating if first object is compatible with operation(+/-) over second object. - """ if dt1 == XSD.date: if dt2 == XSD.yearMonthDuration: @@ -1132,7 +1132,6 @@ def calculateDuration( ) -> Literal: """ returns the duration Literal between two datetime - """ date1 = obj1 date2 = obj2 @@ -1180,8 +1179,7 @@ def EBV(rt: Union[Identifier, SPARQLError, Expr]) -> Union[bool, NoReturn]: ... def EBV(rt: Union[Identifier, SPARQLError, Expr]) -> bool: - """ - Effective Boolean Value (EBV) + """Effective Boolean Value (EBV) * If the argument is a typed literal with a datatype of xsd:boolean, the EBV is the value of that argument. @@ -1192,7 +1190,6 @@ def EBV(rt: Union[Identifier, SPARQLError, Expr]) -> bool: derived from a numeric type, the EBV is false if the operand value is NaN or is numerically equal to zero; otherwise the EBV is true. * All other arguments, including unbound arguments, produce a type error. - """ if isinstance(rt, Literal): @@ -1226,28 +1223,27 @@ def EBV(rt: Union[Identifier, SPARQLError, Expr]) -> bool: def _lang_range_check(range: Literal, lang: Literal) -> bool: """ Implementation of the extended filtering algorithm, as defined in point - 3.3.2, of U{RFC 4647}, on + 3.3.2, of [RFC 4647](http://www.rfc-editor.org/rfc/rfc4647.txt), on matching language ranges and language tags. - Needed to handle the C{rdf:PlainLiteral} datatype. - @param range: language range - @param lang: language tag - @rtype: boolean + Needed to handle the `rdf:PlainLiteral` datatype. - @author: U{Ivan Herman} + Args: + range: language range + lang: language tag - Taken from `RDFClosure/RestrictedDatatype.py`__ - - .. 
__:http://dev.w3.org/2004/PythonLib-IH/RDFClosure/RestrictedDatatype.py + Author: [Ivan Herman](http://www.w3.org/People/Ivan/) + Taken from [`RDFClosure/RestrictedDatatype.py`](http://dev.w3.org/2004/PythonLib-IH/RDFClosure/RestrictedDatatype.py) """ def _match(r: str, l_: str) -> bool: """ Matching of a range and language item: either range is a wildcard or the two are equal - @param r: language range item - @param l_: language tag item - @rtype: boolean + + Args: + r: language range item + l_: language tag item """ return r == "*" or r == l_ diff --git a/rdflib/plugins/sparql/parser.py b/rdflib/plugins/sparql/parser.py index 3ee230f53..0700ac2c0 100644 --- a/rdflib/plugins/sparql/parser.py +++ b/rdflib/plugins/sparql/parser.py @@ -8,9 +8,9 @@ import re import sys -from typing import Any, BinaryIO, List +from typing import Any, BinaryIO from typing import Optional as OptionalType -from typing import TextIO, Tuple, Union +from typing import TextIO, Union from pyparsing import CaselessKeyword as Keyword # watch out :) from pyparsing import ( @@ -46,22 +46,22 @@ def neg(literal: rdflib.Literal) -> rdflib.Literal: return rdflib.Literal(-literal, datatype=literal.datatype) -def setLanguage(terms: Tuple[Any, OptionalType[str]]) -> rdflib.Literal: +def setLanguage(terms: tuple[Any, OptionalType[str]]) -> rdflib.Literal: return rdflib.Literal(terms[0], lang=terms[1]) -def setDataType(terms: Tuple[Any, OptionalType[str]]) -> rdflib.Literal: +def setDataType(terms: tuple[Any, OptionalType[str]]) -> rdflib.Literal: return rdflib.Literal(terms[0], datatype=terms[1]) -def expandTriples(terms: ParseResults) -> List[Any]: +def expandTriples(terms: ParseResults) -> list[Any]: """ Expand ; and , syntax for repeat predicates, subjects """ # import pdb; pdb.set_trace() last_subject, last_predicate = None, None # Used for ; and , try: - res: List[Any] = [] + res: list[Any] = [] if DEBUG: print("Terms", terms) l_ = len(terms) @@ -117,7 +117,7 @@ def expandTriples(terms: ParseResults) -> List[Any]: raise -def expandBNodeTriples(terms: ParseResults) -> List[Any]: +def expandBNodeTriples(terms: ParseResults) -> list[Any]: """ expand [ ?p ?o ] syntax for implicit bnodes """ @@ -134,14 +134,14 @@ def expandBNodeTriples(terms: ParseResults) -> List[Any]: raise -def expandCollection(terms: ParseResults) -> List[List[Any]]: +def expandCollection(terms: ParseResults) -> list[list[Any]]: """ expand ( 1 2 3 ) notation for collections """ if DEBUG: print("Collection: ", terms) - res: List[Any] = [] + res: list[Any] = [] other = [] for x in terms: if isinstance(x, list): # is this a [ .. ] ? 
@@ -1483,7 +1483,7 @@ def expandCollection(terms: ParseResults) -> List[List[Any]]: AskQuery = Comp( "AskQuery", Keyword("ASK") - + Param("datasetClause", ZeroOrMore(DatasetClause)) + + ZeroOrMore(ParamList("datasetClause", DatasetClause)) + WhereClause + SolutionModifier + ValuesClause, diff --git a/rdflib/plugins/sparql/parserutils.py b/rdflib/plugins/sparql/parserutils.py index 7b85eb659..61398dbe1 100644 --- a/rdflib/plugins/sparql/parserutils.py +++ b/rdflib/plugins/sparql/parserutils.py @@ -21,22 +21,16 @@ Comp lets you set an evalFn that is bound to the eval method of the resulting CompValue - - """ from __future__ import annotations from collections import OrderedDict +from collections.abc import Callable, Mapping from types import MethodType from typing import ( TYPE_CHECKING, Any, - Callable, - List, - Mapping, - Optional, - Tuple, TypeVar, Union, ) @@ -60,8 +54,7 @@ def value( variables: bool = False, errors: bool = False, ) -> Any: - """ - utility function for evaluating something... + """Utility function for evaluating something... Variables will be looked up in the context Normally, non-bound vars is an error, @@ -69,7 +62,6 @@ def value( Normally, an error raises the error, set errors=True to return error - """ if isinstance(val, Expr): @@ -107,7 +99,7 @@ class ParamValue: """ def __init__( - self, name: str, tokenList: Union[List[Any], ParseResults], isList: bool + self, name: str, tokenList: Union[list[Any], ParseResults], isList: bool ): self.isList = isList self.name = name @@ -133,7 +125,7 @@ def __init__(self, name: str, expr, isList: bool = False): self.setName(name) self.addParseAction(self.postParse2) - def postParse2(self, tokenList: Union[List[Any], ParseResults]) -> ParamValue: + def postParse2(self, tokenList: Union[list[Any], ParseResults]) -> ParamValue: return ParamValue(self.name, tokenList, self.isList) @@ -154,7 +146,6 @@ class CompValue(OrderedDict): The result of parsing a Comp Any included Params are available as Dict keys or as attributes - """ def __init__(self, name: str, **values): @@ -210,7 +201,7 @@ class Expr(CompValue): def __init__( self, name: str, - evalfn: Optional[Callable[[Any, Any], Any]] = None, + evalfn: Callable[[Any, Any], Any] | None = None, **values, ): super(Expr, self).__init__(name, **values) @@ -221,7 +212,7 @@ def __init__( def eval(self, ctx: Any = {}) -> Union[SPARQLError, Any]: try: - self.ctx: Optional[Union[Mapping, FrozenBindings]] = ctx + self.ctx: Union[Mapping, FrozenBindings] | None = ctx # type error: "None" not callable return self._evalfn(ctx) # type: ignore[misc] except SPARQLError as e: @@ -242,7 +233,7 @@ def __init__(self, name: str, expr: ParserElement): self.expr = expr TokenConverter.__init__(self, expr) self.setName(name) - self.evalfn: Optional[Callable[[Any, Any], Any]] = None + self.evalfn: Callable[[Any, Any], Any] | None = None def postParse( self, instring: str, loc: int, tokenList: ParseResults @@ -280,7 +271,7 @@ def setEvalFn(self, evalfn: Callable[[Any, Any], Any]) -> Comp: def prettify_parsetree(t: ParseResults, indent: str = "", depth: int = 0) -> str: - out: List[str] = [] + out: list[str] = [] for e in t.asList(): out.append(_prettify_sub_parsetree(e, indent, depth + 1)) for k, v in sorted(t.items()): @@ -290,11 +281,11 @@ def prettify_parsetree(t: ParseResults, indent: str = "", depth: int = 0) -> str def _prettify_sub_parsetree( - t: Union[Identifier, CompValue, set, list, dict, Tuple, bool, None], + t: Union[Identifier, CompValue, set, list, dict, tuple, bool, None], indent: str = "", 
depth: int = 0, ) -> str: - out: List[str] = [] + out: list[str] = [] if isinstance(t, CompValue): out.append("%s%s> %s:\n" % (indent, " " * depth, t.name)) for k, v in t.items(): diff --git a/rdflib/plugins/sparql/processor.py b/rdflib/plugins/sparql/processor.py index de97d80bd..26d24b3bf 100644 --- a/rdflib/plugins/sparql/processor.py +++ b/rdflib/plugins/sparql/processor.py @@ -2,12 +2,12 @@ Code for tying SPARQL Engine into RDFLib These should be automatically registered with RDFLib - """ from __future__ import annotations -from typing import Any, Mapping, Optional, Union +from collections.abc import Mapping +from typing import Any, Union from rdflib.graph import Graph from rdflib.plugins.sparql.algebra import translateQuery, translateUpdate @@ -21,8 +21,8 @@ def prepareQuery( queryString: str, - initNs: Optional[Mapping[str, Any]] = None, - base: Optional[str] = None, + initNs: Mapping[str, Any] | None = None, + base: str | None = None, ) -> Query: """ Parse and translate a SPARQL Query @@ -36,8 +36,8 @@ def prepareQuery( def prepareUpdate( updateString: str, - initNs: Optional[Mapping[str, Any]] = None, - base: Optional[str] = None, + initNs: Mapping[str, Any] | None = None, + base: str | None = None, ) -> Update: """ Parse and translate a SPARQL Update @@ -52,9 +52,9 @@ def prepareUpdate( def processUpdate( graph: Graph, updateString: str, - initBindings: Optional[Mapping[str, Identifier]] = None, - initNs: Optional[Mapping[str, Any]] = None, - base: Optional[str] = None, + initBindings: Mapping[str, Identifier] | None = None, + initNs: Mapping[str, Any] | None = None, + base: str | None = None, ) -> None: """ Process a SPARQL Update Request @@ -81,23 +81,23 @@ def __init__(self, graph): def update( self, - strOrQuery: Union[str, Update], - initBindings: Optional[Mapping[str, Identifier]] = None, - initNs: Optional[Mapping[str, Any]] = None, + strOrQuery: str | Update, + initBindings: Mapping[str, Identifier] | None = None, + initNs: Mapping[str, Any] | None = None, ) -> None: """ - .. caution:: + !!! warning "Caution" - This method can access indirectly requested network endpoints, for - example, query processing will attempt to access network endpoints - specified in ``SERVICE`` directives. + This method can access indirectly requested network endpoints, for + example, query processing will attempt to access network endpoints + specified in `SERVICE` directives. - When processing untrusted or potentially malicious queries, measures - should be taken to restrict network and file access. + When processing untrusted or potentially malicious queries, measures + should be taken to restrict network and file access. - For information on available security measures, see the RDFLib - :doc:`Security Considerations ` - documentation. + For information on available security measures, see the RDFLib + [Security Considerations](../security_considerations.md) + documentation. """ if isinstance(strOrQuery, str): @@ -117,9 +117,9 @@ def __init__(self, graph): def query( # type: ignore[override] self, strOrQuery: Union[str, Query], - initBindings: Optional[Mapping[str, Identifier]] = None, - initNs: Optional[Mapping[str, Any]] = None, - base: Optional[str] = None, + initBindings: Mapping[str, Identifier] | None = None, + initNs: Mapping[str, Any] | None = None, + base: str | None = None, DEBUG: bool = False, ) -> Mapping[str, Any]: """ @@ -127,18 +127,18 @@ def query( # type: ignore[override] namespaces. 
The given base is used to resolve relative URIs in the query and will be overridden by any BASE given in the query. - .. caution:: + !!! warning "Caution" - This method can access indirectly requested network endpoints, for - example, query processing will attempt to access network endpoints - specified in ``SERVICE`` directives. + This method can access indirectly requested network endpoints, for + example, query processing will attempt to access network endpoints + specified in `SERVICE` directives. - When processing untrusted or potentially malicious queries, measures - should be taken to restrict network and file access. + When processing untrusted or potentially malicious queries, measures + should be taken to restrict network and file access. - For information on available security measures, see the RDFLib - :doc:`Security Considerations ` - documentation. + For information on available security measures, see the RDFLib + [Security Considerations](../security_considerations.md) + documentation. """ if isinstance(strOrQuery, str): diff --git a/rdflib/plugins/sparql/results/csvresults.py b/rdflib/plugins/sparql/results/csvresults.py index 32b3e4212..fe893d69a 100644 --- a/rdflib/plugins/sparql/results/csvresults.py +++ b/rdflib/plugins/sparql/results/csvresults.py @@ -1,10 +1,8 @@ """ - This module implements a parser and serializer for the CSV SPARQL result formats http://www.w3.org/TR/sparql11-results-csv-tsv/ - """ from __future__ import annotations @@ -12,7 +10,7 @@ import codecs import csv from io import BufferedIOBase, TextIOBase -from typing import IO, Dict, List, Optional, Union, cast +from typing import IO, Union, cast from rdflib.plugins.sparql.processor import SPARQLResult from rdflib.query import Result, ResultParser, ResultSerializer @@ -20,11 +18,13 @@ class CSVResultParser(ResultParser): + """Parses SPARQL CSV results into a Result object.""" + def __init__(self): self.delim = "," # type error: Signature of "parse" incompatible with supertype "ResultParser" - def parse(self, source: IO, content_type: Optional[str] = None) -> Result: # type: ignore[override] + def parse(self, source: IO, content_type: str | None = None) -> Result: # type: ignore[override] r = Result("SELECT") # type error: Incompatible types in assignment (expression has type "StreamReader", variable has type "IO[Any]") @@ -43,15 +43,15 @@ def parse(self, source: IO, content_type: Optional[str] = None) -> Result: # ty return r def parseRow( - self, row: List[str], v: List[Variable] - ) -> Dict[Variable, Union[BNode, URIRef, Literal]]: + self, row: list[str], v: list[Variable] + ) -> dict[Variable, Union[BNode, URIRef, Literal]]: return dict( (var, val) for var, val in zip(v, [self.convertTerm(t) for t in row]) if val is not None ) - def convertTerm(self, t: str) -> Optional[Union[BNode, URIRef, Literal]]: + def convertTerm(self, t: str) -> BNode | URIRef | Literal | None: if t == "": return None if t.startswith("_:"): @@ -62,6 +62,8 @@ def convertTerm(self, t: str) -> Optional[Union[BNode, URIRef, Literal]]: class CSVResultSerializer(ResultSerializer): + """Serializes SPARQL results into CSV format.""" + def __init__(self, result: SPARQLResult): ResultSerializer.__init__(self, result) @@ -94,7 +96,7 @@ def serialize(self, stream: IO, encoding: str = "utf-8", **kwargs) -> None: ) def serializeTerm( - self, term: Optional[Identifier], encoding: str + self, term: Identifier | None, encoding: str ) -> Union[str, Identifier]: if term is None: return "" diff --git a/rdflib/plugins/sparql/results/jsonresults.py 
b/rdflib/plugins/sparql/results/jsonresults.py index cfc2dc1e1..bbb161189 100644 --- a/rdflib/plugins/sparql/results/jsonresults.py +++ b/rdflib/plugins/sparql/results/jsonresults.py @@ -6,16 +6,16 @@ http://projects.bigasterisk.com/sparqlhttp/ Authors: Drew Perttula, Gunnar Aastrand Grimnes - """ from __future__ import annotations import json -from typing import IO, Any, Dict, Mapping, MutableSequence, Optional +from collections.abc import Mapping, MutableSequence +from typing import IO, TYPE_CHECKING, Any from rdflib.query import Result, ResultException, ResultParser, ResultSerializer -from rdflib.term import BNode, Identifier, Literal, URIRef, Variable +from rdflib.term import BNode, Literal, URIRef, Variable try: import orjson @@ -25,10 +25,16 @@ orjson = None # type: ignore[assignment, unused-ignore] _HAS_ORJSON = False +if TYPE_CHECKING: + from rdflib.query import QueryResultValueType + from rdflib.term import IdentifiedNode + class JSONResultParser(ResultParser): + """Parses SPARQL JSON results into a Result object.""" + # type error: Signature of "parse" incompatible with supertype "ResultParser" - def parse(self, source: IO, content_type: Optional[str] = None) -> Result: # type: ignore[override] + def parse(self, source: IO, content_type: str | None = None) -> Result: # type: ignore[override] inp = source.read() if _HAS_ORJSON: try: @@ -43,12 +49,14 @@ def parse(self, source: IO, content_type: Optional[str] = None) -> Result: # ty class JSONResultSerializer(ResultSerializer): + """Serializes SPARQL results to JSON format.""" + def __init__(self, result: Result): ResultSerializer.__init__(self, result) # type error: Signature of "serialize" incompatible with supertype "ResultSerializer" def serialize(self, stream: IO, encoding: str = None) -> None: # type: ignore[override] - res: Dict[str, Any] = {} + res: dict[str, Any] = {} if self.result.type == "ASK": res["head"] = {} res["boolean"] = self.result.askAnswer @@ -84,17 +92,20 @@ def serialize(self, stream: IO, encoding: str = None) -> None: # type: ignore[o else: stream.write(r_str) - def _bindingToJSON(self, b: Mapping[Variable, Identifier]) -> Dict[Variable, Any]: + def _bindingToJSON( + self, b: Mapping[Variable, QueryResultValueType] + ) -> dict[Variable, Any]: res = {} for var in b: j = termToJSON(self, b[var]) if j is not None: + # TODO: Why is this not simply `res[var] = j`? res[var] = termToJSON(self, b[var]) return res class JSONResult(Result): - def __init__(self, json: Dict[str, Any]): + def __init__(self, json: dict[str, Any]): self.json = json if "boolean" in json: type_ = "ASK" @@ -111,22 +122,25 @@ def __init__(self, json: Dict[str, Any]): self.bindings = self._get_bindings() self.vars = [Variable(x) for x in json["head"]["vars"]] - def _get_bindings(self) -> MutableSequence[Mapping[Variable, Identifier]]: - ret: MutableSequence[Mapping[Variable, Identifier]] = [] + def _get_bindings(self) -> MutableSequence[Mapping[Variable, QueryResultValueType]]: + ret: MutableSequence[Mapping[Variable, QueryResultValueType]] = [] for row in self.json["results"]["bindings"]: - outRow: Dict[Variable, Identifier] = {} + outRow: dict[Variable, QueryResultValueType] = {} for k, v in row.items(): outRow[Variable(k)] = parseJsonTerm(v) ret.append(outRow) return ret -def parseJsonTerm(d: Dict[str, str]) -> Identifier: +def parseJsonTerm(d: dict[str, str]) -> IdentifiedNode | Literal: """rdflib object (Literal, URIRef, BNode) for the given json-format dict. 
    input is like:
-    { 'type': 'uri', 'value': 'http://famegame.com/2006/01/username' }
-    { 'type': 'literal', 'value': 'drewp' }
+
+    ```json
+    { "type": "uri", "value": "http://famegame.com/2006/01/username" }
+    { "type": "literal", "value": "drewp" }
+    ```
     """

     t = d["type"]
@@ -143,8 +157,8 @@ def parseJsonTerm(d: dict[str, str]) -> Identifier:

 def termToJSON(
-    self: JSONResultSerializer, term: Optional[Identifier]
-) -> Optional[Dict[str, str]]:
+    self: JSONResultSerializer, term: IdentifiedNode | Literal | None
+) -> dict[str, str] | None:
     if isinstance(term, URIRef):
         return {"type": "uri", "value": str(term)}
     elif isinstance(term, Literal):
diff --git a/rdflib/plugins/sparql/results/rdfresults.py b/rdflib/plugins/sparql/results/rdfresults.py
index c59a40c14..7dc7f1f74 100644
--- a/rdflib/plugins/sparql/results/rdfresults.py
+++ b/rdflib/plugins/sparql/results/rdfresults.py
@@ -1,22 +1,28 @@
 from __future__ import annotations

-from typing import IO, Any, MutableMapping, Optional, Union
+from typing import IO, TYPE_CHECKING, Any, Optional, cast

 from rdflib.graph import Graph
 from rdflib.namespace import RDF, Namespace
 from rdflib.query import Result, ResultParser
-from rdflib.term import Node, Variable
+from rdflib.term import Literal, Variable
+
+if TYPE_CHECKING:
+    from rdflib.graph import _ObjectType
+    from rdflib.term import IdentifiedNode

 RS = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/result-set#")

 class RDFResultParser(ResultParser):
-    def parse(self, source: Union[IO, Graph], **kwargs: Any) -> Result:
+    """This ResultParser is only used for DAWG standardised SPARQL tests."""
+
+    def parse(self, source: IO | Graph, **kwargs: Any) -> Result:
         return RDFResult(source, **kwargs)

 class RDFResult(Result):
-    def __init__(self, source: Union[IO, Graph], **kwargs: Any):
+    def __init__(self, source: IO | Graph, **kwargs: Any):
         if not isinstance(source, Graph):
             graph = Graph()
             graph.parse(source, **kwargs)
@@ -32,9 +38,9 @@ def __init__(self, source: Union[IO, Graph], **kwargs: Any):
             # use a new graph
             g = Graph()
             g += graph
-
+            askAnswer: Literal | None = None
         else:
-            askAnswer = graph.value(rs, RS.boolean)
+            askAnswer = cast(Optional[Literal], graph.value(rs, RS.boolean))

         if askAnswer is not None:
             type_ = "ASK"
@@ -44,27 +50,35 @@ def __init__(self, source: Union[IO, Graph], **kwargs: Any):
         Result.__init__(self, type_)

         if type_ == "SELECT":
-            # type error: Argument 1 to "Variable" has incompatible type "Node"; expected "str"
-            self.vars = [Variable(v) for v in graph.objects(rs, RS.resultVariable)]  # type: ignore[arg-type]
+            self.vars = [
+                # Technically we should check for QuotedGraph here, to make MyPy happy
+                Variable(v.identifier if isinstance(v, Graph) else v)  # type: ignore[unreachable]
+                for v in graph.objects(rs, RS.resultVariable)
+            ]
             self.bindings = []
             for s in graph.objects(rs, RS.solution):
-                sol: MutableMapping[Variable, Optional[Node]] = {}
+                sol: dict[Variable, IdentifiedNode | Literal] = {}
                 for b in graph.objects(s, RS.binding):
-                    # type error: Argument 1 to "Variable" has incompatible type "Optional[Node]"; expected "str"
-                    sol[Variable(graph.value(b, RS.variable))] = graph.value(  # type: ignore[arg-type]
-                        b, RS.value
-                    )
-                # error: Argument 1 to "append" of "list" has incompatible type "MutableMapping[Variable, Optional[Node]]"; expected "Mapping[Variable, Identifier]"
-                self.bindings.append(sol)  # type: ignore[arg-type]
+                    var_name: _ObjectType | str | None = graph.value(b, RS.variable)
+                    if var_name is None:
+                        continue
+                    # Technically we should check for
QuotedGraph here, to make MyPy happy + elif isinstance(var_name, Graph): # type: ignore[unreachable] + var_name = var_name.identifier # type: ignore[unreachable] + var_val = graph.value(b, RS.value) + if var_val is None: + continue + elif isinstance(var_val, (Graph, Variable)): + raise ValueError(f"Malformed rdf result binding {var_name}") + sol[Variable(var_name)] = var_val + self.bindings.append(sol) elif type_ == "ASK": - # type error: Item "Node" of "Optional[Node]" has no attribute "value" - # type error: Item "None" of "Optional[Node]" has no attribute "value" - self.askAnswer = askAnswer.value # type: ignore[union-attr] - # type error: Item "Node" of "Optional[Node]" has no attribute "value" - # type error: Item "None" of "Optional[Node]" has no attribute "value" - if askAnswer.value is None: # type: ignore[union-attr] + if askAnswer is None: + raise Exception("Malformed boolean in ask answer!") + self.askAnswer = askAnswer.value + if askAnswer.value is None: raise Exception("Malformed boolean in ask answer!") elif type_ == "CONSTRUCT": self.graph = g diff --git a/rdflib/plugins/sparql/results/tsvresults.py b/rdflib/plugins/sparql/results/tsvresults.py index 54b516d0d..5241a0eb2 100644 --- a/rdflib/plugins/sparql/results/tsvresults.py +++ b/rdflib/plugins/sparql/results/tsvresults.py @@ -7,7 +7,6 @@ from __future__ import annotations import codecs -import typing from typing import IO, Union from pyparsing import ( @@ -20,6 +19,7 @@ ZeroOrMore, ) +from rdflib import IdentifiedNode from rdflib.plugins.sparql.parser import ( BLANK_NODE_LABEL, IRIREF, @@ -32,7 +32,7 @@ ) from rdflib.plugins.sparql.parserutils import Comp, CompValue, Param from rdflib.query import Result, ResultParser -from rdflib.term import BNode, URIRef +from rdflib.term import BNode, URIRef, Variable from rdflib.term import Literal as RDFLiteral ParserElement.setDefaultWhitespaceChars(" \n") @@ -64,8 +64,10 @@ class TSVResultParser(ResultParser): + """Parses SPARQL TSV results into a Result object.""" + # type error: Signature of "parse" incompatible with supertype "ResultParser" [override] - def parse(self, source: IO, content_type: typing.Optional[str] = None) -> Result: # type: ignore[override] + def parse(self, source: IO, content_type: str | None = None) -> Result: # type: ignore[override] if isinstance(source.read(0), bytes): # if reading from source returns bytes do utf-8 decoding # type error: Incompatible types in assignment (expression has type "StreamReader", variable has type "IO[Any]") @@ -86,20 +88,28 @@ def parse(self, source: IO, content_type: typing.Optional[str] = None) -> Result continue row = ROW.parseString(line, parseAll=True) - # type error: Generator has incompatible item type "object"; expected "Identifier" - r.bindings.append(dict(zip(r.vars, (self.convertTerm(x) for x in row)))) # type: ignore[misc] - + this_row_dict: dict[Variable, IdentifiedNode | RDFLiteral] = {} + for var, val_read in zip(r.vars, row): + val = self.convertTerm(val_read) + if val is None: + # Skip unbound vars + continue + this_row_dict[var] = val + if len(this_row_dict) > 0: + r.bindings.append(this_row_dict) return r def convertTerm( self, t: Union[object, RDFLiteral, BNode, CompValue, URIRef] - ) -> typing.Optional[Union[object, BNode, URIRef, RDFLiteral]]: + ) -> BNode | URIRef | RDFLiteral | None: if t is NONE_VALUE: return None - if isinstance(t, CompValue): + elif isinstance(t, CompValue): if t.name == "literal": return RDFLiteral(t.string, lang=t.lang, datatype=t.datatype) else: raise Exception("I dont know how 
to handle this: %s" % (t,)) - else: + elif isinstance(t, (RDFLiteral, BNode, URIRef)): return t + else: + raise ValueError(f"Unexpected type {type(t)} found in TSV result") diff --git a/rdflib/plugins/sparql/results/txtresults.py b/rdflib/plugins/sparql/results/txtresults.py index 86d8933e3..50501e6f9 100644 --- a/rdflib/plugins/sparql/results/txtresults.py +++ b/rdflib/plugins/sparql/results/txtresults.py @@ -1,7 +1,7 @@ from __future__ import annotations from io import StringIO -from typing import IO, List, Optional, Union +from typing import IO from rdflib.namespace import NamespaceManager from rdflib.query import ResultSerializer @@ -9,8 +9,8 @@ def _termString( - t: Optional[Union[URIRef, Literal, BNode]], - namespace_manager: Optional[NamespaceManager], + t: URIRef | Literal | BNode | None, + namespace_manager: NamespaceManager | None, ) -> str: if t is None: return "-" @@ -35,7 +35,7 @@ def serialize( stream: IO, encoding: str = "utf-8", *, - namespace_manager: Optional[NamespaceManager] = None, + namespace_manager: NamespaceManager | None = None, **kwargs, ) -> None: """ @@ -58,7 +58,7 @@ def c(s, w): if not self.result: string_stream.write("(no results)\n") else: - keys: List[Variable] = self.result.vars # type: ignore[assignment] + keys: list[Variable] = self.result.vars # type: ignore[assignment] maxlen = [0] * len(keys) b = [ # type error: Value of type "Union[Tuple[Node, Node, Node], bool, ResultRow]" is not indexable diff --git a/rdflib/plugins/sparql/results/xmlresults.py b/rdflib/plugins/sparql/results/xmlresults.py index 3cc6b2c38..d28901f94 100644 --- a/rdflib/plugins/sparql/results/xmlresults.py +++ b/rdflib/plugins/sparql/results/xmlresults.py @@ -12,17 +12,14 @@ import logging import xml.etree.ElementTree as xml_etree # noqa: N813 +from collections.abc import Sequence from io import BytesIO from typing import ( IO, TYPE_CHECKING, Any, BinaryIO, - Dict, - Optional, - Sequence, TextIO, - Tuple, Union, cast, ) @@ -48,14 +45,16 @@ class XMLResultParser(ResultParser): + """A Parser for SPARQL results in XML.""" + # TODO FIXME: content_type should be a keyword only arg. 
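The result plugins touched in this section (json, rdf, tsv, txt, and the xml parser that follows) are normally reached through the plugin registry rather than instantiated directly. A minimal sketch of how `JSONResultParser` gets exercised, using a made-up SPARQL JSON document of the shape shown in the `parseJsonTerm` docstring above:

```python
from io import StringIO

from rdflib.query import Result

# Hypothetical SPARQL JSON result document; the shape follows the
# parseJsonTerm docstring example above.
data = """{
  "head": {"vars": ["name"]},
  "results": {"bindings": [
    {"name": {"type": "literal", "value": "drewp"}}
  ]}
}"""

# Result.parse dispatches on the format name to the registered
# ResultParser plugin, here JSONResultParser.
result = Result.parse(StringIO(data), format="json")
for row in result:
    print(row["name"])  # prints: drewp
```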
- def parse(self, source: IO, content_type: Optional[str] = None) -> Result: # type: ignore[override] + def parse(self, source: IO, content_type: str | None = None) -> Result: # type: ignore[override] return XMLResult(source) class XMLResult(Result): - def __init__(self, source: IO, content_type: Optional[str] = None): - parser_encoding: Optional[str] = None + def __init__(self, source: IO, content_type: str | None = None): + parser_encoding: str | None = None if hasattr(source, "encoding"): if TYPE_CHECKING: assert isinstance(source, TextIO) @@ -74,7 +73,7 @@ def __init__(self, source: IO, content_type: Optional[str] = None): ) else: xml_parser = xml_etree.XMLParser(encoding=parser_encoding) - tree = xml_etree.parse(source, parser=xml_parser) + tree = xml_etree.parse(source, parser=xml_parser) # type: ignore[assignment] boolean = tree.find(RESULTS_NS_ET + "boolean") results = tree.find(RESULTS_NS_ET + "results") @@ -153,6 +152,8 @@ def parseTerm(element: xml_etree.Element) -> Union[URIRef, Literal, BNode]: class XMLResultSerializer(ResultSerializer): + """Serializes SPARQL results into XML format.""" + def __init__(self, result: Result): ResultSerializer.__init__(self, result) @@ -209,8 +210,8 @@ def write_header(self, allvarsL: Sequence[Variable]) -> None: self.writer.startElementNS( (SPARQL_XML_NAMESPACE, "variable"), "variable", - # type error: Argument 1 to "AttributesNSImpl" has incompatible type "Dict[Tuple[None, str], str]"; expected "Mapping[Tuple[str, str], str]" - # type error: Argument 2 to "AttributesNSImpl" has incompatible type "Dict[Tuple[None, str], str]"; expected "Mapping[Tuple[str, str], str]" [arg-type] + # type error: Argument 1 to "AttributesNSImpl" has incompatible type "Dict[tuple[None, str], str]"; expected "Mapping[tuple[str, str], str]" + # type error: Argument 2 to "AttributesNSImpl" has incompatible type "Dict[tuple[None, str], str]"; expected "Mapping[tuple[str, str], str]" [arg-type] AttributesNSImpl(attr_vals, attr_qnames), # type: ignore[arg-type] ) self.writer.endElementNS((SPARQL_XML_NAMESPACE, "variable"), "variable") @@ -243,17 +244,17 @@ def write_end_result(self) -> None: def write_binding(self, name: Variable, val: Identifier) -> None: assert self._resultStarted - attr_vals: Dict[Tuple[Optional[str], str], str] = { + attr_vals: dict[tuple[str | None, str], str] = { (None, "name"): str(name), } - attr_qnames: Dict[Tuple[Optional[str], str], str] = { + attr_qnames: dict[tuple[str | None, str], str] = { (None, "name"): "name", } self.writer.startElementNS( (SPARQL_XML_NAMESPACE, "binding"), "binding", - # type error: Argument 1 to "AttributesNSImpl" has incompatible type "Dict[Tuple[None, str], str]"; expected "Mapping[Tuple[str, str], str]" - # type error: Argument 2 to "AttributesNSImpl" has incompatible type "Dict[Tuple[None, str], str]"; expected "Mapping[Tuple[str, str], str]" + # type error: Argument 1 to "AttributesNSImpl" has incompatible type "Dict[tuple[None, str], str]"; expected "Mapping[tuple[str, str], str]" + # type error: Argument 2 to "AttributesNSImpl" has incompatible type "Dict[tuple[None, str], str]"; expected "Mapping[tuple[str, str], str]" AttributesNSImpl(attr_vals, attr_qnames), # type: ignore[arg-type, unused-ignore] ) @@ -282,8 +283,8 @@ def write_binding(self, name: Variable, val: Identifier) -> None: self.writer.startElementNS( (SPARQL_XML_NAMESPACE, "literal"), "literal", - # type error: Argument 1 to "AttributesNSImpl" has incompatible type "Dict[Tuple[Optional[str], str], str]"; expected "Mapping[Tuple[str, str], str]" - 
# type error: Argument 2 to "AttributesNSImpl" has incompatible type "Dict[Tuple[Optional[str], str], str]"; expected "Mapping[Tuple[str, str], str]" + # type error: Argument 1 to "AttributesNSImpl" has incompatible type "Dict[tuple[Optional[str], str], str]"; expected "Mapping[tuple[str, str], str]" + # type error: Argument 2 to "AttributesNSImpl" has incompatible type "Dict[tuple[Optional[str], str], str]"; expected "Mapping[tuple[str, str], str]" AttributesNSImpl(attr_vals, attr_qnames), # type: ignore[arg-type, unused-ignore] ) self.writer.characters(val) diff --git a/rdflib/plugins/sparql/sparql.py b/rdflib/plugins/sparql/sparql.py index 8249a0ee8..575a851ed 100644 --- a/rdflib/plugins/sparql/sparql.py +++ b/rdflib/plugins/sparql/sparql.py @@ -4,17 +4,10 @@ import datetime import itertools import typing as t -from collections.abc import Mapping, MutableMapping +from collections.abc import Container, Generator, Iterable, Mapping, MutableMapping from typing import ( TYPE_CHECKING, Any, - Container, - Dict, - Generator, - Iterable, - List, - Optional, - Tuple, TypeVar, Union, ) @@ -33,12 +26,12 @@ class SPARQLError(Exception): - def __init__(self, msg: Optional[str] = None): + def __init__(self, msg: str | None = None): Exception.__init__(self, msg) class NotBoundError(SPARQLError): - def __init__(self, msg: Optional[str] = None): + def __init__(self, msg: str | None = None): SPARQLError.__init__(self, msg) @@ -50,7 +43,7 @@ def __init__(self): class SPARQLTypeError(SPARQLError): - def __init__(self, msg: Optional[str]): + def __init__(self, msg: str | None): SPARQLError.__init__(self, msg) @@ -64,8 +57,8 @@ class Bindings(MutableMapping): In python 3.3 this could be a collections.ChainMap """ - def __init__(self, outer: Optional[Bindings] = None, d=[]): - self._d: Dict[str, str] = dict(d) + def __init__(self, outer: Bindings | None = None, d=[]): + self._d: dict[str, str] = dict(d) self.outer = outer def __getitem__(self, key: str) -> str: @@ -91,14 +84,14 @@ def __delitem__(self, key: str) -> None: def __len__(self) -> int: i = 0 - d: Optional[Bindings] = self + d: Bindings | None = self while d is not None: i += len(d._d) d = d.outer return i def __iter__(self) -> Generator[str, None, None]: - d: Optional[Bindings] = self + d: Bindings | None = self while d is not None: yield from d._d d = d.outer @@ -120,8 +113,8 @@ class FrozenDict(Mapping): """ def __init__(self, *args: Any, **kwargs: Any): - self._d: Dict[Identifier, Identifier] = dict(*args, **kwargs) - self._hash: Optional[int] = None + self._d: dict[Identifier, Identifier] = dict(*args, **kwargs) + self._hash: int | None = None def __iter__(self): return iter(self._d) @@ -208,11 +201,11 @@ def bnodes(self) -> t.Mapping[Identifier, BNode]: return self.ctx.bnodes @property - def prologue(self) -> Optional[Prologue]: + def prologue(self) -> Prologue | None: return self.ctx.prologue def forget( - self, before: QueryContext, _except: Optional[Container[Variable]] = None + self, before: QueryContext, _except: Container[Variable] | None = None ) -> FrozenBindings: """ return a frozen dict only of bindings made in self @@ -250,9 +243,9 @@ class QueryContext: def __init__( self, - graph: Optional[Graph] = None, - bindings: Optional[Union[Bindings, FrozenBindings, List[Any]]] = None, - initBindings: Optional[Mapping[str, Identifier]] = None, + graph: Graph | None = None, + bindings: Bindings | FrozenBindings | list[Any] | None = None, + initBindings: Mapping[str, Identifier] | None = None, datasetClause=None, ): self.initBindings = 
initBindings @@ -260,8 +253,8 @@ def __init__( if initBindings: self.bindings.update(initBindings) - self.graph: Optional[Graph] - self._dataset: Optional[Union[Dataset, ConjunctiveGraph]] + self.graph: Graph | None + self._dataset: Dataset | ConjunctiveGraph | None if isinstance(graph, (Dataset, ConjunctiveGraph)): if datasetClause: self._dataset = Dataset() @@ -290,8 +283,8 @@ def __init__( self._dataset = None self.graph = graph - self.prologue: Optional[Prologue] = None - self._now: Optional[datetime.datetime] = None + self.prologue: Prologue | None = None + self._now: datetime.datetime | None = None self.bnodes: t.MutableMapping[Identifier, BNode] = collections.defaultdict( BNode @@ -304,7 +297,7 @@ def now(self) -> datetime.datetime: return self._now def clone( - self, bindings: Optional[Union[FrozenBindings, Bindings, List[Any]]] = None + self, bindings: FrozenBindings | Bindings | list[Any] | None = None ) -> QueryContext: r = QueryContext( self._dataset if self._dataset is not None else self.graph, @@ -331,21 +324,22 @@ def load( self, source: URIRef, default: bool = False, - into: Optional[Identifier] = None, + into: Identifier | None = None, **kwargs: Any, ) -> None: """ Load data from the source into the query context. - :param source: The source to load from. - :param default: If `True`, triples from the source will be added - to the default graph, otherwise it will be loaded into a - graph with ``source`` URI as its name. - :param into: The name of the graph to load the data into. If - `None`, the source URI will be used as as the name of the - graph. - :param kwargs: Keyword arguments to pass to - :meth:`rdflib.graph.Graph.parse`. + Args: + source: The source to load from. + default: If `True`, triples from the source will be added + to the default graph, otherwise it will be loaded into a + graph with `source` URI as its name. + into: The name of the graph to load the data into. If + `None`, the source URI will be used as the name of the + graph. + **kwargs: Keyword arguments to pass to + [`parse`][rdflib.graph.Graph.parse]. 
""" def _load(graph, source): @@ -382,7 +376,7 @@ def _load(graph, source): into = source _load(self.dataset.get_context(into), source) - def __getitem__(self, key: Union[str, Path]) -> Optional[Union[str, Path]]: + def __getitem__(self, key: str | Path) -> str | Path | None: # in SPARQL BNodes are just labels if not isinstance(key, (BNode, Variable)): return key @@ -391,13 +385,13 @@ def __getitem__(self, key: Union[str, Path]) -> Optional[Union[str, Path]]: except KeyError: return None - def get(self, key: str, default: Optional[Any] = None) -> Any: + def get(self, key: str, default: Any | None = None) -> Any: try: return self[key] except KeyError: return default - def solution(self, vars: Optional[Iterable[Variable]] = None) -> FrozenBindings: + def solution(self, vars: Iterable[Variable] | None = None) -> FrozenBindings: """ Return a static copy of the current variable bindings as dict """ @@ -414,7 +408,7 @@ def __setitem__(self, key: str, value: str) -> None: self.bindings[key] = value - def pushGraph(self, graph: Optional[Graph]) -> QueryContext: + def pushGraph(self, graph: Graph | None) -> QueryContext: r = self.clone() r.graph = graph return r @@ -441,21 +435,19 @@ class Prologue: """ def __init__(self) -> None: - self.base: Optional[str] = None + self.base: str | None = None self.namespace_manager = NamespaceManager(Graph()) # ns man needs a store - def resolvePName(self, prefix: Optional[str], localname: Optional[str]) -> URIRef: + def resolvePName(self, prefix: str | None, localname: str | None) -> URIRef: ns = self.namespace_manager.store.namespace(prefix or "") if ns is None: raise Exception("Unknown namespace prefix : %s" % prefix) return URIRef(ns + (localname or "")) - def bind(self, prefix: Optional[str], uri: Any) -> None: + def bind(self, prefix: str | None, uri: Any) -> None: self.namespace_manager.bind(prefix, uri, replace=True) - def absolutize( - self, iri: Optional[Union[CompValue, str]] - ) -> Optional[Union[CompValue, str]]: + def absolutize(self, iri: CompValue | str | None) -> CompValue | str | None: """ Apply BASE / PREFIXes to URIs (and to datatypes in Literals) @@ -485,7 +477,7 @@ class Query: def __init__(self, prologue: Prologue, algebra: CompValue): self.prologue = prologue self.algebra = algebra - self._original_args: Tuple[str, Mapping[str, str], Optional[str]] + self._original_args: tuple[str, Mapping[str, str], str | None] class Update: @@ -493,7 +485,7 @@ class Update: A parsed and translated update """ - def __init__(self, prologue: Prologue, algebra: List[CompValue]): + def __init__(self, prologue: Prologue, algebra: list[CompValue]): self.prologue = prologue self.algebra = algebra - self._original_args: Tuple[str, Mapping[str, str], Optional[str]] + self._original_args: tuple[str, Mapping[str, str], str | None] diff --git a/rdflib/plugins/sparql/update.py b/rdflib/plugins/sparql/update.py index cd22a7520..80107238c 100644 --- a/rdflib/plugins/sparql/update.py +++ b/rdflib/plugins/sparql/update.py @@ -1,12 +1,11 @@ """ - Code for carrying out Update Operations - """ from __future__ import annotations -from typing import TYPE_CHECKING, Iterator, Mapping, Optional, Sequence +from collections.abc import Iterator, Mapping, Sequence +from typing import TYPE_CHECKING from rdflib.graph import Graph from rdflib.plugins.sparql.evaluate import evalBGP, evalPart @@ -16,7 +15,7 @@ from rdflib.term import Identifier, URIRef, Variable -def _graphOrDefault(ctx: QueryContext, g: str) -> Optional[Graph]: +def _graphOrDefault(ctx: QueryContext, g: str) -> Graph | 
None: if g == "DEFAULT": return ctx.graph else: @@ -97,7 +96,7 @@ def evalInsertData(ctx: QueryContext, u: CompValue) -> None: # u.quads is a dict of graphURI=>[triples] for g in u.quads: # type error: Argument 1 to "get_context" of "ConjunctiveGraph" has incompatible type "Optional[Graph]"; expected "Union[IdentifiedNode, str, None]" - cg = ctx.dataset.get_context(g) # type: ignore[arg-type] + cg = ctx.dataset.get_context(g) cg += u.quads[g] @@ -113,7 +112,7 @@ def evalDeleteData(ctx: QueryContext, u: CompValue) -> None: # u.quads is a dict of graphURI=>[triples] for g in u.quads: # type error: Argument 1 to "get_context" of "ConjunctiveGraph" has incompatible type "Optional[Graph]"; expected "Union[IdentifiedNode, str, None]" - cg = ctx.dataset.get_context(g) # type: ignore[arg-type] + cg = ctx.dataset.get_context(g) cg -= u.quads[g] @@ -131,7 +130,7 @@ def evalDeleteWhere(ctx: QueryContext, u: CompValue) -> None: # type error: Incompatible types in assignment (expression has type "FrozenBindings", variable has type "QueryContext") for c in res: # type: ignore[assignment] g = ctx.graph - g -= _fillTemplate(u.triples, c) + g -= _fillTemplate(u.triples, c) # type: ignore[operator] for g in u.quads: cg = ctx.dataset.get_context(c.get(g)) @@ -142,7 +141,7 @@ def evalModify(ctx: QueryContext, u: CompValue) -> None: originalctx = ctx # Using replaces the dataset for evaluating the where-clause - dg: Optional[Graph] + dg: Graph | None if u.using: otherDefault = False for d in u.using: @@ -283,11 +282,9 @@ def evalCopy(ctx: QueryContext, u: CompValue) -> None: def evalUpdate( graph: Graph, update: Update, - initBindings: Optional[Mapping[str, Identifier]] = None, + initBindings: Mapping[str, Identifier] | None = None, ) -> None: - """ - - http://www.w3.org/TR/sparql11-update/#updateLanguage + """http://www.w3.org/TR/sparql11-update/#updateLanguage 'A request is a sequence of operations [...] Implementations MUST ensure that operations of a single request are executed in a @@ -302,17 +299,17 @@ def evalUpdate( This will return None on success and raise Exceptions on error - .. caution:: + !!! warning "Security Considerations" This method can access indirectly requested network endpoints, for example, query processing will attempt to access network endpoints - specified in ``SERVICE`` directives. + specified in `SERVICE` directives. When processing untrusted or potentially malicious queries, measures should be taken to restrict network and file access. For information on available security measures, see the RDFLib - :doc:`Security Considerations ` + [Security Considerations](../security_considerations.md) documentation. 
""" diff --git a/rdflib/plugins/stores/__init__.py b/rdflib/plugins/stores/__init__.py index 2eb475636..6e394f97a 100644 --- a/rdflib/plugins/stores/__init__.py +++ b/rdflib/plugins/stores/__init__.py @@ -1,3 +1,3 @@ """ -This package contains modules for additional RDFLib stores +Modules for additional RDFLib stores """ diff --git a/rdflib/plugins/stores/auditable.py b/rdflib/plugins/stores/auditable.py index 7a9748c69..4e91b9807 100644 --- a/rdflib/plugins/stores/auditable.py +++ b/rdflib/plugins/stores/auditable.py @@ -10,7 +10,7 @@ Calls to commit or rollback, flush the list of reverse operations This provides thread-safe atomicity and isolation (assuming concurrent operations occur with different store instances), but no durability (transactions are -persisted in memory and wont be available to reverse operations after the +persisted in memory and won't be available to reverse operations after the system fails): A and I out of ACID. """ @@ -18,7 +18,8 @@ from __future__ import annotations import threading -from typing import TYPE_CHECKING, Any, Generator, Iterator, List, Optional, Tuple +from collections.abc import Generator, Iterator +from typing import TYPE_CHECKING, Any from rdflib.graph import ConjunctiveGraph, Graph from rdflib.store import Store @@ -44,6 +45,8 @@ class AuditableStore(Store): + """A store that logs destructive operations (add/remove) in reverse order.""" + def __init__(self, store: Store): self.store = store self.context_aware = store.context_aware @@ -51,18 +54,18 @@ def __init__(self, store: Store): # info to reverse the removal of a quoted statement self.formula_aware = False # store.formula_aware self.transaction_aware = True # This is only half true - self.reverseOps: List[ - Tuple[ - Optional[_SubjectType], - Optional[_PredicateType], - Optional[_ObjectType], - Optional[_ContextIdentifierType], + self.reverseOps: list[ + tuple[ + _SubjectType | None, + _PredicateType | None, + _ObjectType | None, + _ContextIdentifierType | None, str, ] ] = [] self.rollbackLock = threading.RLock() - def open(self, configuration: str, create: bool = True) -> Optional[int]: + def open(self, configuration: str, create: bool = True) -> int | None: return self.store.open(configuration, create) def close(self, commit_pending_transaction: bool = False) -> None: @@ -97,7 +100,7 @@ def add( self.store.add((s, p, o), context, quoted) def remove( - self, spo: _TriplePatternType, context: Optional[_ContextType] = None + self, spo: _TriplePatternType, context: _ContextType | None = None ) -> None: subject, predicate, object_ = spo lock = destructiveOpLocks["remove"] @@ -141,8 +144,8 @@ def remove( self.store.remove((subject, predicate, object_), context) def triples( - self, triple: _TriplePatternType, context: Optional[_ContextType] = None - ) -> Iterator[Tuple[_TripleType, Iterator[Optional[_ContextType]]]]: + self, triple: _TriplePatternType, context: _ContextType | None = None + ) -> Iterator[tuple[_TripleType, Iterator[_ContextType | None]]]: (su, pr, ob) = triple context = ( context.__class__(self.store, context.identifier) @@ -152,7 +155,7 @@ def triples( for (s, p, o), cg in self.store.triples((su, pr, ob), context): yield (s, p, o), cg - def __len__(self, context: Optional[_ContextType] = None): + def __len__(self, context: _ContextType | None = None): context = ( context.__class__(self.store, context.identifier) if context is not None @@ -161,7 +164,7 @@ def __len__(self, context: Optional[_ContextType] = None): return self.store.__len__(context) def contexts( - self, triple: 
Optional[_TripleType] = None + self, triple: _TripleType | None = None ) -> Generator[_ContextType, None, None]: for ctx in self.store.contexts(triple): yield ctx @@ -169,13 +172,13 @@ def contexts( def bind(self, prefix: str, namespace: URIRef, override: bool = True) -> None: self.store.bind(prefix, namespace, override=override) - def prefix(self, namespace: URIRef) -> Optional[str]: + def prefix(self, namespace: URIRef) -> str | None: return self.store.prefix(namespace) - def namespace(self, prefix: str) -> Optional[URIRef]: + def namespace(self, prefix: str) -> URIRef | None: return self.store.namespace(prefix) - def namespaces(self) -> Iterator[Tuple[str, URIRef]]: + def namespaces(self) -> Iterator[tuple[str, URIRef]]: return self.store.namespaces() def commit(self) -> None: diff --git a/rdflib/plugins/stores/berkeleydb.py b/rdflib/plugins/stores/berkeleydb.py index 12009787c..6a034d1e6 100644 --- a/rdflib/plugins/stores/berkeleydb.py +++ b/rdflib/plugins/stores/berkeleydb.py @@ -1,10 +1,11 @@ from __future__ import annotations import logging +from collections.abc import Callable, Generator from os import mkdir from os.path import abspath, exists from threading import Thread -from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Optional from urllib.request import pathname2url from rdflib.store import NO_STORE, VALID_STORE, Store @@ -49,37 +50,35 @@ def bb(u: str) -> bytes: ] -_ToKeyFunc = Callable[[Tuple[bytes, bytes, bytes], bytes], bytes] -_FromKeyFunc = Callable[[bytes], Tuple[bytes, bytes, bytes, bytes]] +_ToKeyFunc = Callable[[tuple[bytes, bytes, bytes], bytes], bytes] +_FromKeyFunc = Callable[[bytes], tuple[bytes, bytes, bytes, bytes]] _GetPrefixFunc = Callable[ - [Tuple[str, str, str], Optional[str]], Generator[str, None, None] + [tuple[str, str, str], Optional[str]], Generator[str, None, None] ] _ResultsFromKeyFunc = Callable[ [bytes, Optional[Node], Optional[Node], Optional[Node], bytes], - Tuple[Tuple[Node, Node, Node], Generator[Node, None, None]], + tuple[tuple[Node, Node, Node], Generator[Node, None, None]], ] class BerkeleyDB(Store): - """\ - A store that allows for on-disk persistent using BerkeleyDB, a fast - key/value DB. + """A store that allows for on-disk persistence using BerkeleyDB, a fast key/value DB. This store implementation used to be known, previous to rdflib 6.0.0 as 'Sleepycat' due to that being the then name of the Python wrapper for BerkeleyDB. This store allows for quads as well as triples. See examples of use in both the `examples.berkeleydb_example` and `test/test_store/test_store_berkeleydb.py` files. **NOTE on installation**: To use this store, you must have BerkeleyDB installed on your system - separately to Python (``brew install berkeley-db`` on a Mac) and also have - the BerkeleyDB Python wrapper installed (``pip install berkeleydb``). + separately to Python (`brew install berkeley-db` on a Mac) and also have + the BerkeleyDB Python wrapper installed (`pip install berkeleydb`). 
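Beyond the install notes in the docstring, the store is typically reached through `Graph` with the registered plugin name rather than by constructing `BerkeleyDB` directly. A minimal sketch, assuming the `berkeleydb` wrapper is installed (the path and identifiers here are made up):

```python
import tempfile

from rdflib import Graph, URIRef

# Hypothetical on-disk location for the store files.
path = tempfile.mkdtemp()

g = Graph(store="BerkeleyDB", identifier=URIRef("urn:example:g"))
g.open(path, create=True)
g.add((URIRef("urn:example:s"), URIRef("urn:example:p"), URIRef("urn:example:o")))
g.close()

# Re-opening the same path sees the persisted triple.
g2 = Graph(store="BerkeleyDB", identifier=URIRef("urn:example:g"))
g2.open(path, create=False)
assert len(g2) == 1
g2.close()
```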
You may need to install BerkeleyDB Python wrapper like this: - ``YES_I_HAVE_THE_RIGHT_TO_USE_THIS_BERKELEY_DB_VERSION=1 pip install berkeleydb`` + `YES_I_HAVE_THE_RIGHT_TO_USE_THIS_BERKELEY_DB_VERSION=1 pip install berkeleydb` """ context_aware = True @@ -90,8 +89,8 @@ class BerkeleyDB(Store): def __init__( self, - configuration: Optional[str] = None, - identifier: Optional[Identifier] = None, + configuration: str | None = None, + identifier: Identifier | None = None, ): if not has_bsddb: raise ImportError("Unable to import berkeleydb, store is unusable.") @@ -100,9 +99,9 @@ def __init__( super(BerkeleyDB, self).__init__(configuration) self._loads = self.node_pickler.loads self._dumps = self.node_pickler.dumps - self.__indicies_info: List[Tuple[Any, _ToKeyFunc, _FromKeyFunc]] + self.__indicies_info: list[tuple[Any, _ToKeyFunc, _FromKeyFunc]] - def __get_identifier(self) -> Optional[Identifier]: + def __get_identifier(self) -> Identifier | None: return self.__identifier identifier = property(__get_identifier) @@ -127,7 +126,7 @@ def _init_db_environment( def is_open(self) -> bool: return self.__open - def open(self, path: str, create: bool = True) -> Optional[int]: + def open(self, path: str, create: bool = True) -> int | None: if not has_bsddb: return NO_STORE homeDir = path # noqa: N806 @@ -157,12 +156,12 @@ def open(self, path: str, create: bool = True) -> Optional[int]: dbsetflags = 0 # create and open the DBs - self.__indicies: List[db.DB] = [ + self.__indicies: list[db.DB] = [ None, ] * 3 # NOTE on type ingore: this is because type checker does not like this # way of initializing, using a temporary variable will solve it. - # type error: error: List item 0 has incompatible type "None"; expected "Tuple[Any, Callable[[Tuple[bytes, bytes, bytes], bytes], bytes], Callable[[bytes], Tuple[bytes, bytes, bytes, bytes]]]" + # type error: error: List item 0 has incompatible type "None"; expected "tuple[Any, Callable[[tuple[bytes, bytes, bytes], bytes], bytes], Callable[[bytes], tuple[bytes, bytes, bytes, bytes]]]" self.__indicies_info = [ None, # type: ignore[list-item] ] * 3 @@ -177,11 +176,11 @@ def open(self, path: str, create: bool = True) -> Optional[int]: self.__indicies[i] = index self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i)) - lookup: Dict[ - int, Tuple[db.DB, _GetPrefixFunc, _FromKeyFunc, _ResultsFromKeyFunc] + lookup: dict[ + int, tuple[db.DB, _GetPrefixFunc, _FromKeyFunc, _ResultsFromKeyFunc] ] = {} for i in range(0, 8): - results: List[Tuple[Tuple[int, int], int, int]] = [] + results: list[tuple[tuple[int, int], int, int]] = [] for start in range(0, 3): score = 1 len = 0 @@ -197,12 +196,12 @@ def open(self, path: str, create: bool = True) -> Optional[int]: results.sort() # NOTE on type error: this is because the variable `score` is # reused with different type - # type error: Incompatible types in assignment (expression has type "Tuple[int, int]", variable has type "int") + # type error: Incompatible types in assignment (expression has type "tuple[int, int]", variable has type "int") score, start, len = results[-1] # type: ignore[assignment] def get_prefix_func(start: int, end: int) -> _GetPrefixFunc: def get_prefix( - triple: Tuple[str, str, str], context: Optional[str] + triple: tuple[str, str, str], context: str | None ) -> Generator[str, None, None]: if context is None: yield "" @@ -303,7 +302,7 @@ def add( triple: _TripleType, context: _ContextType, quoted: bool = False, - txn: Optional[Any] = None, + txn: Any | None = None, ) -> None: """\ Add a triple to 
the store of triples. @@ -346,10 +345,10 @@ def add( def __remove( self, - spo: Tuple[bytes, bytes, bytes], + spo: tuple[bytes, bytes, bytes], c: bytes, quoted: bool = False, - txn: Optional[Any] = None, + txn: Any | None = None, ) -> None: s, p, o = spo cspo, cpos, cosp = self.__indicies @@ -383,8 +382,8 @@ def __remove( def remove( # type: ignore[override] self, spo: _TriplePatternType, - context: Optional[_ContextType], - txn: Optional[Any] = None, + context: _ContextType | None, + txn: Any | None = None, ) -> None: subject, predicate, object = spo assert self.__open, "The Store must be open." @@ -428,8 +427,7 @@ def remove( # type: ignore[override] cursor = index.cursor(txn=txn) try: cursor.set_range(key) - # Hack to stop 2to3 converting this to next(cursor) - current = getattr(cursor, "next")() + current = cursor.next() except db.DBNotFoundError: current = None cursor.close() @@ -448,11 +446,11 @@ def remove( # type: ignore[override] for i, _to_key, _ in self.__indicies_info: # NOTE on type error: variables are being # reused with a different type - # type error: Argument 1 has incompatible type "Tuple[str, str, str]"; expected "Tuple[bytes, bytes, bytes]" + # type error: Argument 1 has incompatible type "tuple[str, str, str]"; expected "tuple[bytes, bytes, bytes]" # type error: Argument 2 has incompatible type "str"; expected "bytes" i.delete(_to_key((s, p, o), c), txn=txn) # type: ignore[arg-type] else: - # type error: Argument 1 to "__remove" of "BerkeleyDB" has incompatible type "Tuple[str, str, str]"; expected "Tuple[bytes, bytes, bytes]" + # type error: Argument 1 to "__remove" of "BerkeleyDB" has incompatible type "tuple[str, str, str]"; expected "tuple[bytes, bytes, bytes]" # type error: Argument 2 to "__remove" of "BerkeleyDB" has incompatible type "str"; expected "bytes" self.__remove((s, p, o), c, txn=txn) # type: ignore[arg-type] else: @@ -474,10 +472,10 @@ def remove( # type: ignore[override] def triples( self, spo: _TriplePatternType, - context: Optional[_ContextType] = None, - txn: Optional[Any] = None, + context: _ContextType | None = None, + txn: Any | None = None, ) -> Generator[ - Tuple[_TripleType, Generator[Optional[_ContextType], None, None]], + tuple[_TripleType, Generator[_ContextType | None, None, None]], None, None, ]: @@ -506,21 +504,20 @@ def triples( cursor = index.cursor(txn=txn) try: cursor.set_range(key) - # Cheap hack so 2to3 doesn't convert to next(cursor) - current = getattr(cursor, "next")() + current = cursor.next() except db.DBNotFoundError: current = None cursor.close() if key and key.startswith(prefix): contexts_value = index.get(key, txn=txn) - # type error: Incompatible types in "yield" (actual type "Tuple[Tuple[Node, Node, Node], Generator[Node, None, None]]", expected type "Tuple[Tuple[IdentifiedNode, URIRef, Identifier], Iterator[Optional[Graph]]]") + # type error: Incompatible types in "yield" (actual type "tuple[tuple[Node, Node, Node], Generator[Node, None, None]]", expected type "tuple[tuple[IdentifiedNode, URIRef, Identifier], Iterator[Optional[Graph]]]") # NOTE on type ignore: this is needed because some context is # lost in the process of extracting triples from the database. yield results_from_key(key, subject, predicate, object, contexts_value) # type: ignore[misc] else: break - def __len__(self, context: Optional[_ContextType] = None) -> int: + def __len__(self, context: _ContextType | None = None) -> int: assert self.__open, "The Store must be open." 
if context is not None: if context == self: context = None @@ -539,8 +536,7 @@ def __len__(self, context: Optional[_ContextType] = None) -> int: key, value = current if key.startswith(prefix): count += 1 - # Hack to stop 2to3 converting this to next(cursor) - current = getattr(cursor, "next")() + current = cursor.next() else: break cursor.close() @@ -566,7 +562,7 @@ def bind(self, prefix: str, namespace: URIRef, override: bool = True) -> None: self.__prefix[bound_namespace or namespace] = bound_prefix or prefix self.__namespace[bound_prefix or prefix] = bound_namespace or namespace - def namespace(self, prefix: str) -> Optional[URIRef]: + def namespace(self, prefix: str) -> URIRef | None: # NOTE on type error: this is because the variable is reused with # another type. # type error: Incompatible types in assignment (expression has type "bytes", variable has type "str") @@ -576,7 +572,7 @@ def namespace(self, prefix: str) -> Optional[URIRef]: return URIRef(ns.decode("utf-8")) return None - def prefix(self, namespace: URIRef) -> Optional[str]: + def prefix(self, namespace: URIRef) -> str | None: # NOTE on type error: this is because the variable is reused with # another type. # type error: Incompatible types in assignment (expression has type "bytes", variable has type "URIRef") @@ -586,21 +582,20 @@ def prefix(self, namespace: URIRef) -> Optional[str]: return prefix.decode("utf-8") return None - def namespaces(self) -> Generator[Tuple[str, URIRef], None, None]: + def namespaces(self) -> Generator[tuple[str, URIRef], None, None]: cursor = self.__namespace.cursor() results = [] current = cursor.first() while current: prefix, namespace = current results.append((prefix.decode("utf-8"), namespace.decode("utf-8"))) - # Hack to stop 2to3 converting this to next(cursor) - current = getattr(cursor, "next")() + current = cursor.next() cursor.close() for prefix, namespace in results: yield prefix, URIRef(namespace) def contexts( - self, triple: Optional[_TripleType] = None + self, triple: _TripleType | None = None ) -> Generator[_ContextType, None, None]: _from_string = self._from_string _to_string = self._to_string @@ -610,14 +605,10 @@ def contexts( s: str p: str o: str - # type error: Incompatible types in assignment (expression has type "Node", variable has type "str") - s, p, o = triple # type: ignore[assignment] - # type error: Argument 1 has incompatible type "str"; expected "Node" - s = _to_string(s) # type: ignore[arg-type] - # type error: Argument 1 has incompatible type "str"; expected "Node" - p = _to_string(p) # type: ignore[arg-type] - # type error: Argument 1 has incompatible type "str"; expected "Node" - o = _to_string(o) # type: ignore[arg-type] + _s, _p, _o = triple + s = _to_string(_s) + p = _to_string(_p) + o = _to_string(_o) contexts = self.__indicies[0].get(bb("%s^%s^%s^%s^" % ("", s, p, o))) if contexts: for c in contexts.split("^".encode("latin-1")): @@ -637,8 +628,7 @@ def contexts( cursor = index.cursor() try: cursor.set_range(key) - # Hack to stop 2to3 converting this to next(cursor) - current = getattr(cursor, "next")() + current = cursor.next() except db.DBNotFoundError: current = None cursor.close() @@ -653,7 +643,7 @@ def _from_string(self, i: bytes) -> Node: k = self.__i2k.get(int(i)) return self._loads(k) - def _to_string(self, term: Node, txn: Optional[Any] = None) -> str: + def _to_string(self, term: Node, txn: Any | None = None) -> str: k = self._dumps(term) i = self.__k2i.get(k, txn=txn) if i is None: @@ -672,39 +662,44 @@ def _to_string(self, term: Node, txn: Optional[Any] = None) -> 
str: def __lookup( self, spo: _TriplePatternType, - context: Optional[_ContextType], - txn: Optional[Any] = None, - ) -> Tuple[db.DB, bytes, _FromKeyFunc, _ResultsFromKeyFunc]: - subject, predicate, object = spo + context: _ContextType | None, + txn: Any | None = None, + ) -> tuple[db.DB, bytes, _FromKeyFunc, _ResultsFromKeyFunc]: + subject, predicate, object_ = spo _to_string = self._to_string - # NOTE on type errors: this is because the same variable is used with different types. - if context is not None: - # type error: Incompatible types in assignment (expression has type "str", variable has type "Optional[Graph]") - context = _to_string(context, txn=txn) # type: ignore[assignment] + context_str: str | None = ( + None if context is None else _to_string(context, txn=txn) + ) i = 0 + subject_str: str | None + predicate_str: str | None + object_str: str | None if subject is not None: i += 1 - # type error: Incompatible types in assignment (expression has type "str", variable has type "Node") - subject = _to_string(subject, txn=txn) # type: ignore[assignment] + subject_str = _to_string(subject, txn=txn) + else: + subject_str = None if predicate is not None: i += 2 - # type error: Incompatible types in assignment (expression has type "str", variable has type "Node") - predicate = _to_string(predicate, txn=txn) # type: ignore[assignment] - if object is not None: + predicate_str = _to_string(predicate, txn=txn) + else: + predicate_str = None + if object_ is not None: i += 4 - # type error: Incompatible types in assignment (expression has type "str", variable has type "Node") - object = _to_string(object, txn=txn) # type: ignore[assignment] + object_str = _to_string(object_, txn=txn) + else: + object_str = None index, prefix_func, from_key, results_from_key = self.__lookup_dict[i] # print (subject, predicate, object), context, prefix_func, index # #DEBUG - # type error: Argument 1 has incompatible type "Tuple[Node, Node, Node]"; expected "Tuple[str, str, str]" + # type error: Argument 1 has incompatible type "tuple[Node, Node, Node]"; expected "tuple[str, str, str]" # type error: Argument 2 has incompatible type "Optional[Graph]"; expected "Optional[str]" - prefix = bb("^".join(prefix_func((subject, predicate, object), context))) # type: ignore[arg-type] + prefix = bb("^".join(prefix_func((subject_str, predicate_str, object_str), context_str))) # type: ignore[arg-type] return index, prefix, from_key, results_from_key def to_key_func(i: int) -> _ToKeyFunc: - def to_key(triple: Tuple[bytes, bytes, bytes], context: bytes) -> bytes: + def to_key(triple: tuple[bytes, bytes, bytes], context: bytes) -> bytes: "Takes a string; returns key" return "^".encode("latin-1").join( ( @@ -720,7 +715,7 @@ def to_key(triple: Tuple[bytes, bytes, bytes], context: bytes) -> bytes: def from_key_func(i: int) -> _FromKeyFunc: - def from_key(key: bytes) -> Tuple[bytes, bytes, bytes, bytes]: + def from_key(key: bytes) -> tuple[bytes, bytes, bytes, bytes]: "Takes a key; returns string" parts = key.split("^".encode("latin-1")) return ( @@ -738,11 +733,11 @@ def results_from_key_func( ) -> _ResultsFromKeyFunc: def from_key( key: bytes, - subject: Optional[Node], - predicate: Optional[Node], - object: Optional[Node], + subject: Node | None, + predicate: Node | None, + object: Node | None, contexts_value: bytes, - ) -> Tuple[Tuple[Node, Node, Node], Generator[Node, None, None]]: + ) -> tuple[tuple[Node, Node, Node], Generator[Node, None, None]]: "Takes a key and subject, predicate, object; returns tuple for yield" parts = 
key.split("^".encode("latin-1")) if subject is None: diff --git a/rdflib/plugins/stores/concurrent.py b/rdflib/plugins/stores/concurrent.py index 2d050954b..4203dd1ac 100644 --- a/rdflib/plugins/stores/concurrent.py +++ b/rdflib/plugins/stores/concurrent.py @@ -21,6 +21,8 @@ def __next__(self): class ConcurrentStore: + """A store that allows concurrent reads and writes.""" + def __init__(self, store): self.store = store diff --git a/rdflib/plugins/stores/memory.py b/rdflib/plugins/stores/memory.py index 7dc7c25ac..8a9a2da3f 100644 --- a/rdflib/plugins/stores/memory.py +++ b/rdflib/plugins/stores/memory.py @@ -2,17 +2,10 @@ # from __future__ import annotations +from collections.abc import Collection, Generator, Iterator, Mapping from typing import ( TYPE_CHECKING, Any, - Collection, - Dict, - Generator, - Iterator, - Mapping, - Optional, - Set, - Tuple, Union, overload, ) @@ -40,41 +33,40 @@ class SimpleMemory(Store): - """\ - A fast naive in memory implementation of a triple store. + """A fast naive in memory implementation of a triple store. This triple store uses nested dictionaries to store triples. Each - triple is stored in two such indices as follows spo[s][p][o] = 1 and - pos[p][o][s] = 1. + triple is stored in two such indices as follows `spo[s][p][o]` = 1 and + `pos[p][o][s]` = 1. Authors: Michel Pelletier, Daniel Krech, Stefan Niederhauser """ def __init__( self, - configuration: Optional[str] = None, - identifier: Optional[Identifier] = None, + configuration: str | None = None, + identifier: Identifier | None = None, ): super(SimpleMemory, self).__init__(configuration) self.identifier = identifier # indexed by [subject][predicate][object] - self.__spo: Dict[_SubjectType, Dict[_PredicateType, Dict[_ObjectType, int]]] = ( + self.__spo: dict[_SubjectType, dict[_PredicateType, dict[_ObjectType, int]]] = ( {} ) # indexed by [predicate][object][subject] - self.__pos: Dict[_PredicateType, Dict[_ObjectType, Dict[_SubjectType, int]]] = ( + self.__pos: dict[_PredicateType, dict[_ObjectType, dict[_SubjectType, int]]] = ( {} ) # indexed by [predicate][object][subject] - self.__osp: Dict[_ObjectType, Dict[_SubjectType, Dict[_PredicateType, int]]] = ( + self.__osp: dict[_ObjectType, dict[_SubjectType, dict[_PredicateType, int]]] = ( {} ) - self.__namespace: Dict[str, URIRef] = {} - self.__prefix: Dict[URIRef, str] = {} + self.__namespace: dict[str, URIRef] = {} + self.__prefix: dict[URIRef, str] = {} def add( self, @@ -82,9 +74,7 @@ def add( context: _ContextType, quoted: bool = False, ) -> None: - """\ - Add a triple to the store of triples. - """ + """Add a triple to the store of triples.""" # add dictionary entries for spo[s][p][p] = 1 and pos[p][o][s] # = 1, creating the nested dictionaries where they do not yet # exits. 
@@ -125,7 +115,7 @@ def add( def remove( self, triple_pattern: _TriplePatternType, - context: Optional[_ContextType] = None, + context: _ContextType | None = None, ) -> None: for (subject, predicate, object), c in list(self.triples(triple_pattern)): del self.__spo[subject][predicate][object] @@ -135,8 +125,8 @@ def remove( def triples( self, triple_pattern: _TriplePatternType, - context: Optional[_ContextType] = None, - ) -> Iterator[Tuple[_TripleType, Iterator[Optional[_ContextType]]]]: + context: _ContextType | None = None, + ) -> Iterator[tuple[_TripleType, Iterator[_ContextType | None]]]: """A generator over all the triples matching""" subject, predicate, object = triple_pattern if subject != ANY: # subject is given @@ -196,7 +186,7 @@ def triples( for o in subjectDictionary[p].keys(): yield (s, p, o), self.__contexts() - def __len__(self, context: Optional[_ContextType] = None) -> int: + def __len__(self, context: _ContextType | None = None) -> int: # @@ optimize i = 0 for triple in self.triples((None, None, None)): @@ -219,22 +209,22 @@ def bind(self, prefix: str, namespace: URIRef, override: bool = True) -> None: self.__prefix[namespace] = prefix self.__namespace[prefix] = namespace else: - # type error: Invalid index type "Optional[URIRef]" for "Dict[URIRef, str]"; expected type "URIRef" + # type error: Invalid index type "Optional[URIRef]" for "dict[URIRef, str]"; expected type "URIRef" self.__prefix[_coalesce(bound_namespace, namespace)] = _coalesce( # type: ignore[index] bound_prefix, default=prefix ) - # type error: Invalid index type "Optional[str]" for "Dict[str, URIRef]"; expected type "str" + # type error: Invalid index type "Optional[str]" for "dict[str, URIRef]"; expected type "str" self.__namespace[_coalesce(bound_prefix, prefix)] = _coalesce( # type: ignore[index] bound_namespace, default=namespace ) - def namespace(self, prefix: str) -> Optional[URIRef]: + def namespace(self, prefix: str) -> URIRef | None: return self.__namespace.get(prefix, None) - def prefix(self, namespace: URIRef) -> Optional[str]: + def prefix(self, namespace: URIRef) -> str | None: return self.__prefix.get(namespace, None) - def namespaces(self) -> Iterator[Tuple[str, URIRef]]: + def namespaces(self) -> Iterator[tuple[str, URIRef]]: for prefix, namespace in self.__namespace.items(): yield prefix, namespace @@ -270,8 +260,7 @@ def update( class Memory(Store): - """\ - An in memory implementation of a triple store. + """An in memory implementation of a triple store. 
Same as SimpleMemory above, but is Context-aware, Graph-aware, and Formula-aware Authors: Ashley Sommer @@ -283,36 +272,36 @@ class Memory(Store): def __init__( self, - configuration: Optional[str] = None, - identifier: Optional[Identifier] = None, + configuration: str | None = None, + identifier: Identifier | None = None, ): super(Memory, self).__init__(configuration) self.identifier = identifier # indexed by [subject][predicate][object] - self.__spo: Dict[_SubjectType, Dict[_PredicateType, Dict[_ObjectType, int]]] = ( + self.__spo: dict[_SubjectType, dict[_PredicateType, dict[_ObjectType, int]]] = ( {} ) # indexed by [predicate][object][subject] - self.__pos: Dict[_PredicateType, Dict[_ObjectType, Dict[_SubjectType, int]]] = ( + self.__pos: dict[_PredicateType, dict[_ObjectType, dict[_SubjectType, int]]] = ( {} ) # indexed by [predicate][object][subject] - self.__osp: Dict[_ObjectType, Dict[_SubjectType, Dict[_PredicateType, int]]] = ( + self.__osp: dict[_ObjectType, dict[_SubjectType, dict[_PredicateType, int]]] = ( {} ) - self.__namespace: Dict[str, URIRef] = {} - self.__prefix: Dict[URIRef, str] = {} - self.__context_obj_map: Dict[str, Graph] = {} - self.__tripleContexts: Dict[_TripleType, Dict[Optional[str], bool]] = {} - self.__contextTriples: Dict[Optional[str], Set[_TripleType]] = {None: set()} + self.__namespace: dict[str, URIRef] = {} + self.__prefix: dict[URIRef, str] = {} + self.__context_obj_map: dict[str, Graph] = {} + self.__tripleContexts: dict[_TripleType, dict[str | None, bool]] = {} + self.__contextTriples: dict[str | None, set[_TripleType]] = {None: set()} # all contexts used in store (unencoded) - self.__all_contexts: Set[Graph] = set() + self.__all_contexts: set[Graph] = set() # default context information for triples - self.__defaultContexts: Optional[Dict[Optional[str], bool]] = None + self.__defaultContexts: dict[str | None, bool] | None = None def add( self, @@ -320,9 +309,7 @@ def add( context: _ContextType, quoted: bool = False, ) -> None: - """\ - Add a triple to the store of triples. - """ + """Add a triple to the store of triples.""" # add dictionary entries for spo[s][p][p] = 1 and pos[p][o][s] # = 1, creating the nested dictionaries where they do not yet # exits. 
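`Memory` adds the context bookkeeping (`__tripleContexts`, `__contextTriples`) that `SimpleMemory` lacks, which is what lets it back a `Dataset` with named graphs. A minimal sketch of what that buys the caller (identifiers made up):

```python
from rdflib import Dataset, URIRef

# Dataset defaults to a context-aware in-memory store.
ds = Dataset()
g = ds.graph(URIRef("urn:example:ctx"))
g.add((URIRef("urn:example:s"), URIRef("urn:example:p"), URIRef("urn:example:o")))

# The triple is scoped to its named graph (context):
print(len(g))                                       # 1
print(len(ds.graph(URIRef("urn:example:other"))))   # 0
```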
@@ -379,7 +366,7 @@ def add( def remove( self, triple_pattern: _TriplePatternType, - context: Optional[_ContextType] = None, + context: _ContextType | None = None, ) -> None: req_ctx = self.__ctx_to_str(context) for triple, c in self.triples(triple_pattern, context=context): @@ -417,9 +404,9 @@ def remove( def triples( self, triple_pattern: _TriplePatternType, - context: Optional[_ContextType] = None, + context: _ContextType | None = None, ) -> Generator[ - Tuple[_TripleType, Generator[Optional[_ContextType], None, None]], + tuple[_TripleType, Generator[_ContextType | None, None, None]], None, None, ]: @@ -437,7 +424,7 @@ def triples( # optimize "triple in graph" case (all parts given) elif subject is not None and predicate is not None and object_ is not None: - # type error: Incompatible types in assignment (expression has type "Tuple[Optional[IdentifiedNode], Optional[IdentifiedNode], Optional[Identifier]]", variable has type "Tuple[IdentifiedNode, IdentifiedNode, Identifier]") + # type error: Incompatible types in assignment (expression has type "tuple[Optional[IdentifiedNode], Optional[IdentifiedNode], Optional[Identifier]]", variable has type "tuple[IdentifiedNode, IdentifiedNode, Identifier]") # NOTE on type error: at this point, all elements of triple_pattern # is not None, so it has the same type as triple triple = triple_pattern # type: ignore[assignment] @@ -538,28 +525,28 @@ def bind(self, prefix: str, namespace: URIRef, override: bool = True) -> None: self.__prefix[namespace] = prefix self.__namespace[prefix] = namespace else: - # type error: Invalid index type "Optional[URIRef]" for "Dict[URIRef, str]"; expected type "URIRef" + # type error: Invalid index type "Optional[URIRef]" for "dict[URIRef, str]"; expected type "URIRef" self.__prefix[_coalesce(bound_namespace, namespace)] = _coalesce( # type: ignore[index] bound_prefix, default=prefix ) - # type error: Invalid index type "Optional[str]" for "Dict[str, URIRef]"; expected type "str" + # type error: Invalid index type "Optional[str]" for "dict[str, URIRef]"; expected type "str" # type error: Incompatible types in assignment (expression has type "Optional[URIRef]", target has type "URIRef") self.__namespace[_coalesce(bound_prefix, prefix)] = _coalesce( # type: ignore[index] bound_namespace, default=namespace ) - def namespace(self, prefix: str) -> Optional[URIRef]: + def namespace(self, prefix: str) -> URIRef | None: return self.__namespace.get(prefix, None) - def prefix(self, namespace: URIRef) -> Optional[str]: + def prefix(self, namespace: URIRef) -> str | None: return self.__prefix.get(namespace, None) - def namespaces(self) -> Iterator[Tuple[str, URIRef]]: + def namespaces(self) -> Iterator[tuple[str, URIRef]]: for prefix, namespace in self.__namespace.items(): yield prefix, namespace def contexts( - self, triple: Optional[_TripleType] = None + self, triple: _TripleType | None = None ) -> Generator[_ContextType, None, None]: if triple is None or triple == (None, None, None): return (context for context in self.__all_contexts) @@ -571,7 +558,7 @@ def contexts( except KeyError: return (_ for _ in []) - def __len__(self, context: Optional[_ContextType] = None) -> int: + def __len__(self, context: _ContextType | None = None) -> int: ctx = self.__ctx_to_str(context) if ctx not in self.__contextTriples: return 0 @@ -598,7 +585,7 @@ def __add_triple_context( self, triple: _TripleType, triple_exists: bool, - context: Optional[_ContextType], + context: _ContextType | None, quoted: bool, ) -> None: """add the given context to the 
set of contexts for the triple""" @@ -611,7 +598,7 @@ def __add_triple_context( except KeyError: # triple exists with default ctx info # start with a copy of the default ctx info - # type error: Item "None" of "Optional[Dict[Optional[str], bool]]" has no attribute "copy" + # type error: Item "None" of "Optional[dict[Optional[str], bool]]" has no attribute "copy" triple_context = self.__tripleContexts[triple] = ( self.__defaultContexts.copy() # type: ignore[union-attr] ) @@ -649,27 +636,27 @@ def __add_triple_context( def __get_context_for_triple( self, triple: _TripleType, skipQuoted: bool = False # noqa: N803 - ) -> Collection[Optional[str]]: + ) -> Collection[str | None]: """return a list of contexts (str) for the triple, skipping quoted contexts if skipQuoted==True""" ctxs = self.__tripleContexts.get(triple, self.__defaultContexts) if not skipQuoted: - # type error: Item "None" of "Optional[Dict[Optional[str], bool]]" has no attribute "keys" + # type error: Item "None" of "Optional[dict[Optional[str], bool]]" has no attribute "keys" return ctxs.keys() # type: ignore[union-attr] - # type error: Item "None" of "Optional[Dict[Optional[str], bool]]" has no attribute "items" + # type error: Item "None" of "Optional[dict[Optional[str], bool]]" has no attribute "items" return [ctx for ctx, quoted in ctxs.items() if not quoted] # type: ignore[union-attr] - def __triple_has_context(self, triple: _TripleType, ctx: Optional[str]) -> bool: + def __triple_has_context(self, triple: _TripleType, ctx: str | None) -> bool: """return True if the triple exists in the given context""" - # type error: Unsupported right operand type for in ("Optional[Dict[Optional[str], bool]]") + # type error: Unsupported right operand type for in ("Optional[dict[Optional[str], bool]]") return ctx in self.__tripleContexts.get(triple, self.__defaultContexts) # type: ignore[operator] def __remove_triple_context(self, triple: _TripleType, ctx): """remove the context from the triple""" - # type error: Item "None" of "Optional[Dict[Optional[str], bool]]" has no attribute "copy" + # type error: Item "None" of "Optional[dict[Optional[str], bool]]" has no attribute "copy" ctxs = self.__tripleContexts.get(triple, self.__defaultContexts).copy() # type: ignore[union-attr] del ctxs[ctx] if ctxs == self.__defaultContexts: @@ -684,7 +671,7 @@ def __ctx_to_str(self, ctx: _ContextType) -> str: ... @overload def __ctx_to_str(self, ctx: None) -> None: ... 
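The `@overload` stubs above keep their exact signatures while the implementation that follows is modernised to the `str | None` spelling; the same `typing.overload` pattern in isolation (free-function sketch, names illustrative):

```python
from __future__ import annotations

from typing import overload

@overload
def ctx_to_str(ctx: str) -> str: ...
@overload
def ctx_to_str(ctx: None) -> None: ...
def ctx_to_str(ctx: str | None) -> str | None:
    # The stubs above are what type checkers see; only this
    # implementation signature uses the union spelling.
    return ctx
```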
- def __ctx_to_str(self, ctx: Optional[_ContextType]) -> Optional[str]: + def __ctx_to_str(self, ctx: _ContextType | None) -> str | None: if ctx is None: return None try: diff --git a/rdflib/plugins/stores/sparqlconnector.py b/rdflib/plugins/stores/sparqlconnector.py index e2bb83909..1b5b580d3 100644 --- a/rdflib/plugins/stores/sparqlconnector.py +++ b/rdflib/plugins/stores/sparqlconnector.py @@ -4,7 +4,7 @@ import copy import logging from io import BytesIO -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING from urllib.error import HTTPError from urllib.parse import urlencode from urllib.request import Request, urlopen @@ -17,6 +17,9 @@ if TYPE_CHECKING: import typing_extensions as te + SUPPORTED_METHODS = te.Literal["GET", "POST", "POST_FORM"] + SUPPORTED_FORMATS = te.Literal["xml", "json", "csv", "tsv", "application/rdf+xml"] + class SPARQLConnectorException(Exception): # noqa: N818 pass @@ -39,11 +42,11 @@ class SPARQLConnector: def __init__( self, - query_endpoint: Optional[str] = None, - update_endpoint: Optional[str] = None, - returnFormat: str = "xml", # noqa: N803 - method: te.Literal["GET", "POST", "POST_FORM"] = "GET", - auth: Optional[Tuple[str, str]] = None, + query_endpoint: str | None = None, + update_endpoint: str | None = None, + returnFormat: SUPPORTED_FORMATS = "xml", # noqa: N803 + method: SUPPORTED_METHODS = "GET", + auth: tuple[str, str] | None = None, **kwargs, ): """ @@ -84,8 +87,8 @@ def method(self, method: str) -> None: def query( self, query: str, - default_graph: Optional[str] = None, - named_graph: Optional[str] = None, + default_graph: str | None = None, + named_graph: str | None = None, ) -> Result: if not self.query_endpoint: raise SPARQLConnectorException("Query endpoint not set!") @@ -155,8 +158,8 @@ def query( def update( self, query: str, - default_graph: Optional[str] = None, - named_graph: Optional[str] = None, + default_graph: str | None = None, + named_graph: str | None = None, ) -> None: if not self.update_endpoint: raise SPARQLConnectorException("Query endpoint not set!") diff --git a/rdflib/plugins/stores/sparqlstore.py b/rdflib/plugins/stores/sparqlstore.py index f9827cf94..b544c88a2 100644 --- a/rdflib/plugins/stores/sparqlstore.py +++ b/rdflib/plugins/stores/sparqlstore.py @@ -1,36 +1,37 @@ """ This is an RDFLib store around Ivan Herman et al.'s SPARQL service wrapper. This was first done in layer-cake, and then ported to RDFLib - """ from __future__ import annotations import collections import re +from collections.abc import Callable from typing import ( TYPE_CHECKING, Any, - Callable, - Dict, - Generator, - Iterable, - Iterator, - List, - Mapping, - Optional, - Tuple, Union, + cast, overload, ) from rdflib.graph import DATASET_DEFAULT_GRAPH_ID, Graph from rdflib.plugins.stores.regexmatching import NATIVE_REGEX from rdflib.store import Store -from rdflib.term import BNode, Identifier, Node, URIRef, Variable +from rdflib.term import ( + BNode, + IdentifiedNode, + Identifier, + Literal, + Node, + URIRef, + Variable, +) if TYPE_CHECKING: import typing_extensions as te # noqa: I001 + from collections.abc import Mapping, Iterator, Iterable, Generator from rdflib.graph import ( _TripleType, _ContextType, @@ -43,6 +44,7 @@ ) from rdflib.plugins.sparql.sparql import Query, Update from rdflib.query import Result, ResultRow + from .sparqlconnector import SUPPORTED_FORMATS, SUPPORTED_METHODS from .sparqlconnector import SPARQLConnector @@ -53,7 +55,7 @@ BNODE_IDENT_PATTERN = re.compile(r"(?P