From 8d1a5f53d080f9e5a5917c3a9ef70400933dc390 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 00:06:27 +0100 Subject: [PATCH 01/42] ci: Add Ruff linting --- .github/workflows/{flake8.yml => lint.yml} | 14 ++++++++------ pyproject.toml | 12 ++++++++++++ 2 files changed, 20 insertions(+), 6 deletions(-) rename .github/workflows/{flake8.yml => lint.yml} (75%) diff --git a/.github/workflows/flake8.yml b/.github/workflows/lint.yml similarity index 75% rename from .github/workflows/flake8.yml rename to .github/workflows/lint.yml index 88166dd5af..f96eaf430b 100644 --- a/.github/workflows/flake8.yml +++ b/.github/workflows/lint.yml @@ -1,4 +1,4 @@ -name: Flake8 +name: Lint permissions: contents: read @@ -18,10 +18,8 @@ on: - main jobs: - flake8: - + codelint: runs-on: ubuntu-latest - steps: - uses: actions/checkout@v6 - name: Set up Python 3.10 @@ -31,8 +29,12 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install flake8-pyproject + pip install flake8-pyproject ruff - - name: Lint with flake8 + - name: Lint codebase with flake8 run: | flake8 --builtins=ArgumentError . 
+ + - name: Lint codebase with ruff + run: | + ruff check --preview --output-format github diff --git a/pyproject.toml b/pyproject.toml index afa0c02704..bdb0c01d3a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,6 +69,18 @@ exclude = ["binder", "docker", "docs", "tests", "examples"] [tool.setuptools_scm] fallback_version = "0+untagged" +[tool.ruff] +line-length = 90 + +[tool.ruff.lint] +# Add the following rule sets +extend-select = ["E", "W", "F", "B", "UP", "SIM", "I"] +# But ignore these inconvenient rules +ignore = ["F403", "E226", "E731", "E275", "F405", "E722", "E741", "W605"] + +[tool.ruff.lint.isort] +known-first-party = ["devito", "examples"] + [tool.flake8] max-line-length = 90 ignore = [ From dc370e4a470c8b35cd958aec56ee75afe8cc544d Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 00:08:14 +0100 Subject: [PATCH 02/42] ci: Add actionlint and hadolint for good measure --- .github/workflows/lint.yml | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index f96eaf430b..ac95c45ef5 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -38,3 +38,35 @@ jobs: - name: Lint codebase with ruff run: | ruff check --preview --output-format github + + actionlint: + name: "Lint Github actions YAML files" + # There's a way to add error formatting so GH actions adds messages to code, + # but I can't work out the right number of quotes to get it to work + # https://github.com/rhysd/actionlint/blob/main/docs/usage.md + # #example-error-annotation-on-github-actions + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Check workflow files + uses: docker://rhysd/actionlint:latest + with: + args: -color + + dockerlint: + name: "Lint dockerfiles" + runs-on: ubuntu-latest + container: + image: hadolint/hadolint:latest-alpine + env: + HADOLINT_IGNORE: "DL3005,DL3007,DL3008,DL3015,DL3059" + steps: + - uses: 
actions/checkout@v3 + - name: Lint dockerfiles inside hadolint container + run: | + for DOCKERFILE in docker/Dockerfile.*; \ + do \ + echo " Linting $DOCKERFILE"; \ + hadolint "$DOCKERFILE" \ + || exit 1; \ + done From 1783147bc45ac0d92513ddfa96cb41bf0c63321a Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 00:32:27 +0100 Subject: [PATCH 03/42] ci: Give codelint a name --- .github/workflows/lint.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index ac95c45ef5..67aa0423be 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -19,6 +19,7 @@ on: jobs: codelint: + name: "Lint the codebase" runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 From b23eeaca2b2731a7ef6d5ccf5ae395788603acb0 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 13:35:43 +0100 Subject: [PATCH 04/42] misc: Replace ruff I rules with more specific isort rules --- .github/workflows/lint.yml | 6 +++++- pyproject.toml | 9 ++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 67aa0423be..530f59dfc0 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -30,12 +30,16 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install flake8-pyproject ruff + pip install flake8-pyproject isort ruff - name: Lint codebase with flake8 run: | flake8 --builtins=ArgumentError . + - name: Lint the Python imports with isort + run: | + isort --check-only . 
+ - name: Lint codebase with ruff run: | ruff check --preview --output-format github diff --git a/pyproject.toml b/pyproject.toml index bdb0c01d3a..51b4b333f9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,12 +74,15 @@ line-length = 90 [tool.ruff.lint] # Add the following rule sets -extend-select = ["E", "W", "F", "B", "UP", "SIM", "I"] +select = ["E", "W", "F", "B", "UP", "SIM", "RUF022"] # But ignore these inconvenient rules ignore = ["F403", "E226", "E731", "E275", "F405", "E722", "E741", "W605"] -[tool.ruff.lint.isort] -known-first-party = ["devito", "examples"] +[tool.isort] +line_length = 90 +known_first_party = ["devito", "examples"] +multi_line_output = "VERTICAL_GRID_GROUPED" +force_alphabetical_sort_within_sections = true [tool.flake8] max-line-length = 90 From 089067ee65fe46584948e39cd69a800c6d4f45fe Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 18:35:08 +0100 Subject: [PATCH 05/42] misc: Add support for command line spellchequing with typo --- .github/workflows/lint.yml | 9 +++++++++ pyproject.toml | 18 ++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 530f59dfc0..a2d7c11267 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -44,6 +44,15 @@ jobs: run: | ruff check --preview --output-format github + spellcheck: + name: "Spellcheck everything" + runs-on: ubuntu-latest + steps: + - name: Checkout Actions Repository + uses: actions/checkout@v4 + - name: Spell Check Repo + uses: crate-ci/typos@v1.38.1 + actionlint: name: "Lint Github actions YAML files" # There's a way to add error formatting so GH actions adds messages to code, diff --git a/pyproject.toml b/pyproject.toml index 51b4b333f9..52e518dcb8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -84,6 +84,24 @@ known_first_party = ["devito", "examples"] multi_line_output = "VERTICAL_GRID_GROUPED" force_alphabetical_sort_within_sections = true +[tool.typos] +# For 
identifiers (variables, functions, classes and the like): +# Ignore words 2 characters or fewer followed by zero or more digits +# Also ignore collections of 20 or more "word character"s +default.extend-ignore-identifiers-re = ["\\b[[:alpha:]]{1,2}\\d*\\b", "\\b\\w{20,}\\b"] +# For words: +# Ignore words 2 characters or fewer followed by one or more digits +# Also ignore collections of 20 or more "word character"s +default.extend-ignore-words-re = ["\\b[[:alpha:]]{1,2}\\d?\\b", "\\b\\w{20,}\\b"] + +# Only for known words common abbreviations +# IMPORTANT: Add a comment with the meaning! +[tool.typos.default.extend-identifiers] +# arange - numpy.arange +arange = "arange" +# dorder - derivative order +dorder = "dorder" + [tool.flake8] max-line-length = 90 ignore = [ From 42b7275815d180767f8825ca300f75ba66318c9e Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 18:41:55 +0100 Subject: [PATCH 06/42] misc: Experiment --- .github/workflows/lint.yml | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index a2d7c11267..3fb1b6e852 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -48,10 +48,19 @@ jobs: name: "Spellcheck everything" runs-on: ubuntu-latest steps: - - name: Checkout Actions Repository - uses: actions/checkout@v4 - - name: Spell Check Repo - uses: crate-ci/typos@v1.38.1 + - uses: actions/checkout@v5 + - name: Set up Python 3.10 + uses: actions/setup-python@v6 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install typos + + - name: Lint codebase with flake8 + run: | + typos --format sarif actionlint: name: "Lint Github actions YAML files" From ef9440ef9f4a93c3a0261d2a8b598edfea035c79 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 18:47:36 +0100 Subject: [PATCH 07/42] misc: Deliberately add a file with a typo --- experiment.txt | 1 + 1 
file changed, 1 insertion(+) create mode 100644 experiment.txt diff --git a/experiment.txt b/experiment.txt new file mode 100644 index 0000000000..3b4d1bd3df --- /dev/null +++ b/experiment.txt @@ -0,0 +1 @@ +Add a fille with a typo in it. From bed65d7a19ea2bd65784b376bee9ed03e3bd1346 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 19:15:56 +0100 Subject: [PATCH 08/42] ci: Let's break everything by adding typo annotations --- .github/workflows/lint.yml | 2 +- scripts/typos_json_to_gha.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 scripts/typos_json_to_gha.py diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3fb1b6e852..dffd80c2f5 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -60,7 +60,7 @@ jobs: - name: Lint codebase with flake8 run: | - typos --format sarif + typos --format json | python scripts/typos_json_to_gha.py actionlint: name: "Lint Github actions YAML files" diff --git a/scripts/typos_json_to_gha.py b/scripts/typos_json_to_gha.py new file mode 100644 index 0000000000..58944f4bb1 --- /dev/null +++ b/scripts/typos_json_to_gha.py @@ -0,0 +1,28 @@ +import json +import sys +from string import Template + + +def main(): + # Standard Github message template for CI annotations + message_template = Template( + '::error file=$path,line=$line_num,col=$byte_offset,endcol=$end_col,' + 'title=$type::`$typo` should be $suggestions' + ) + + for line in sys.stdin: + # Grab the JSON data coming from typos from stdin + data = json.loads(line.rstrip()) + + # Calculate the end column and format the correction + end_col = data['byte_offset'] + len(data['typo']) + suggestions = ', '.join(data['corrections']) + + # Print the templated message to stdout + print(message_template.safe_substitute( + data, end_col=end_col, suggestions=suggestions + )) + + +if __name__ == '__main__': + exit(main()) From cad17c1e7235873ba2808a7ebf7f5f2319a370c8 Mon Sep 17 
00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 19:27:04 +0100 Subject: [PATCH 09/42] scripts: Fix gha helper script --- scripts/typos_json_to_gha.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/scripts/typos_json_to_gha.py b/scripts/typos_json_to_gha.py index 58944f4bb1..b474e469b4 100644 --- a/scripts/typos_json_to_gha.py +++ b/scripts/typos_json_to_gha.py @@ -14,14 +14,25 @@ def main(): # Grab the JSON data coming from typos from stdin data = json.loads(line.rstrip()) - # Calculate the end column and format the correction - end_col = data['byte_offset'] + len(data['typo']) - suggestions = ', '.join(data['corrections']) - - # Print the templated message to stdout - print(message_template.safe_substitute( - data, end_col=end_col, suggestions=suggestions - )) + if data['type'] == 'binary_file': + continue + + try: + # Calculate the end column and format the correction + suggestions = ', '.join(data['corrections']) + end_col = data['byte_offset'] + len(data['typo']) + + # Print the templated message to stdout + print(message_template.safe_substitute( + data, end_col=end_col, suggestions=suggestions + )) + except KeyError: + print('KeyError') + print(f'{data}') + except Exception as e: + print('Caught unhandled exception') + print(f'{data}') + print(f'{e}') if __name__ == '__main__': From 4f4b98d00acdaf5eff39d917ee98476ad96690f9 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 19:33:28 +0100 Subject: [PATCH 10/42] scripts: Return non-zero error code, unless stdin is empty --- scripts/typos_json_to_gha.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/typos_json_to_gha.py b/scripts/typos_json_to_gha.py index b474e469b4..62db76f708 100644 --- a/scripts/typos_json_to_gha.py +++ b/scripts/typos_json_to_gha.py @@ -4,6 +4,8 @@ def main(): + error_code = 0 + # Standard Github message template for CI annotations message_template = Template( '::error 
file=$path,line=$line_num,col=$byte_offset,endcol=$end_col,' @@ -11,6 +13,7 @@ def main(): ) for line in sys.stdin: + error_code = 1 # Grab the JSON data coming from typos from stdin data = json.loads(line.rstrip()) @@ -34,6 +37,8 @@ def main(): print(f'{data}') print(f'{e}') + return error_code + if __name__ == '__main__': exit(main()) From 87657f447a4036e12479eeff3e1180d6e92077cf Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 19:45:27 +0100 Subject: [PATCH 11/42] misc: Typos (ironically) --- .github/workflows/lint.yml | 2 +- scripts/typos_json_to_gha.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index dffd80c2f5..f16d3f15a6 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -58,7 +58,7 @@ jobs: python -m pip install --upgrade pip pip install typos - - name: Lint codebase with flake8 + - name: Spellcheck the codebase with typos run: | typos --format json | python scripts/typos_json_to_gha.py diff --git a/scripts/typos_json_to_gha.py b/scripts/typos_json_to_gha.py index 62db76f708..1f5fc72bc7 100644 --- a/scripts/typos_json_to_gha.py +++ b/scripts/typos_json_to_gha.py @@ -8,7 +8,7 @@ def main(): # Standard Github message template for CI annotations message_template = Template( - '::error file=$path,line=$line_num,col=$byte_offset,endcol=$end_col,' + '::error file=$path,line=$line_num,col=$byte_offset,endColumn=$end_col,' 'title=$type::`$typo` should be $suggestions' ) From e4914cad314452b61add2e5f8fd637c24236d089 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 17 Oct 2025 19:58:41 +0100 Subject: [PATCH 12/42] misc: Remove ./ prefix from path --- scripts/typos_json_to_gha.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/typos_json_to_gha.py b/scripts/typos_json_to_gha.py index 1f5fc72bc7..c7be78ecc6 100644 --- a/scripts/typos_json_to_gha.py +++ b/scripts/typos_json_to_gha.py @@ -8,7 +8,7 @@ def 
main(): # Standard Github message template for CI annotations message_template = Template( - '::error file=$path,line=$line_num,col=$byte_offset,endColumn=$end_col,' + '::error file=$xpath,line=$line_num,col=$byte_offset,endColumn=$end_col,' 'title=$type::`$typo` should be $suggestions' ) @@ -24,10 +24,12 @@ def main(): # Calculate the end column and format the correction suggestions = ', '.join(data['corrections']) end_col = data['byte_offset'] + len(data['typo']) + # Remove './' from the start of the path + xpath = data['path'].removeprefix('./') # Print the templated message to stdout print(message_template.safe_substitute( - data, end_col=end_col, suggestions=suggestions + data, xpath=xpath, end_col=end_col, suggestions=suggestions )) except KeyError: print('KeyError') From 7d6f48ee9983a376eb54bc75bbf4f52c1d167eab Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Thu, 13 Nov 2025 18:04:16 +0000 Subject: [PATCH 13/42] misc: Prevent isort from sorting __init__.py --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 52e518dcb8..6375945d87 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -83,6 +83,8 @@ line_length = 90 known_first_party = ["devito", "examples"] multi_line_output = "VERTICAL_GRID_GROUPED" force_alphabetical_sort_within_sections = true +# We should really do some work to eliminate this (circular imports): +skip = ["__init__.py"] [tool.typos] # For identifiers (variables, functions, classes and the like): From a1ea4344a8b25766524b02fb45f17b2a37399fc4 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Thu, 13 Nov 2025 18:26:06 +0000 Subject: [PATCH 14/42] misc: Change overly strict isort rule --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 6375945d87..9732a44a56 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -82,7 +82,6 @@ ignore = ["F403", "E226", "E731", "E275", "F405", "E722", "E741", "W605"] line_length = 90 
known_first_party = ["devito", "examples"] multi_line_output = "VERTICAL_GRID_GROUPED" -force_alphabetical_sort_within_sections = true # We should really do some work to eliminate this (circular imports): skip = ["__init__.py"] From e200b51fad2e56be13c70167437a97380c89852f Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Thu, 13 Nov 2025 14:49:35 +0000 Subject: [PATCH 15/42] ci: Shutup CodeQL --- .github/workflows/lint.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index f16d3f15a6..f9d87b5738 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,5 +1,4 @@ name: Lint - permissions: contents: read From da4d644a47cfd5076516d7963162a82a866d2002 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Thu, 13 Nov 2025 16:31:01 +0000 Subject: [PATCH 16/42] misc: Add a configuration for running pre-commit hooks --- .pre-commit-config.yaml | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..58af58f97b --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,35 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.2.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + - repo: https://github.com/pycqa/isort + rev: 5.13.2 + hooks: + # Run isort to check only (don't modify files) + - id: isort + args: [ --check-only ] + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. 
+ rev: v0.14.4 + hooks: + # Run the linter to check only (don't modify files) + - id: ruff-check + - repo: https://github.com/crate-ci/typos + rev: v1.39.1 + hooks: + - id: typos + args: [] + - repo: https://github.com/rhysd/actionlint + rev: v1.7.8 + hooks: + - id: actionlint-docker + - repo: https://github.com/hadolint/hadolint + rev: v2.12.0 + hooks: + - id: hadolint-docker From 941a50fbb559141b1ef04f1f9a4c84c5296ca984 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Thu, 13 Nov 2025 16:49:57 +0000 Subject: [PATCH 17/42] lint: Remove all the trailing whitespace --- CITATION.md | 4 ++-- README.md | 4 ++-- auto_lint.sh | 22 ++++++++++++++++++++++ docker/Dockerfile.amd | 4 ++-- docker/Dockerfile.cpu | 2 +- docker/Dockerfile.nvidia | 4 ++-- docker/README.md | 4 ++-- docker/Singularity.nvidia.def | 24 ++++++++++++------------ docker/run-jupyterlab.sh | 2 +- examples/README.md | 2 +- examples/seismic/abc_methods/README.md | 4 ++-- examples/seismic/self_adjoint/README.md | 6 +++--- 12 files changed, 52 insertions(+), 30 deletions(-) create mode 100755 auto_lint.sh diff --git a/CITATION.md b/CITATION.md index 6230370506..9e8d183d29 100644 --- a/CITATION.md +++ b/CITATION.md @@ -41,8 +41,8 @@ Additionally, if you use Devito for distributed runs, you may want to cite the f @misc{bisbas2024automatedmpixcodegeneration, title={Automated MPI-X code generation for scalable finite-difference solvers}, author={Bisbas, George and Nelson, Rhodri and Louboutin, Mathias and Luporini, Fabio and Kelly, Paul H.J. 
and Gorman, Gerard}, - booktitle={2025 IEEE International Parallel and Distributed Processing Symposium (IPDPS)}, - title={Automated MPI-X Code Generation for Scalable Finite-Difference Solvers}, + booktitle={2025 IEEE International Parallel and Distributed Processing Symposium (IPDPS)}, + title={Automated MPI-X Code Generation for Scalable Finite-Difference Solvers}, year={2025}, volume={}, number={}, diff --git a/README.md b/README.md index 0e2155fc7b..e234164857 100644 --- a/README.md +++ b/README.md @@ -95,7 +95,7 @@ provided with Devito or create your own notebooks. instructions and other options. If you encounter a problem during installation, please see the [installation issues](https://github.com/devitocodes/devito/wiki/Installation-Issues) we -have seen in the past. +have seen in the past. ## Resources @@ -126,4 +126,4 @@ are facing issues or just trying it out, join the [conversation](https://join.slack.com/t/devitocodes/shared_invite/zt-2hgp6891e-jQDcepOWPQwxL5JJegYKSA). ## Interactive jupyter notebooks -The tutorial jupyter notebook are available interactively at the public [binder](https://mybinder.org/v2/gh/devitocodes/devito/main) jupyterhub. +The tutorial jupyter notebook are available interactively at the public [binder](https://mybinder.org/v2/gh/devitocodes/devito/main) jupyterhub. diff --git a/auto_lint.sh b/auto_lint.sh new file mode 100755 index 0000000000..1281fd1ce4 --- /dev/null +++ b/auto_lint.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +pre-commit run --all-files trailing-whitespace +git add --all +git commit --no-verify -m "lint: Remove all the trailing whitespace" + +pre-commit run --all-files end-of-file-fixer +git add --all +git commit --no-verify -m "lint: Fix ends of files" + +isort . 
+git add --all +git commit --no-verify -m "lint: Re-sort all imports with new isort rules" + +ruff check --fix +git add --all +git commit --no-verify -m "lint: First pass with ruff --fix" + +# Don't run these +# ruff check --fix --unsafe-fixes +# git add --all +# git commit --no-verify -m "lint: Second pass with ruff --fix --unsafe-fixes" diff --git a/docker/Dockerfile.amd b/docker/Dockerfile.amd index 907d6d9c06..67b65151f2 100644 --- a/docker/Dockerfile.amd +++ b/docker/Dockerfile.amd @@ -84,8 +84,8 @@ RUN rm -rf /tmp/ucx && rm -rf /tmp/ompi # Adding OpenMPI and UCX to Environment ENV PATH=$OMPI_HOME/bin:$UCX_HOME/bin:$PATH \ LD_LIBRARY_PATH=$OMPI_HOME/lib:$UCX_HOME/lib:$LD_LIBRARY_PATH \ - C_INCLUDE_PATH=$OMPI_HOME/include:$UCX_HOME/include:$C_INCLUDE_PATH \ - CPLUS_INCLUDE_PATH=$OMPI_HOME/include:$UCX_HOME/include:$CPLUS_INCLUDE_PATH \ + C_INCLUDE_PATH=$OMPI_HOME/include:$UCX_HOME/include:$C_INCLUDE_PATH \ + CPLUS_INCLUDE_PATH=$OMPI_HOME/include:$UCX_HOME/include:$CPLUS_INCLUDE_PATH \ CPATH=$OMPI_HOME/include:$UCX_HOME/include:$CPATH \ INCLUDE=$OMPI_HOME/include:$UCX_HOME/include:$INCLUDE \ PKG_CONFIG_PATH=$OMPI_HOME/lib/pkgconfig:$UCX_HOME/lib/pkgconfig:$PKG_CONFIG_PATH diff --git a/docker/Dockerfile.cpu b/docker/Dockerfile.cpu index 43cf96b18c..7e27266a09 100644 --- a/docker/Dockerfile.cpu +++ b/docker/Dockerfile.cpu @@ -4,7 +4,7 @@ # architectures using GCC compilers and OpenMPI. 
############################################################## -# Base image +# Base image FROM ubuntu:22.04 AS base ARG gcc="" diff --git a/docker/Dockerfile.nvidia b/docker/Dockerfile.nvidia index a8adfe329a..66e4cba5e7 100644 --- a/docker/Dockerfile.nvidia +++ b/docker/Dockerfile.nvidia @@ -121,8 +121,8 @@ RUN echo "$HPCSDK_HOME/cuda/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ echo "$HPCSDK_HOME/compilers/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ echo "$HPCSDK_HOME/comm_libs/mpi/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ echo "$HPCSDK_CUPTI/lib64" >> /etc/ld.so.conf.d/nvidia.conf && \ - echo "$HPCSDK_HOME/math_libs/lib64" >> /etc/ld.so.conf.d/nvidia.conf - + echo "$HPCSDK_HOME/math_libs/lib64" >> /etc/ld.so.conf.d/nvidia.conf + # Compiler, CUDA, and Library paths # CUDA_HOME has been deprecated but keep for now because of other dependencies (@mloubout). ENV CUDA_HOME=$HPCSDK_HOME/cuda diff --git a/docker/README.md b/docker/README.md index 33f863dd01..3903e92ebd 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,6 +1,6 @@ # [Devito] Docker image library -In order to facilitate the dissemination, usage, and development of Devito, we provide a series of Docker images. These images support numerous architectures and compilers and are tagged accordingly. You can find all the available images at [DevitoHub](https://hub.docker.com/r/devitocodes/). The following describes the available images and the workflow to build it yourself. +In order to facilitate the dissemination, usage, and development of Devito, we provide a series of Docker images. These images support numerous architectures and compilers and are tagged accordingly. You can find all the available images at [DevitoHub](https://hub.docker.com/r/devitocodes/). The following describes the available images and the workflow to build it yourself. 
## [Devito] images @@ -17,7 +17,7 @@ These images provide a working environment for any CPU architecture and come wit To run this image locally, you will first need to install `docker`. Then, the following commands will get you started: ```bash -# Pull image and start a bash shell +# Pull image and start a bash shell docker run --rm -it -p 8888:8888 -p 8787:8787 -p 8786:8786 devitocodes/devito:gcc-latest /bin/bash docker run --rm -it -p 8888:8888 -p 8787:8787 -p 8786:8786 --device=/dev/infiniband/uverbs0 --device=/dev/infiniband/rdma_cm devitocodes/devito:gcc-latest /bin/bash diff --git a/docker/Singularity.nvidia.def b/docker/Singularity.nvidia.def index 2b450b5823..3530a0016b 100644 --- a/docker/Singularity.nvidia.def +++ b/docker/Singularity.nvidia.def @@ -3,14 +3,14 @@ From: python:3.8 %help ############################################################## -# This Dockerfile contains the additional NVIDIA compilers, -# libraries, and plugins to enable OpenACC and NVIDIA GPU +# This Dockerfile contains the additional NVIDIA compilers, +# libraries, and plugins to enable OpenACC and NVIDIA GPU # acceleration of Devito codes. # -# BUILD: +# BUILD: # singularity build --fakeroot devito.nvidia.sif docker/Singularity.nvidia.def # -# RUN: +# RUN: # singularity run --nv --writable-tmpfs devito.nvidia.sif ############################################################## @@ -75,11 +75,11 @@ export DEVITO_LANGUAGE="openacc" export DEVITO_PLATFORM=nvidiaX # Options: [unset, 1] For PGI openacc; Should only be set after a first execution of the benchmark -# export DEVITO_JIT_BACKDOOR=1 +# export DEVITO_JIT_BACKDOOR=1 # Enable logging, Options: [unset, PERF, DEBUG] export DEVITO_LOGGING=DEBUG -#export PGI_ACC_TIME=1 +#export PGI_ACC_TIME=1 # Set the home directory to our app user's home. 
export HOME=/app @@ -89,7 +89,7 @@ export APP_HOME=/app export DEBIAN_FRONTEND=noninteractive -# nodesource: nvdashboard requires nodejs>=10 +# nodesource: nvdashboard requires nodejs>=10 echo 'deb [trusted=yes] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' > /etc/apt/sources.list.d/nvhpc.list && \ apt-get update -y && \ apt-get install -y -q \ @@ -113,7 +113,7 @@ rm -rf /var/lib/apt/lists/* export HPCSDK_HOME=/opt/nvidia/hpc_sdk/Linux_x86_64/2022 export HPCSDK_CUPTI=/opt/nvidia/hpc_sdk/Linux_x86_64/2022/cuda/11.6/extras/CUPTI - + # Compiler, CUDA, and Library paths export CUDA_HOME=$HPCSDK_HOME/cuda export NVHPC_CUDA_HOME=$HPCSDK_HOME/cuda @@ -128,7 +128,7 @@ python3 -m venv /venv && \ /venv/bin/pip install --no-cache-dir -r /app/requirements-optional.txt && \ /venv/bin/pip install --no-cache-dir -r /app/requirements-nvidia.txt && \ rm -rf ~/.cache/pip - + # MPI ROOT USER DEFAULTS export CPATH=$HPCSDK_HOME/comm_libs/mpi/include:${CPATH} export CFLAGS=-noswitcherror @@ -137,10 +137,10 @@ export CFLAGS=-noswitcherror # Do Nothing #MPI 4 #rm -f $HPCSDK_HOME/comm_libs/mpi && \ -#ln -sf $HPCSDK_HOME/comm_libs/openmpi4/openmpi-4.0.5 $HPCSDK_HOME/comm_libs/mpi ; +#ln -sf $HPCSDK_HOME/comm_libs/openmpi4/openmpi-4.0.5 $HPCSDK_HOME/comm_libs/mpi ; #HPCX rm -f $HPCSDK_HOME/comm_libs/mpi && \ -ln -sf $HPCSDK_HOME/comm_libs/hpcx/latest/ompi $HPCSDK_HOME/comm_libs/mpi ; +ln -sf $HPCSDK_HOME/comm_libs/hpcx/latest/ompi $HPCSDK_HOME/comm_libs/mpi ; /venv/bin/pip install --no-cache-dir -r /app/requirements-mpi.txt && \ rm -rf ~/.cache/pip @@ -149,7 +149,7 @@ export CFLAGS= chmod -R 755 /app chmod 777 /app chmod 777 /print-defaults /jupyter /tests /entrypoint.sh && \ - /venv/bin/jupyter serverextension enable dask_labextension + /venv/bin/jupyter serverextension enable dask_labextension # /venv/bin/jupyter lab workspaces import /app/nvdashboard.json %runscript diff --git a/docker/run-jupyterlab.sh b/docker/run-jupyterlab.sh index e1516dd51f..e50a65aca7 100644 --- 
a/docker/run-jupyterlab.sh +++ b/docker/run-jupyterlab.sh @@ -1,3 +1,3 @@ #!/usr/bin/env bash -/venv/bin/jupyter-lab --ip=0.0.0.0 --port=8888 --allow-root --NotebookApp.token='' +/venv/bin/jupyter-lab --ip=0.0.0.0 --port=8888 --allow-root --NotebookApp.token='' diff --git a/examples/README.md b/examples/README.md index 391c7d1e4b..3960faae88 100644 --- a/examples/README.md +++ b/examples/README.md @@ -38,7 +38,7 @@ A set of more advanced examples are available in `seismic`: equations. * `seismic/self-adjoint`: Self-adjoint energy conserving pseudo-acoustic operators, including notebooks for implementation of the nonlinear forward, - the forward and adjoint linearized Jacobian, and tests proving accuracy and + the forward and adjoint linearized Jacobian, and tests proving accuracy and correctness. Further: diff --git a/examples/seismic/abc_methods/README.md b/examples/seismic/abc_methods/README.md index 7adcafb8d6..9e633c4e32 100644 --- a/examples/seismic/abc_methods/README.md +++ b/examples/seismic/abc_methods/README.md @@ -4,7 +4,7 @@ Institute of Mathematics and Statistics - Applied Mathematics Department (felipe.augusto.guedes@gmail.com, saulo@ime.usp.br, pedrosp@ime.usp.br) -**Important Informations:** These notebooks are part of the Project Software Technologies for Modeling and Inversion (STMI) at RCGI in the University of Sao Paulo. +**Important Informations:** These notebooks are part of the Project Software Technologies for Modeling and Inversion (STMI) at RCGI in the University of Sao Paulo. The objective of these notebooks is to present several schemes which are designed to reduce artificial reflections on boundaries in the numerical solution of the acoustic wave equation with finite differences. We consider several methods, covering absorbing boundary conditions and absorbing boundary layers. Among the schemes, we have implemented: @@ -21,6 +21,6 @@ The computational implementation of the methods above is done within the framewo - 3. 
PML implementation; - 4. HABC (Hybrid absorbing boundary conditions. These encompass also the absorbing boundary conditions A1, A2 and Higdon).; -The notebooks bring a theoretical description of the methods together with the Devito implementation, which can be used for the simulations of interest. We choose a reference problem, described in the notebook Introduction to Acoustic Problem. The spatial and temporal discretizations used throughout the notebooks are also presented in this introductory notebook, together with other relevant concepts to be used overall. Therefore, one should first assimilate the contents of this notebook. +The notebooks bring a theoretical description of the methods together with the Devito implementation, which can be used for the simulations of interest. We choose a reference problem, described in the notebook Introduction to Acoustic Problem. The spatial and temporal discretizations used throughout the notebooks are also presented in this introductory notebook, together with other relevant concepts to be used overall. Therefore, one should first assimilate the contents of this notebook. In the remaining notebooks, we incrementally describe several numerical techniques to reduce artificial boundary reflections. It is better to follow the order of the notebooks, since concepts are used afterward. We include simulations demonstrating the use of the methods. By changing some parameters, the user would be able to carry out several tests. diff --git a/examples/seismic/self_adjoint/README.md b/examples/seismic/self_adjoint/README.md index a0f569463d..cd005ff685 100644 --- a/examples/seismic/self_adjoint/README.md +++ b/examples/seismic/self_adjoint/README.md @@ -12,7 +12,7 @@ These operators are based on simplfications of the systems presented in: The goal of this series of tutorials is to generate -- and then test for correctness -- the modeling and inversion capability in Devito for variable density visco- acoustics. 
We use an energy conserving form of the wave equation that is *self adjoint*, which allows the same modeling system to be used for all for all phases of finite difference evolution required for quasi-Newton optimization: - **nonlinear forward**, nonlinear with respect to the model parameters -- **Jacobian forward**, linearized with respect to the model parameters +- **Jacobian forward**, linearized with respect to the model parameters - **Jacobian adjoint**, linearized with respect to the model parameters These notebooks first implement and then test for correctness for three types of modeling physics. @@ -57,7 +57,7 @@ These notebooks first implement and then test for correctness for three types of name='IsoJacobianAdjOperator', **kwargs) ``` - With Equation order 1, all tests pass - - With Equation order 2, there are different outcomes for tests + - With Equation order 2, there are different outcomes for tests - Possibly there is a different path chosen through the AST, and different c code is generated? 
- [ ] replace the conditional logic in the stencil with comprehension @@ -90,7 +90,7 @@ These notebooks first implement and then test for correctness for three types of - [X] Jacobian operator adjoint test, with respect to model/data - [X] Skew symmetry test for shifted derivatives -## To save generated code +## To save generated code ``` f = open("operator.c", "w") From 56aa107785bb6093e85727a57a859807029afdbd Mon Sep 17 00:00:00 2001 From: JDBetteridge Date: Wed, 24 Dec 2025 16:44:34 +0000 Subject: [PATCH 18/42] lint: Fix ends of files --- .deploy_key.enc | 2 +- .github/workflows/triggers.yml | 2 +- CODE_OF_CONDUCT.md | 1 - benchmarks/user/advisor/README.md | 1 - binder/README.md | 2 +- devito/ir/__init__.py | 2 +- devito/ir/cgen/__init__.py | 2 +- docker/README.md | 2 +- docker/entrypoint.sh | 2 +- environment-dev.yml | 2 +- examples/checkpointing/checkpoint.py | 2 +- examples/performance/resources/RoofsData.json | 2 +- examples/seismic/self_adjoint/README.md | 2 +- requirements-mpi.txt | 2 +- requirements-optional.txt | 2 +- requirements-testing.txt | 2 +- 16 files changed, 14 insertions(+), 16 deletions(-) diff --git a/.deploy_key.enc b/.deploy_key.enc index 941b7ab589..3a22ad5618 100644 --- a/.deploy_key.enc +++ b/.deploy_key.enc @@ -1 +1 @@ 
-gAAAAABeDg5ZyNUhS3dc25RQZvpvHGAkmK6zQU8i3YRb9yUTC4MCQ0nkl215UQh0kNYoRcxaeJ7ecRYUjBamraRCVaaWvNal8DV9Ba9abHVZVA3T1rv7ptbcAmMgUK6e4LMc7xGB5w2GBTEjLSc9oGOhAiuFD-OPU6WVwEskiRD4D0SAmuRuI_8fTdpuV10plvGJNXM48TrDWabBnp2F2x-6GB0jqtuq6Z6yXIJ-cmgWRXzVAEj9NjYgKD3xeB4-8Mv9O1RMhazlRMQX5NFvcdNYPhdxIr0olFsoXhcavOmpsMNljLfl1MVnLtCp2QHzAjjIMKGtmBDYILUvKXdknmHi3hGcz_tTLCfSk976Qycf5pQwhlxNsoMfr4p3m3EvL7rXfwZCcctwiIZFzZn7SayMV1IMYUEAWR6JwbaUU93AHw_VrGM-urqMJIzZ_TMQ96RbFgwIjSB8352tOFbya-f1zDFupFDLwY2_XN4wrEactgvkvXI8aunJbBwP-u_YE4JjWU4EGUHebbwUSiDGk7GZ9Ji3KWsRq1QRv5anJMkGjrUSzkLv-5AJBgPz2DOtV6Y19KdqytKXyefE1cGhAXJfjlADkkWy7XikYxXoNSenBH0ccWujp_zn3XRnGc_IvcCyYYGON0VuGPKDDx3amZEGLBgdktkHYwoGkfLpPJygFkJB0-w4hJOsGNi1OQJMfqG5FcrsKBlSvvpsvMCSW_8eY4yQjb7P9DLF2A0kLoTANjg-8bVCUPxYVLMmXMnhJDdyJlq7cqwIcu-8-NgzDPfJFantgSLM2k066BOZpqsZqnaa2TouE9B9Ql3G4cgqEllWhY0v2keQpv3bvQBo9PU-zxztgs5yMrbuK2T2xn3nGyFuj3pchuWhlZiQptkAEi7GcM-iZ5n9WPUC_0qtusaXuddZPAvLLYxECqs9u-14eRykOmvHKRVKNg-AGaOtK602qxZhx3xgHwetHwOipmf-WsL54p7mxn37JyNoUk_O_gCkx5tRmUbCEFupILcfoHnR13cjozm6pqtquJfK1XxHBzHf6e-2DWuooLy3PI-vKKWce9QSjTQgOmxb6XVzdQIJ9vJB4GYgfeevRe9j73OWrKh2VGurVnt_K2wuhjOPRsHlTKMhpfPWlwLFHXdqTDZswRowdaOhcizvamsdI_rNAlRiyB4HQYXVdGLzYQMDEU-dm4psB-xO3UzmHKwGmPXaqsh4pzrC2j3UU4oznI3HE-Qqgj4XEXhilZ8RMmoQWvOhnl_WH7fxxAn7UE80J70w9X2S3yzV_evwPlIJtDc-e7-Rf2GzCln11zTBgOQ9u3aBK-8gLYZV3AF0yxOPpYy0BZAsRUJmrR6K8i3tvFc0_pZIAFwtB2esxJDsA-25CXKaphwGH-ikRl8Ls7WJeffjhPbexfgdbPHwxxanXdh0KMLs47BCOxCy0aardlTHj44wP6pNXD6URnM-d7Aj-SmEmCTBciNexmYlRwRPOb0DdnV5oMH4PABMLvxiB_Lr7LNk5lPdgOzOLCeLnjHyygISTLUKbn9_J-40xomVZfakAkryodCYWK8kSWQ1vTVROwkgpTxA108Yh8AOnBjEUs8csrtmI04ujTnJL7AYoQ0LnsVRMjUqKfWQ_jPt5wTOh-CKjWpBSCpH-OjXbnN0L_bw4vEEfH1YEA4Wi5jcQghEbIBdUcjRsdtfNUZTAVPG0m-EaYDpfEf7UWv-A9z6YhVlLOH67f5uIiskX_bIaiq7A7X3ahgWUzhNVpMjETrpNnu0hE4yI462wtMhEZDv4hO0mT_EeidTCC12609p8clPiWDiO4dTD8ivWqWVZgiK9EIurmxCZE1T489tuzpCZLcwya5Pu5Ddxv8jRzdSdRbX4lju8fCgCYmRS5C5N0VG_2t3bFqxpr8PsHndfobu5MyVCgyAzebOPksMD0ZXJjjhD10_gMU7Njkv5nLpXBIyk3dOkABVBxk5kmd1j1ftQS7j-AcGnvnagIw
YFaXWPfgtIRMFoBnRbaJzqSkyobeApAqWN6EhinC8CL9mC1mC9VE6qWykabg1Ev0O7Ke6lRvd9Yl14W1yqr507izMP8uFosjYkZj69FNiWg90I3jDgp0N5MsS3GweVT3ewBYA5AOcQiCmOf4Tcm-TKsYyIJVHrnxOZjD9wDFtviixlYb9GNYIlcvBmuLAWzZh_EUWXW2GRmW-_Y5jZfMUtE6mZVVuKBkKoy8MeiAPcfHg7a-x9Ri_YxArHzUHO0xzI2Zve4K9B3KeZaiIqGf6tIAW3BjFp6L-XnsulBkM7_A847l8vhs-om3wflTUCURuxlxVg0dFokkvIaxQUyeE2Fz-rkmWKHj1M9MLNQIEQ46laSKt4wjEutwsAH_2ycy_HpEsTk4NeGTbS0-RVMxl5OQBy4g6lBvUGMZ4Qm7UuslO5oSzTF8zYsziLX7remEgiIvRdhcMSkU11d4DLASwNNW_fYyacXH4X_fNBIvWdnIjcgrHGYiMNRZ-B6unFEqecfTzJX4zuJ11v1vbh631UnJnY1mOUJ9qTbK0GKrBYpc1dWpq6rRY8LC9xMbCFYFeTVQ8Zgh1IaGsYcnwhZnhbz8_xCsdznjkFKHOM0ttUetXbbSi2GvysJRJiuwnpdZCqjT8VCCvjPBZ7CaBSjb-_yF6T3zTNz3kPJtHA04pPBBhRxSMYPUdAJJfkfZdfjDDON_Qq41EkJhu2bv3MKkcgXSXxtI4j78iXnmg2dWUIjrTIkn__SbqtD5OS_1AIzSssHkuG5TV4iI6yHzTuyFhOAlUv4FOCY_ivIlueBXMC-ZFJ7w2N8gHwwlyfeEkW9RUh8UATkYhEPu4CS4CSNFdRsXBtdJ1cAez2YE88VVcKvE4jiZwFtw78D6CNkYfEJqA8csnLd87_8xSXhNWK238H_Tp3gLlNrMvZpSDEHwEV4pRM_V-HRGcY3jQ0WLCfyqdvJsMu6wBQlNlWpuFJUKiiH6dyu2QKghBQGiZD62AQswtQW4U7nTJZxI3IGEdkluUSd0JI1AUsWRWwpzNYIXhudF3ovPbUsSm0HDrt3wj9tdM6rJ6qcBR44wsIN0p39Jkd5K37-4riH_gOkuvZprE__b_cgfTCaMqCdO88rSDWr9GZkmjllInAp8SvBuuwvFtLy5raMPmxI8OKBdrGfmvbmUHNwQ19Je68cr40Pfmhk08S8yONFQITnC2V1jDxhu4SAX4pW-rQ8yOHXQ9Irb39k9D1HE0y4Dbo54uZZdqtC0EIY3kS2wxxRS8qv1t7Vrj1LVone_jnavXexVG0PBvtiolRtPWNAGLlq53NyJvCSQgOaT4KdwH_euJXYHLQ8N0rA8yICmTzNV_w2SpeZ-NKQIdlEz3Z0W7DqgI8gW8WNGIBKKvh5JdgWQeFHlyo1o4hjLp8R-5LDabjM36reOwCmZT5rdzeOdhkX_vk-PngneBRyOoW4ALQJrmzRiNqGbMw2mM3XOKxCmHASC7zJnyBSTus1ttKJbdoSl2yqAqeB1ClagU3GSuv2Ms577VxoB65z5Sm_LnGeX6ZEWUYhLzj0FKFaE2dTiXR01NYVSymVbTOHpYs4fLEmAjk--O6mwJW62SIVkgXlT13u6pncv97TIxLe8549jkOA4maFcVGmP2YPLgQ_Lx_EMzfa8lvJnOaVSPi020mV8Eli8AWbMoNENK_5pMPhWPWEgZNoPeeQaYQcP5VAKPR7WzocXgf8E5NELlTefe8mwxyuPumGn0u-vYUAKpMRfMEZMawSqnLf8Y2Ujyf488YSS9emX6laOl-oyFGMlv4cRLZF82a2akb2F0sO9-GmLMCv6Rqi6ySjsfpD14N162AibMG2Xl-8j2bDUqFlGAWW6_vwwZ_crZbHq1u6Z3fzr9iTr2cH_2BiN4lbJzw3yZLV7-RP9cDq5kTCzggFXRIxW7w0Q8mgyQjskeKxHbpXKSn1lWoUMX2wtZQl9OYisOVBeF-xeKi2YM8C1JRNRFE8nvLdd
AaqBgWVUQza5vRJOLqOD-PbIjWv60ifbHT90RwuHK8alvkNoc46eIZhSA6tYL5SmJtJUEBnqUxwb1EjyhlphD1q8h_WL-UXHtjE4uGQzHHFpXK3VPdH9qXF5zkmtMA87CiuRc8Jo-VDMCgKq208-zSj_8y-oTkDGb71bsszulB-GaG3Rn5L1fqvWMeGgwbksEyvwrgej8dhomyNg1YnzoyRXtdX4LyVOPGvx7JlBBE85vLQ697SgXD3SkRhr7ceZQHeWV8Cgq1Tvbnz88JK48fDBgGPnMP6lydp2hxKX_dW19iPycDEkmDALh-2U0_ZpiGPqA48E8cAf3HOYCpuzzdkzhvu-DOQU1WqbcUiPQeeWo1tS-1_HAXT18yRjMJZTQUJivVBq03meNQc-ZWFAcpxAtsNaS5fDLiNF51tatJA-N8y_WuiNUHlpxMgjPWGoR8Q== \ No newline at end of file +gAAAAABeDg5ZyNUhS3dc25RQZvpvHGAkmK6zQU8i3YRb9yUTC4MCQ0nkl215UQh0kNYoRcxaeJ7ecRYUjBamraRCVaaWvNal8DV9Ba9abHVZVA3T1rv7ptbcAmMgUK6e4LMc7xGB5w2GBTEjLSc9oGOhAiuFD-OPU6WVwEskiRD4D0SAmuRuI_8fTdpuV10plvGJNXM48TrDWabBnp2F2x-6GB0jqtuq6Z6yXIJ-cmgWRXzVAEj9NjYgKD3xeB4-8Mv9O1RMhazlRMQX5NFvcdNYPhdxIr0olFsoXhcavOmpsMNljLfl1MVnLtCp2QHzAjjIMKGtmBDYILUvKXdknmHi3hGcz_tTLCfSk976Qycf5pQwhlxNsoMfr4p3m3EvL7rXfwZCcctwiIZFzZn7SayMV1IMYUEAWR6JwbaUU93AHw_VrGM-urqMJIzZ_TMQ96RbFgwIjSB8352tOFbya-f1zDFupFDLwY2_XN4wrEactgvkvXI8aunJbBwP-u_YE4JjWU4EGUHebbwUSiDGk7GZ9Ji3KWsRq1QRv5anJMkGjrUSzkLv-5AJBgPz2DOtV6Y19KdqytKXyefE1cGhAXJfjlADkkWy7XikYxXoNSenBH0ccWujp_zn3XRnGc_IvcCyYYGON0VuGPKDDx3amZEGLBgdktkHYwoGkfLpPJygFkJB0-w4hJOsGNi1OQJMfqG5FcrsKBlSvvpsvMCSW_8eY4yQjb7P9DLF2A0kLoTANjg-8bVCUPxYVLMmXMnhJDdyJlq7cqwIcu-8-NgzDPfJFantgSLM2k066BOZpqsZqnaa2TouE9B9Ql3G4cgqEllWhY0v2keQpv3bvQBo9PU-zxztgs5yMrbuK2T2xn3nGyFuj3pchuWhlZiQptkAEi7GcM-iZ5n9WPUC_0qtusaXuddZPAvLLYxECqs9u-14eRykOmvHKRVKNg-AGaOtK602qxZhx3xgHwetHwOipmf-WsL54p7mxn37JyNoUk_O_gCkx5tRmUbCEFupILcfoHnR13cjozm6pqtquJfK1XxHBzHf6e-2DWuooLy3PI-vKKWce9QSjTQgOmxb6XVzdQIJ9vJB4GYgfeevRe9j73OWrKh2VGurVnt_K2wuhjOPRsHlTKMhpfPWlwLFHXdqTDZswRowdaOhcizvamsdI_rNAlRiyB4HQYXVdGLzYQMDEU-dm4psB-xO3UzmHKwGmPXaqsh4pzrC2j3UU4oznI3HE-Qqgj4XEXhilZ8RMmoQWvOhnl_WH7fxxAn7UE80J70w9X2S3yzV_evwPlIJtDc-e7-Rf2GzCln11zTBgOQ9u3aBK-8gLYZV3AF0yxOPpYy0BZAsRUJmrR6K8i3tvFc0_pZIAFwtB2esxJDsA-25CXKaphwGH-ikRl8Ls7WJeffjhPbexfgdbPHwxxanXdh0KMLs47BCOxCy0aardlTHj44wP6pNXD6URnM-d7Aj-SmEmCTBciNexmYlRwRPOb0DdnV5oMH4PABMLvxiB_Lr7
LNk5lPdgOzOLCeLnjHyygISTLUKbn9_J-40xomVZfakAkryodCYWK8kSWQ1vTVROwkgpTxA108Yh8AOnBjEUs8csrtmI04ujTnJL7AYoQ0LnsVRMjUqKfWQ_jPt5wTOh-CKjWpBSCpH-OjXbnN0L_bw4vEEfH1YEA4Wi5jcQghEbIBdUcjRsdtfNUZTAVPG0m-EaYDpfEf7UWv-A9z6YhVlLOH67f5uIiskX_bIaiq7A7X3ahgWUzhNVpMjETrpNnu0hE4yI462wtMhEZDv4hO0mT_EeidTCC12609p8clPiWDiO4dTD8ivWqWVZgiK9EIurmxCZE1T489tuzpCZLcwya5Pu5Ddxv8jRzdSdRbX4lju8fCgCYmRS5C5N0VG_2t3bFqxpr8PsHndfobu5MyVCgyAzebOPksMD0ZXJjjhD10_gMU7Njkv5nLpXBIyk3dOkABVBxk5kmd1j1ftQS7j-AcGnvnagIwYFaXWPfgtIRMFoBnRbaJzqSkyobeApAqWN6EhinC8CL9mC1mC9VE6qWykabg1Ev0O7Ke6lRvd9Yl14W1yqr507izMP8uFosjYkZj69FNiWg90I3jDgp0N5MsS3GweVT3ewBYA5AOcQiCmOf4Tcm-TKsYyIJVHrnxOZjD9wDFtviixlYb9GNYIlcvBmuLAWzZh_EUWXW2GRmW-_Y5jZfMUtE6mZVVuKBkKoy8MeiAPcfHg7a-x9Ri_YxArHzUHO0xzI2Zve4K9B3KeZaiIqGf6tIAW3BjFp6L-XnsulBkM7_A847l8vhs-om3wflTUCURuxlxVg0dFokkvIaxQUyeE2Fz-rkmWKHj1M9MLNQIEQ46laSKt4wjEutwsAH_2ycy_HpEsTk4NeGTbS0-RVMxl5OQBy4g6lBvUGMZ4Qm7UuslO5oSzTF8zYsziLX7remEgiIvRdhcMSkU11d4DLASwNNW_fYyacXH4X_fNBIvWdnIjcgrHGYiMNRZ-B6unFEqecfTzJX4zuJ11v1vbh631UnJnY1mOUJ9qTbK0GKrBYpc1dWpq6rRY8LC9xMbCFYFeTVQ8Zgh1IaGsYcnwhZnhbz8_xCsdznjkFKHOM0ttUetXbbSi2GvysJRJiuwnpdZCqjT8VCCvjPBZ7CaBSjb-_yF6T3zTNz3kPJtHA04pPBBhRxSMYPUdAJJfkfZdfjDDON_Qq41EkJhu2bv3MKkcgXSXxtI4j78iXnmg2dWUIjrTIkn__SbqtD5OS_1AIzSssHkuG5TV4iI6yHzTuyFhOAlUv4FOCY_ivIlueBXMC-ZFJ7w2N8gHwwlyfeEkW9RUh8UATkYhEPu4CS4CSNFdRsXBtdJ1cAez2YE88VVcKvE4jiZwFtw78D6CNkYfEJqA8csnLd87_8xSXhNWK238H_Tp3gLlNrMvZpSDEHwEV4pRM_V-HRGcY3jQ0WLCfyqdvJsMu6wBQlNlWpuFJUKiiH6dyu2QKghBQGiZD62AQswtQW4U7nTJZxI3IGEdkluUSd0JI1AUsWRWwpzNYIXhudF3ovPbUsSm0HDrt3wj9tdM6rJ6qcBR44wsIN0p39Jkd5K37-4riH_gOkuvZprE__b_cgfTCaMqCdO88rSDWr9GZkmjllInAp8SvBuuwvFtLy5raMPmxI8OKBdrGfmvbmUHNwQ19Je68cr40Pfmhk08S8yONFQITnC2V1jDxhu4SAX4pW-rQ8yOHXQ9Irb39k9D1HE0y4Dbo54uZZdqtC0EIY3kS2wxxRS8qv1t7Vrj1LVone_jnavXexVG0PBvtiolRtPWNAGLlq53NyJvCSQgOaT4KdwH_euJXYHLQ8N0rA8yICmTzNV_w2SpeZ-NKQIdlEz3Z0W7DqgI8gW8WNGIBKKvh5JdgWQeFHlyo1o4hjLp8R-5LDabjM36reOwCmZT5rdzeOdhkX_vk-PngneBRyOoW4ALQJrmzRiNqGbMw2mM3XOKxCmHASC7zJnyBSTus1ttKJbdoSl2yqAqeB1Cl
agU3GSuv2Ms577VxoB65z5Sm_LnGeX6ZEWUYhLzj0FKFaE2dTiXR01NYVSymVbTOHpYs4fLEmAjk--O6mwJW62SIVkgXlT13u6pncv97TIxLe8549jkOA4maFcVGmP2YPLgQ_Lx_EMzfa8lvJnOaVSPi020mV8Eli8AWbMoNENK_5pMPhWPWEgZNoPeeQaYQcP5VAKPR7WzocXgf8E5NELlTefe8mwxyuPumGn0u-vYUAKpMRfMEZMawSqnLf8Y2Ujyf488YSS9emX6laOl-oyFGMlv4cRLZF82a2akb2F0sO9-GmLMCv6Rqi6ySjsfpD14N162AibMG2Xl-8j2bDUqFlGAWW6_vwwZ_crZbHq1u6Z3fzr9iTr2cH_2BiN4lbJzw3yZLV7-RP9cDq5kTCzggFXRIxW7w0Q8mgyQjskeKxHbpXKSn1lWoUMX2wtZQl9OYisOVBeF-xeKi2YM8C1JRNRFE8nvLddAaqBgWVUQza5vRJOLqOD-PbIjWv60ifbHT90RwuHK8alvkNoc46eIZhSA6tYL5SmJtJUEBnqUxwb1EjyhlphD1q8h_WL-UXHtjE4uGQzHHFpXK3VPdH9qXF5zkmtMA87CiuRc8Jo-VDMCgKq208-zSj_8y-oTkDGb71bsszulB-GaG3Rn5L1fqvWMeGgwbksEyvwrgej8dhomyNg1YnzoyRXtdX4LyVOPGvx7JlBBE85vLQ697SgXD3SkRhr7ceZQHeWV8Cgq1Tvbnz88JK48fDBgGPnMP6lydp2hxKX_dW19iPycDEkmDALh-2U0_ZpiGPqA48E8cAf3HOYCpuzzdkzhvu-DOQU1WqbcUiPQeeWo1tS-1_HAXT18yRjMJZTQUJivVBq03meNQc-ZWFAcpxAtsNaS5fDLiNF51tatJA-N8y_WuiNUHlpxMgjPWGoR8Q== diff --git a/.github/workflows/triggers.yml b/.github/workflows/triggers.yml index 12d74cf7f0..a2cdbbb096 100644 --- a/.github/workflows/triggers.yml +++ b/.github/workflows/triggers.yml @@ -27,4 +27,4 @@ jobs: with: token: ${{ secrets.PRO_SUBMODULE }} repository: devitocodespro/devitopro - event-type: update-submodule \ No newline at end of file + event-type: update-submodule diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 2b6c4fe48a..9a29507f8c 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -76,4 +76,3 @@ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.ht For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq - diff --git a/benchmarks/user/advisor/README.md b/benchmarks/user/advisor/README.md index b4d0a14cb5..35f713c25a 100644 --- a/benchmarks/user/advisor/README.md +++ b/benchmarks/user/advisor/README.md @@ -85,4 +85,3 @@ advixe-cl --snapshot --project-dir= pack -- / Date: Wed, 24 Dec 2025 16:53:12 +0000 Subject: [PATCH 19/42] lint: First 
pass with ruff --fix --- benchmarks/user/advisor/roofline.py | 4 +- devito/arch/archinfo.py | 6 +- devito/arch/compiler.py | 41 ++--- devito/builtins/arithmetic.py | 2 +- devito/builtins/initializers.py | 2 +- devito/builtins/utils.py | 9 +- devito/core/arm.py | 8 +- devito/core/cpu.py | 18 +- devito/core/gpu.py | 23 ++- devito/core/intel.py | 13 +- devito/core/power.py | 8 +- devito/data/allocators.py | 12 +- devito/data/meta.py | 13 +- devito/data/utils.py | 16 +- devito/deprecations.py | 2 +- devito/finite_differences/differentiable.py | 16 +- .../finite_differences/finite_difference.py | 12 +- devito/finite_differences/rsfd.py | 2 +- devito/finite_differences/tools.py | 2 +- devito/ir/cgen/printer.py | 4 +- devito/ir/equations/algorithms.py | 2 +- devito/ir/equations/equation.py | 11 +- devito/ir/iet/efunc.py | 17 +- devito/ir/iet/nodes.py | 54 ++++-- devito/ir/iet/utils.py | 8 +- devito/ir/iet/visitors.py | 20 ++- devito/ir/stree/algorithms.py | 2 +- devito/ir/stree/tree.py | 11 +- devito/ir/support/basic.py | 4 +- devito/ir/support/guards.py | 19 ++- devito/ir/support/space.py | 15 +- devito/ir/support/syncs.py | 14 +- devito/ir/support/utils.py | 17 +- devito/ir/support/vector.py | 6 +- devito/logger.py | 17 +- devito/mpi/distributed.py | 15 +- devito/mpi/halo_scheme.py | 4 +- devito/mpi/routines.py | 2 +- devito/operations/solve.py | 2 +- devito/passes/clusters/asynchrony.py | 2 +- devito/passes/clusters/misc.py | 15 +- devito/passes/clusters/utils.py | 2 +- devito/passes/iet/languages/openacc.py | 2 +- devito/passes/iet/languages/openmp.py | 18 +- devito/passes/iet/languages/targets.py | 12 +- devito/passes/iet/languages/utils.py | 2 +- devito/passes/iet/misc.py | 9 +- devito/passes/iet/parpragma.py | 9 +- devito/symbolics/manipulation.py | 19 ++- devito/symbolics/queries.py | 24 ++- devito/symbolics/search.py | 13 +- devito/tools/abc.py | 2 +- devito/tools/data_structures.py | 16 +- devito/tools/memoization.py | 2 +- devito/tools/threading.py | 2 +- 
devito/tools/utils.py | 36 +++- devito/types/array.py | 12 +- devito/types/basic.py | 12 +- devito/types/caching.py | 2 +- devito/types/dense.py | 11 +- devito/types/dimension.py | 23 ++- devito/types/grid.py | 6 +- devito/types/misc.py | 25 ++- devito/types/object.py | 2 +- devito/types/parallel.py | 24 ++- devito/types/relational.py | 2 +- devito/types/sparse.py | 9 +- devito/types/tensor.py | 4 +- devito/types/utils.py | 19 ++- examples/cfd/01_convection.ipynb | 6 +- examples/cfd/01_convection_revisited.ipynb | 18 +- examples/cfd/02_convection_nonlinear.ipynb | 9 +- examples/cfd/03_diffusion.ipynb | 7 +- examples/cfd/03_diffusion_nonuniform.ipynb | 10 +- examples/cfd/04_burgers.ipynb | 36 ++-- examples/cfd/05_laplace.ipynb | 10 +- examples/cfd/06_poisson.ipynb | 10 +- examples/cfd/07_cavity_flow.ipynb | 57 ++++--- examples/cfd/08_shallow_water_equation.ipynb | 7 +- examples/cfd/09_Darcy_flow_equation.ipynb | 14 +- examples/cfd/example_diffusion.py | 2 +- examples/compiler/02_indexification.ipynb | 2 +- examples/compiler/04_iet-B.ipynb | 10 +- examples/finance/bs_ivbp.ipynb | 22 +-- examples/misc/linalg.py | 9 +- examples/performance/01_gpu.ipynb | 6 +- examples/performance/utils.py | 2 +- .../seismic/abc_methods/01_introduction.ipynb | 25 ++- examples/seismic/abc_methods/02_damping.ipynb | 61 ++++--- examples/seismic/abc_methods/03_pml.ipynb | 77 +++++---- examples/seismic/abc_methods/04_habc.ipynb | 154 +++++++++--------- examples/seismic/acoustic/accuracy.ipynb | 29 ++-- examples/seismic/model.py | 9 +- examples/seismic/preset_models.py | 2 +- .../sa_01_iso_implementation1.ipynb | 53 +++--- .../sa_02_iso_implementation2.ipynb | 43 +++-- .../self_adjoint/sa_03_iso_correctness.ipynb | 43 +++-- examples/seismic/source.py | 12 +- examples/seismic/tti/wavesolver.py | 1 - examples/seismic/tutorials/01_modelling.ipynb | 6 +- examples/seismic/tutorials/02_rtm.ipynb | 20 +-- examples/seismic/tutorials/03_fwi.ipynb | 45 +++-- examples/seismic/tutorials/04_dask.ipynb | 
32 ++-- .../seismic/tutorials/04_dask_pickling.ipynb | 67 ++++---- .../tutorials/05_staggered_acoustic.ipynb | 2 +- examples/seismic/tutorials/06_elastic.ipynb | 6 +- .../06_elastic_varying_parameters.ipynb | 8 +- .../tutorials/07.1_dispersion_relation.ipynb | 4 +- .../seismic/tutorials/07_DRP_schemes.ipynb | 18 +- .../seismic/tutorials/08_snapshotting.ipynb | 24 +-- .../seismic/tutorials/10_nmo_correction.ipynb | 8 +- .../seismic/tutorials/11_viscoacoustic.ipynb | 82 +++++----- .../seismic/tutorials/12_time_blocking.ipynb | 53 +++--- .../seismic/tutorials/13_LSRTM_acoustic.ipynb | 94 +++++------ .../tutorials/14_creating_synthetics.ipynb | 10 +- .../seismic/tutorials/15_tti_qp_pure.ipynb | 8 +- examples/seismic/utils.py | 6 +- examples/timestepping/superstep.ipynb | 4 +- examples/userapi/00_sympy.ipynb | 10 +- examples/userapi/03_subdomains.ipynb | 8 +- examples/userapi/04_boundary_conditions.ipynb | 6 +- .../userapi/05_conditional_dimension.ipynb | 2 +- examples/userapi/06_sparse_operations.ipynb | 4 +- .../userapi/07_functions_on_subdomains.ipynb | 4 +- tests/test_data.py | 26 +-- tests/test_derivatives.py | 2 +- tests/test_operator.py | 2 +- tests/test_save.py | 2 +- tests/test_subdomains.py | 8 +- tests/test_tensors.py | 4 +- 130 files changed, 1215 insertions(+), 895 deletions(-) diff --git a/benchmarks/user/advisor/roofline.py b/benchmarks/user/advisor/roofline.py index 60ac495420..6add2a22e4 100644 --- a/benchmarks/user/advisor/roofline.py +++ b/benchmarks/user/advisor/roofline.py @@ -130,7 +130,7 @@ def roofline(name, project, scale, precision, mode, th): # y = bandwidth * x x1, x2 = 0, min(width, max_compute_bandwidth / bandwidth) y1, y2 = 0, x2*bandwidth - label = '{} {:.0f} GB/s'.format(roof.name, bandwidth) + label = f'{roof.name} {bandwidth:.0f} GB/s' ax.plot([x1, x2], [y1, y2], '-', label=label) memory_roofs.append(((x1, x2), (y1, y2))) @@ -140,7 +140,7 @@ def roofline(name, project, scale, precision, mode, th): bandwidth /= scale # scale down as 
requested by the user x1, x2 = max(bandwidth / max_memory_bandwidth, 0), width y1, y2 = bandwidth, bandwidth - label = '{} {:.0f} GFLOPS'.format(roof.name, bandwidth) + label = f'{roof.name} {bandwidth:.0f} GFLOPS' ax.plot([x1, x2], [y1, y2], '-', label=label) compute_roofs.append(((x1, x2), (y1, y2))) diff --git a/devito/arch/archinfo.py b/devito/arch/archinfo.py index 54274c1729..7f6f769c0f 100644 --- a/devito/arch/archinfo.py +++ b/devito/arch/archinfo.py @@ -52,7 +52,7 @@ def get_cpu_info(): # Obtain textual cpu info try: - with open('/proc/cpuinfo', 'r') as f: + with open('/proc/cpuinfo') as f: lines = f.readlines() except FileNotFoundError: lines = [] @@ -731,9 +731,7 @@ def get_platform(): elif 'intel' in brand: # Most likely a desktop i3/i5/i7 return platform_registry['intel64'] - elif 'power8' in brand: - return platform_registry['power8'] - elif 'power9' in brand: + elif 'power8' in brand or 'power9' in brand: return platform_registry['power8'] elif 'arm' in brand: return platform_registry['arm'] diff --git a/devito/arch/compiler.py b/devito/arch/compiler.py index 6083286cab..ad6e7484d1 100644 --- a/devito/arch/compiler.py +++ b/devito/arch/compiler.py @@ -25,7 +25,7 @@ from devito.tools import (as_list, change_directory, filter_ordered, memoized_func, make_tempdir) -__all__ = ['sniff_mpi_distro', 'compiler_registry'] +__all__ = ['compiler_registry', 'sniff_mpi_distro'] @memoized_func @@ -53,11 +53,7 @@ def sniff_compiler_version(cc, allow_fail=False): ver = ver.strip() if ver.startswith("gcc"): compiler = "gcc" - elif ver.startswith("clang"): - compiler = "clang" - elif ver.startswith("Apple LLVM"): - compiler = "clang" - elif ver.startswith("Homebrew clang"): + elif ver.startswith("clang") or ver.startswith("Apple LLVM") or ver.startswith("Homebrew clang"): compiler = "clang" elif ver.startswith("Intel"): compiler = "icx" @@ -340,22 +336,21 @@ def make(self, loc, args): logfile = path.join(self.get_jit_dir(), f"{hash_key}.log") errfile = 
path.join(self.get_jit_dir(), f"{hash_key}.err") - with change_directory(loc): - with open(logfile, "w") as lf: - with open(errfile, "w") as ef: - - command = ['make'] + args - lf.write("Compilation command:\n") - lf.write(" ".join(command)) - lf.write("\n\n") - try: - check_call(command, stderr=ef, stdout=lf) - except CalledProcessError as e: - raise CompilationError(f'Command "{e.cmd}" return error status' - f'{e.returncode}. ' - f'Unable to compile code.\n' - f'Compile log in {logfile}\n' - f'Compile errors in {errfile}\n') + with change_directory(loc), open(logfile, "w") as lf: + with open(errfile, "w") as ef: + + command = ['make'] + args + lf.write("Compilation command:\n") + lf.write(" ".join(command)) + lf.write("\n\n") + try: + check_call(command, stderr=ef, stdout=lf) + except CalledProcessError as e: + raise CompilationError(f'Command "{e.cmd}" return error status' + f'{e.returncode}. ' + f'Unable to compile code.\n' + f'Compile log in {logfile}\n' + f'Compile errors in {errfile}\n') debug(f"Make <{' '.join(args)}>") def _cmdline(self, files, object=False): @@ -393,7 +388,7 @@ def jit_compile(self, soname, code): # Warning: dropping `code` on the floor in favor to whatever is written # within `src_file` try: - with open(src_file, 'r') as f: + with open(src_file) as f: code = f.read() code = f'{code}/* Backdoor edit at {time.ctime()}*/ \n' # Bypass the devito JIT cache diff --git a/devito/builtins/arithmetic.py b/devito/builtins/arithmetic.py index 350f5257bb..2996f595a8 100644 --- a/devito/builtins/arithmetic.py +++ b/devito/builtins/arithmetic.py @@ -3,7 +3,7 @@ import devito as dv from devito.builtins.utils import make_retval, check_builtins_args -__all__ = ['norm', 'sumall', 'sum', 'inner', 'mmin', 'mmax'] +__all__ = ['inner', 'mmax', 'mmin', 'norm', 'sum', 'sumall'] @dv.switchconfig(log_level='ERROR') diff --git a/devito/builtins/initializers.py b/devito/builtins/initializers.py index 99c4f386b0..1b5b66937a 100644 --- 
a/devito/builtins/initializers.py +++ b/devito/builtins/initializers.py @@ -4,7 +4,7 @@ from devito.tools import as_tuple, as_list from devito.builtins.utils import check_builtins_args, nbl_to_padsize, pad_outhalo -__all__ = ['assign', 'smooth', 'gaussian_smooth', 'initialize_function'] +__all__ = ['assign', 'gaussian_smooth', 'initialize_function', 'smooth'] @dv.switchconfig(log_level='ERROR') diff --git a/devito/builtins/utils.py b/devito/builtins/utils.py index a83dd765a8..ce6b61d957 100644 --- a/devito/builtins/utils.py +++ b/devito/builtins/utils.py @@ -7,8 +7,13 @@ from devito.symbolics import uxreplace from devito.tools import as_tuple -__all__ = ['make_retval', 'nbl_to_padsize', 'pad_outhalo', 'abstract_args', - 'check_builtins_args'] +__all__ = [ + 'abstract_args', + 'check_builtins_args', + 'make_retval', + 'nbl_to_padsize', + 'pad_outhalo', +] accumulator_mapper = { diff --git a/devito/core/arm.py b/devito/core/arm.py index f990ef31e0..9649f5aaa6 100644 --- a/devito/core/arm.py +++ b/devito/core/arm.py @@ -2,8 +2,12 @@ Cpu64AdvCOperator) from devito.passes.iet import OmpTarget, CXXOmpTarget -__all__ = ['ArmAdvCOperator', 'ArmAdvOmpOperator', 'ArmAdvCXXOperator', - 'ArmAdvCXXOmpOperator'] +__all__ = [ + 'ArmAdvCOperator', + 'ArmAdvCXXOmpOperator', + 'ArmAdvCXXOperator', + 'ArmAdvOmpOperator', +] ArmAdvOperator = Cpu64AdvOperator diff --git a/devito/core/cpu.py b/devito/core/cpu.py index 8cbef8b9f6..73d120bb78 100644 --- a/devito/core/cpu.py +++ b/devito/core/cpu.py @@ -14,10 +14,20 @@ check_stability) from devito.tools import timed_pass -__all__ = ['Cpu64NoopCOperator', 'Cpu64NoopOmpOperator', 'Cpu64AdvCOperator', - 'Cpu64AdvOmpOperator', 'Cpu64FsgCOperator', 'Cpu64FsgOmpOperator', - 'Cpu64CustomOperator', 'Cpu64CustomCXXOperator', 'Cpu64AdvCXXOperator', - 'Cpu64AdvCXXOmpOperator', 'Cpu64FsgCXXOperator', 'Cpu64FsgCXXOmpOperator'] +__all__ = [ + 'Cpu64AdvCOperator', + 'Cpu64AdvCXXOmpOperator', + 'Cpu64AdvCXXOperator', + 'Cpu64AdvOmpOperator', + 
'Cpu64CustomCXXOperator', + 'Cpu64CustomOperator', + 'Cpu64FsgCOperator', + 'Cpu64FsgCXXOmpOperator', + 'Cpu64FsgCXXOperator', + 'Cpu64FsgOmpOperator', + 'Cpu64NoopCOperator', + 'Cpu64NoopOmpOperator', +] class Cpu64OperatorMixin: diff --git a/devito/core/gpu.py b/devito/core/gpu.py index ed1bb58c81..108b1ab46b 100644 --- a/devito/core/gpu.py +++ b/devito/core/gpu.py @@ -15,12 +15,23 @@ relax_incr_dimensions, check_stability) from devito.tools import as_tuple, timed_pass -__all__ = ['DeviceNoopOperator', 'DeviceAdvOperator', 'DeviceCustomOperator', - 'DeviceNoopOmpOperator', 'DeviceAdvOmpOperator', 'DeviceFsgOmpOperator', - 'DeviceCustomOmpOperator', 'DeviceNoopAccOperator', 'DeviceAdvAccOperator', - 'DeviceFsgAccOperator', 'DeviceCustomAccOperator', 'DeviceNoopCXXOmpOperator', - 'DeviceAdvCXXOmpOperator', 'DeviceFsgCXXOmpOperator', - 'DeviceCustomCXXOmpOperator'] +__all__ = [ + 'DeviceAdvAccOperator', + 'DeviceAdvCXXOmpOperator', + 'DeviceAdvOmpOperator', + 'DeviceAdvOperator', + 'DeviceCustomAccOperator', + 'DeviceCustomCXXOmpOperator', + 'DeviceCustomOmpOperator', + 'DeviceCustomOperator', + 'DeviceFsgAccOperator', + 'DeviceFsgCXXOmpOperator', + 'DeviceFsgOmpOperator', + 'DeviceNoopAccOperator', + 'DeviceNoopCXXOmpOperator', + 'DeviceNoopOmpOperator', + 'DeviceNoopOperator', +] class DeviceOperatorMixin: diff --git a/devito/core/intel.py b/devito/core/intel.py index 9e378ffc12..8478189f92 100644 --- a/devito/core/intel.py +++ b/devito/core/intel.py @@ -3,9 +3,16 @@ Cpu64AdvCXXOperator, Cpu64AdvCXXOmpOperator, Cpu64FsgCXXOperator, Cpu64FsgCXXOmpOperator) -__all__ = ['Intel64AdvCOperator', 'Intel64AdvOmpOperator', 'Intel64FsgCOperator', - 'Intel64FsgOmpOperator', 'Intel64CXXAdvCOperator', 'Intel64AdvCXXOmpOperator', - 'Intel64FsgCXXOperator', 'Intel64FsgCXXOmpOperator'] +__all__ = [ + 'Intel64AdvCOperator', + 'Intel64AdvCXXOmpOperator', + 'Intel64AdvOmpOperator', + 'Intel64CXXAdvCOperator', + 'Intel64FsgCOperator', + 'Intel64FsgCXXOmpOperator', + 
'Intel64FsgCXXOperator', + 'Intel64FsgOmpOperator', +] Intel64AdvCOperator = Cpu64AdvCOperator diff --git a/devito/core/power.py b/devito/core/power.py index 65cf4c3cf3..2ae711dcc9 100644 --- a/devito/core/power.py +++ b/devito/core/power.py @@ -1,8 +1,12 @@ from devito.core.cpu import (Cpu64AdvCOperator, Cpu64AdvOmpOperator, Cpu64AdvCXXOperator, Cpu64AdvCXXOmpOperator) -__all__ = ['PowerAdvCOperator', 'PowerAdvOmpOperator', - 'PowerCXXAdvCOperator', 'PowerAdvCXXOmpOperator'] +__all__ = [ + 'PowerAdvCOperator', + 'PowerAdvCXXOmpOperator', + 'PowerAdvOmpOperator', + 'PowerCXXAdvCOperator', +] PowerAdvCOperator = Cpu64AdvCOperator PowerAdvOmpOperator = Cpu64AdvOmpOperator diff --git a/devito/data/allocators.py b/devito/data/allocators.py index 6b109a2cc2..eb2fc7bc45 100644 --- a/devito/data/allocators.py +++ b/devito/data/allocators.py @@ -11,9 +11,15 @@ from devito.parameters import configuration from devito.tools import is_integer, infer_datasize -__all__ = ['ALLOC_ALIGNED', 'ALLOC_NUMA_LOCAL', 'ALLOC_NUMA_ANY', - 'ALLOC_KNL_MCDRAM', 'ALLOC_KNL_DRAM', 'ALLOC_GUARD', - 'default_allocator'] +__all__ = [ + 'ALLOC_ALIGNED', + 'ALLOC_GUARD', + 'ALLOC_KNL_DRAM', + 'ALLOC_KNL_MCDRAM', + 'ALLOC_NUMA_ANY', + 'ALLOC_NUMA_LOCAL', + 'default_allocator', +] class AbstractMemoryAllocator: diff --git a/devito/data/meta.py b/devito/data/meta.py index a3b74647a5..41412c9996 100644 --- a/devito/data/meta.py +++ b/devito/data/meta.py @@ -1,7 +1,16 @@ from devito.tools import Tag -__all__ = ['DOMAIN', 'CORE', 'OWNED', 'HALO', 'NOPAD', 'FULL', - 'LEFT', 'RIGHT', 'CENTER'] +__all__ = [ + 'CENTER', + 'CORE', + 'DOMAIN', + 'FULL', + 'HALO', + 'LEFT', + 'NOPAD', + 'OWNED', + 'RIGHT', +] class DataRegion(Tag): diff --git a/devito/data/utils.py b/devito/data/utils.py index c0e241a723..d76db9d13a 100644 --- a/devito/data/utils.py +++ b/devito/data/utils.py @@ -2,9 +2,19 @@ from devito.tools import Tag, as_tuple, as_list, is_integer -__all__ = ['Index', 'NONLOCAL', 'PROJECTED', 
'index_is_basic', 'index_apply_modulo', - 'index_dist_to_repl', 'convert_index', 'index_handle_oob', - 'loc_data_idx', 'mpi_index_maps', 'flip_idx'] +__all__ = [ + 'NONLOCAL', + 'PROJECTED', + 'Index', + 'convert_index', + 'flip_idx', + 'index_apply_modulo', + 'index_dist_to_repl', + 'index_handle_oob', + 'index_is_basic', + 'loc_data_idx', + 'mpi_index_maps', +] class Index(Tag): diff --git a/devito/deprecations.py b/devito/deprecations.py index 2723cb31bb..dbba9a7665 100644 --- a/devito/deprecations.py +++ b/devito/deprecations.py @@ -2,7 +2,7 @@ from warnings import warn -class DevitoDeprecation(): +class DevitoDeprecation: @cached_property def coeff_warn(self): diff --git a/devito/finite_differences/differentiable.py b/devito/finite_differences/differentiable.py index cae122dfed..c4440ea432 100644 --- a/devito/finite_differences/differentiable.py +++ b/devito/finite_differences/differentiable.py @@ -21,8 +21,16 @@ from devito.types import Array, DimensionTuple, Evaluable, StencilDimension from devito.types.basic import AbstractFunction, Indexed -__all__ = ['Differentiable', 'DiffDerivative', 'IndexDerivative', 'EvalDerivative', - 'Weights', 'Real', 'Imag', 'Conj'] +__all__ = [ + 'Conj', + 'DiffDerivative', + 'Differentiable', + 'EvalDerivative', + 'Imag', + 'IndexDerivative', + 'Real', + 'Weights', +] class Differentiable(sympy.Expr, Evaluable): @@ -625,9 +633,7 @@ def _gather_for_diff(self): ref_inds = func_args.indices_ref.getters for f in self.args: - if f not in self._args_diff: - new_args.append(f) - elif f is func_args or isinstance(f, DifferentiableFunction): + if f not in self._args_diff or f is func_args or isinstance(f, DifferentiableFunction): new_args.append(f) else: ind_f = f.indices_ref.getters diff --git a/devito/finite_differences/finite_difference.py b/devito/finite_differences/finite_difference.py index 58a051347e..15abd179ce 100644 --- a/devito/finite_differences/finite_difference.py +++ b/devito/finite_differences/finite_difference.py @@ 
-7,8 +7,16 @@ from .tools import (left, right, generate_indices, centered, direct, transpose, check_input, fd_weights_registry, process_weights) -__all__ = ['first_derivative', 'cross_derivative', 'generic_derivative', - 'left', 'right', 'centered', 'transpose', 'generate_indices'] +__all__ = [ + 'centered', + 'cross_derivative', + 'first_derivative', + 'generate_indices', + 'generic_derivative', + 'left', + 'right', + 'transpose', +] # Number of digits for FD coefficients to avoid roundup errors and non-deterministic # code generation diff --git a/devito/finite_differences/rsfd.py b/devito/finite_differences/rsfd.py index 6f6ecf8a93..12e13a2c64 100644 --- a/devito/finite_differences/rsfd.py +++ b/devito/finite_differences/rsfd.py @@ -4,7 +4,7 @@ from .differentiable import Weights, DiffDerivative from .tools import generate_indices, fd_weights_registry -__all__ = ['drot', 'd45'] +__all__ = ['d45', 'drot'] smapper = {1: (1, 1, 1), 2: (1, 1, -1), 3: (1, -1, 1), 4: (1, -1, -1)} diff --git a/devito/finite_differences/tools.py b/devito/finite_differences/tools.py index 7786629999..8c3398f7cd 100644 --- a/devito/finite_differences/tools.py +++ b/devito/finite_differences/tools.py @@ -273,7 +273,7 @@ def generate_indices(expr, dim, order, side=None, matvec=None, x0=None, nweights f"({order + 1}) for order {order} scheme." 
f" Reducing order to {order}") # Evaluation point - x0 = sympify(((x0 or {}).get(dim) or expr.indices_ref[dim])) + x0 = sympify((x0 or {}).get(dim) or expr.indices_ref[dim]) # If provided a pure number, assume it's a valid index if x0.is_Number: diff --git a/devito/ir/cgen/printer.py b/devito/ir/cgen/printer.py index e4bff5de80..2d2ecd4e07 100644 --- a/devito/ir/cgen/printer.py +++ b/devito/ir/cgen/printer.py @@ -420,7 +420,7 @@ def _print_Fallback(self, expr): # Lifted from SymPy so that we go through our own `_print_math_func` -for k in ('exp log sin cos tan ceiling floor').split(): +for k in ['exp', 'log', 'sin', 'cos', 'tan', 'ceiling', 'floor']: setattr(BasePrinter, f'_print_{k}', BasePrinter._print_math_func) @@ -432,7 +432,7 @@ def _print_Fallback(self, expr): # Sympy 1.11 has introduced a bug in `_print_Add`, so we enforce here # to always use the correct one from our printer if Version(sympy.__version__) >= Version("1.11"): - setattr(sympy.printing.str.StrPrinter, '_print_Add', BasePrinter._print_Add) + sympy.printing.str.StrPrinter._print_Add = BasePrinter._print_Add def ccode(expr, printer=None, **settings): diff --git a/devito/ir/equations/algorithms.py b/devito/ir/equations/algorithms.py index ce844887aa..310c17ecf3 100644 --- a/devito/ir/equations/algorithms.py +++ b/devito/ir/equations/algorithms.py @@ -13,7 +13,7 @@ from devito.data.allocators import DataReference from devito.logger import warning -__all__ = ['dimension_sort', 'lower_exprs', 'concretize_subdims'] +__all__ = ['concretize_subdims', 'dimension_sort', 'lower_exprs'] def dimension_sort(expr): diff --git a/devito/ir/equations/equation.py b/devito/ir/equations/equation.py index f83dc39c94..be40773f3c 100644 --- a/devito/ir/equations/equation.py +++ b/devito/ir/equations/equation.py @@ -11,8 +11,15 @@ from devito.tools import Pickable, Tag, frozendict from devito.types import Eq, Inc, ReduceMax, ReduceMin, relational_min -__all__ = ['LoweredEq', 'ClusterizedEq', 'DummyEq', 'OpInc', 'OpMin', 
'OpMax', - 'identity_mapper'] +__all__ = [ + 'ClusterizedEq', + 'DummyEq', + 'LoweredEq', + 'OpInc', + 'OpMax', + 'OpMin', + 'identity_mapper', +] class IREq(sympy.Eq, Pickable): diff --git a/devito/ir/iet/efunc.py b/devito/ir/iet/efunc.py index 10aa8920e6..c2207094c8 100644 --- a/devito/ir/iet/efunc.py +++ b/devito/ir/iet/efunc.py @@ -5,9 +5,20 @@ from devito.symbolics import uxreplace from devito.tools import as_tuple -__all__ = ['ElementalFunction', 'ElementalCall', 'make_efunc', 'make_callable', - 'EntryFunction', 'AsyncCallable', 'AsyncCall', 'ThreadCallable', - 'DeviceFunction', 'DeviceCall', 'KernelLaunch', 'CommCallable'] +__all__ = [ + 'AsyncCall', + 'AsyncCallable', + 'CommCallable', + 'DeviceCall', + 'DeviceFunction', + 'ElementalCall', + 'ElementalFunction', + 'EntryFunction', + 'KernelLaunch', + 'ThreadCallable', + 'make_callable', + 'make_efunc', +] # ElementalFunction machinery diff --git a/devito/ir/iet/nodes.py b/devito/ir/iet/nodes.py index 3b979ddaf3..1f1fa8e5c6 100644 --- a/devito/ir/iet/nodes.py +++ b/devito/ir/iet/nodes.py @@ -23,14 +23,48 @@ Symbol) from devito.types.object import AbstractObject, LocalObject -__all__ = ['Node', 'MultiTraversable', 'Block', 'Expression', 'Callable', - 'Call', 'ExprStmt', 'Conditional', 'Iteration', 'List', 'Section', - 'TimedList', 'Prodder', 'MetaCall', 'PointerCast', 'HaloSpot', - 'Definition', 'ExpressionBundle', 'AugmentedExpression', 'Break', - 'Increment', 'Return', 'While', 'ListMajor', 'ParallelIteration', - 'ParallelBlock', 'Dereference', 'Lambda', 'SyncSpot', 'Pragma', - 'DummyExpr', 'BlankLine', 'ParallelTree', 'BusyWait', 'UsingNamespace', - 'Using', 'CallableBody', 'Transfer', 'EmptyList', 'Switch'] +__all__ = [ + 'AugmentedExpression', + 'BlankLine', + 'Block', + 'Break', + 'BusyWait', + 'Call', + 'Callable', + 'CallableBody', + 'Conditional', + 'Definition', + 'Dereference', + 'DummyExpr', + 'EmptyList', + 'ExprStmt', + 'Expression', + 'ExpressionBundle', + 'HaloSpot', + 'Increment', + 
'Iteration', + 'Lambda', + 'List', + 'ListMajor', + 'MetaCall', + 'MultiTraversable', + 'Node', + 'ParallelBlock', + 'ParallelIteration', + 'ParallelTree', + 'PointerCast', + 'Pragma', + 'Prodder', + 'Return', + 'Section', + 'Switch', + 'SyncSpot', + 'TimedList', + 'Transfer', + 'Using', + 'UsingNamespace', + 'While', +] # First-class IET nodes @@ -446,8 +480,8 @@ def is_initializable(self): """ True if it can be an initializing assignment, False otherwise. """ - return (((self.is_scalar and not self.is_reduction) or - (self.is_tensor and isinstance(self.expr.rhs, ListInitializer)))) + return ((self.is_scalar and not self.is_reduction) or + (self.is_tensor and isinstance(self.expr.rhs, ListInitializer))) @property def defines(self): diff --git a/devito/ir/iet/utils.py b/devito/ir/iet/utils.py index 0ffe7c3d36..1f693a7299 100644 --- a/devito/ir/iet/utils.py +++ b/devito/ir/iet/utils.py @@ -5,8 +5,12 @@ from devito.tools import filter_ordered from devito.types import Global -__all__ = ['filter_iterations', 'retrieve_iteration_tree', 'derive_parameters', - 'maybe_alias'] +__all__ = [ + 'derive_parameters', + 'filter_iterations', + 'maybe_alias', + 'retrieve_iteration_tree', +] class IterationTree(tuple): diff --git a/devito/ir/iet/visitors.py b/devito/ir/iet/visitors.py index 8f6ae8f02f..111a31b8eb 100644 --- a/devito/ir/iet/visitors.py +++ b/devito/ir/iet/visitors.py @@ -29,10 +29,22 @@ IndexedData, DeviceMap) -__all__ = ['FindApplications', 'FindNodes', 'FindWithin', 'FindSections', - 'FindSymbols', 'MapExprStmts', 'MapHaloSpots', 'MapNodes', - 'IsPerfectIteration', 'printAST', 'CGen', 'CInterface', 'Transformer', - 'Uxreplace'] +__all__ = [ + 'CGen', + 'CInterface', + 'FindApplications', + 'FindNodes', + 'FindSections', + 'FindSymbols', + 'FindWithin', + 'IsPerfectIteration', + 'MapExprStmts', + 'MapHaloSpots', + 'MapNodes', + 'Transformer', + 'Uxreplace', + 'printAST', +] class Visitor(GenericVisitor): diff --git a/devito/ir/stree/algorithms.py 
b/devito/ir/stree/algorithms.py index a85b93460d..07e8094700 100644 --- a/devito/ir/stree/algorithms.py +++ b/devito/ir/stree/algorithms.py @@ -117,7 +117,7 @@ def stree_build(clusters, profiler=None, **kwargs): candidates = tuple(reversed(tip.ancestors[1:] + (tip,))) if not any(i.is_Iteration and i.dim.is_Time for i in candidates) and \ - not candidates[-1] is stree: + candidates[-1] is not stree: attach_section(candidates[-1]) continue diff --git a/devito/ir/stree/tree.py b/devito/ir/stree/tree.py index 252349d9e6..b04fb2942f 100644 --- a/devito/ir/stree/tree.py +++ b/devito/ir/stree/tree.py @@ -2,8 +2,15 @@ from devito.ir.support import WithLock, PrefetchUpdate -__all__ = ["ScheduleTree", "NodeSection", "NodeIteration", "NodeConditional", - "NodeSync", "NodeExprs", "NodeHalo"] +__all__ = [ + "NodeConditional", + "NodeExprs", + "NodeHalo", + "NodeIteration", + "NodeSection", + "NodeSync", + "ScheduleTree", +] class ScheduleTree(NodeMixin): diff --git a/devito/ir/support/basic.py b/devito/ir/support/basic.py index 102be7a006..c55b772c4a 100644 --- a/devito/ir/support/basic.py +++ b/devito/ir/support/basic.py @@ -1,7 +1,7 @@ from collections.abc import Iterable from itertools import chain, product from functools import cached_property -from typing import Callable +from collections.abc import Callable from sympy import S, Expr import sympy @@ -19,7 +19,7 @@ CriticalRegion, Function, Symbol, Temp, TempArray, TBArray) -__all__ = ['IterationInstance', 'TimedAccess', 'Scope', 'ExprGeometry'] +__all__ = ['ExprGeometry', 'IterationInstance', 'Scope', 'TimedAccess'] class IndexMode(Tag): diff --git a/devito/ir/support/guards.py b/devito/ir/support/guards.py index a014db8abb..4ccdfa89e9 100644 --- a/devito/ir/support/guards.py +++ b/devito/ir/support/guards.py @@ -17,9 +17,18 @@ from devito.tools import Pickable, as_tuple, frozendict, split from devito.types import Dimension, LocalObject -__all__ = ['GuardFactor', 'GuardBound', 'GuardBoundNext', 'BaseGuardBound', - 
'BaseGuardBoundNext', 'GuardOverflow', 'Guards', 'GuardExpr', - 'GuardSwitch', 'GuardCaseSwitch'] +__all__ = [ + 'BaseGuardBound', + 'BaseGuardBoundNext', + 'GuardBound', + 'GuardBoundNext', + 'GuardCaseSwitch', + 'GuardExpr', + 'GuardFactor', + 'GuardOverflow', + 'GuardSwitch', + 'Guards', +] class AbstractGuard: @@ -445,9 +454,7 @@ def simplify_and(relation, v): covered = True try: - if type(a) in (Gt, Ge) and v.rhs > a.rhs: - new_args.append(v) - elif type(a) in (Lt, Le) and v.rhs < a.rhs: + if type(a) in (Gt, Ge) and v.rhs > a.rhs or type(a) in (Lt, Le) and v.rhs < a.rhs: new_args.append(v) else: new_args.append(a) diff --git a/devito/ir/support/space.py b/devito/ir/support/space.py index f43351001c..2e532b7c1c 100644 --- a/devito/ir/support/space.py +++ b/devito/ir/support/space.py @@ -12,9 +12,18 @@ toposort) from devito.types import Dimension, ModuloDimension -__all__ = ['NullInterval', 'Interval', 'IntervalGroup', 'IterationSpace', - 'IterationInterval', 'DataSpace', 'Forward', 'Backward', 'Any', - 'null_ispace'] +__all__ = [ + 'Any', + 'Backward', + 'DataSpace', + 'Forward', + 'Interval', + 'IntervalGroup', + 'IterationInterval', + 'IterationSpace', + 'NullInterval', + 'null_ispace', +] # The default Stamp, used by all new Intervals diff --git a/devito/ir/support/syncs.py b/devito/ir/support/syncs.py index d8dec1686e..44dc39cf67 100644 --- a/devito/ir/support/syncs.py +++ b/devito/ir/support/syncs.py @@ -9,8 +9,18 @@ from devito.tools import Pickable, as_tuple, filter_ordered, frozendict from .utils import IMask -__all__ = ['WaitLock', 'ReleaseLock', 'WithLock', 'InitArray', 'SyncArray', - 'PrefetchUpdate', 'SnapOut', 'SnapIn', 'Ops', 'normalize_syncs'] +__all__ = [ + 'InitArray', + 'Ops', + 'PrefetchUpdate', + 'ReleaseLock', + 'SnapIn', + 'SnapOut', + 'SyncArray', + 'WaitLock', + 'WithLock', + 'normalize_syncs', +] class SyncOp(Pickable): diff --git a/devito/ir/support/utils.py b/devito/ir/support/utils.py index 0e75619cbc..32dd3a9b88 100644 --- 
a/devito/ir/support/utils.py +++ b/devito/ir/support/utils.py @@ -8,9 +8,20 @@ from devito.types import (Dimension, DimensionTuple, Indirection, ModuloDimension, StencilDimension) -__all__ = ['AccessMode', 'Stencil', 'IMask', 'detect_accesses', 'detect_io', - 'pull_dims', 'unbounded', 'minimum', 'maximum', 'minmax_index', - 'extrema', 'erange'] +__all__ = [ + 'AccessMode', + 'IMask', + 'Stencil', + 'detect_accesses', + 'detect_io', + 'erange', + 'extrema', + 'maximum', + 'minimum', + 'minmax_index', + 'pull_dims', + 'unbounded', +] class AccessMode: diff --git a/devito/ir/support/vector.py b/devito/ir/support/vector.py index 1fa90379af..4aa74ba060 100644 --- a/devito/ir/support/vector.py +++ b/devito/ir/support/vector.py @@ -6,7 +6,7 @@ from devito.tools import as_tuple, is_integer, memoized_meth from devito.types import Dimension -__all__ = ['Vector', 'LabeledVector', 'vmin', 'vmax'] +__all__ = ['LabeledVector', 'Vector', 'vmax', 'vmin'] class Vector(tuple): @@ -253,9 +253,7 @@ def distance(self, other): """ try: # Handle quickly the special (yet relevant) cases `other == 0` - if is_integer(other) and other == 0: - return self - elif all(i == 0 for i in other) and self.rank == other.rank: + if is_integer(other) and other == 0 or all(i == 0 for i in other) and self.rank == other.rank: return self except TypeError: pass diff --git a/devito/logger.py b/devito/logger.py index f3390aef86..1efa6f1b9b 100644 --- a/devito/logger.py +++ b/devito/logger.py @@ -4,9 +4,20 @@ import sys from contextlib import contextmanager -__all__ = ('set_log_level', 'set_log_noperf', 'is_log_enabled_for', 'switch_log_level', - 'log', 'warning', 'error', 'perf', 'hint', - 'RED', 'GREEN', 'BLUE') +__all__ = ( + 'BLUE', + 'GREEN', + 'RED', + 'error', + 'hint', + 'is_log_enabled_for', + 'log', + 'perf', + 'set_log_level', + 'set_log_noperf', + 'switch_log_level', + 'warning', +) logger = logging.getLogger('Devito') diff --git a/devito/mpi/distributed.py b/devito/mpi/distributed.py index 
431a8fbec1..3c251d0a22 100644 --- a/devito/mpi/distributed.py +++ b/devito/mpi/distributed.py @@ -64,8 +64,15 @@ def __getattr__(self, name): return None -__all__ = ['Distributor', 'SubDistributor', 'SparseDistributor', 'MPI', - 'CustomTopology', 'devito_mpi_init', 'devito_mpi_finalize'] +__all__ = [ + 'MPI', + 'CustomTopology', + 'Distributor', + 'SparseDistributor', + 'SubDistributor', + 'devito_mpi_finalize', + 'devito_mpi_init', +] def devito_mpi_init(): @@ -803,7 +810,7 @@ def __init__(self, comm=None): self.comm = comm def _arg_values(self, *args, **kwargs): - grid = kwargs.get('grid', None) + grid = kwargs.get('grid') # Update `comm` based on object attached to `grid` if grid is not None: return grid.distributor._obj_comm._arg_defaults() @@ -856,7 +863,7 @@ def _arg_defaults(self): return values def _arg_values(self, *args, **kwargs): - grid = kwargs.get('grid', None) + grid = kwargs.get('grid') # Update `nb` based on object attached to `grid` if grid is not None: return grid.distributor._obj_neighborhood._arg_defaults() diff --git a/devito/mpi/halo_scheme.py b/devito/mpi/halo_scheme.py index 9e7bfca40e..062b0a35b6 100644 --- a/devito/mpi/halo_scheme.py +++ b/devito/mpi/halo_scheme.py @@ -523,9 +523,7 @@ def classify(exprs, ispace): mapper = {} for f, r in scope.reads.items(): - if not f.is_DiscreteFunction: - continue - elif f.grid is None: + if not f.is_DiscreteFunction or f.grid is None: continue # In the case of custom topologies, we ignore the Dimensions that aren't diff --git a/devito/mpi/routines.py b/devito/mpi/routines.py index dcf04e8f44..c646951a12 100644 --- a/devito/mpi/routines.py +++ b/devito/mpi/routines.py @@ -1398,7 +1398,7 @@ def __init__(self, arguments, **kwargs): super().__init__('MPI_Allreduce', arguments, **kwargs) -class ReductionBuilder(object): +class ReductionBuilder: """ Build IET routines performing MPI reductions. 
diff --git a/devito/operations/solve.py b/devito/operations/solve.py index 0203dbe26d..a6d1a55910 100644 --- a/devito/operations/solve.py +++ b/devito/operations/solve.py @@ -7,7 +7,7 @@ from devito.finite_differences.derivative import Derivative from devito.tools import as_tuple -__all__ = ['solve', 'linsolve'] +__all__ = ['linsolve', 'solve'] class SolveError(Exception): diff --git a/devito/passes/clusters/asynchrony.py b/devito/passes/clusters/asynchrony.py index bfbe7a0bba..557ca7c5c2 100644 --- a/devito/passes/clusters/asynchrony.py +++ b/devito/passes/clusters/asynchrony.py @@ -9,7 +9,7 @@ from devito.tools import OrderedSet, is_integer, timed_pass from devito.types import CustomDimension, Lock -__all__ = ['tasking', 'memcpy_prefetch'] +__all__ = ['memcpy_prefetch', 'tasking'] def async_trigger(c, dims): diff --git a/devito/passes/clusters/misc.py b/devito/passes/clusters/misc.py index fff9f71e63..46bc8707da 100644 --- a/devito/passes/clusters/misc.py +++ b/devito/passes/clusters/misc.py @@ -10,7 +10,7 @@ from devito.tools import DAG, Stamp, as_tuple, flatten, frozendict, timed_pass from devito.types import Hyperplane -__all__ = ['Lift', 'fuse', 'optimize_pows', 'fission', 'optimize_hyperplanes'] +__all__ = ['Lift', 'fission', 'fuse', 'optimize_hyperplanes', 'optimize_pows'] class Lift(Queue): @@ -369,17 +369,8 @@ def is_cross(source, sink): # and forbid any sort of fusion. 
Fences have the same effect elif (any(scope.d_anti_gen()) or any(i.is_iaw for i in scope.d_output_gen()) or - any(c.is_fence for c in flatten(cgroups[n:n1+1]))): - dag.add_edge(cg0, cg1) - - # Any flow-dependences along an inner Dimension (i.e., a Dimension - # that doesn't appear in `prefix`) impose that `cg1` follows `cg0` - elif any(not (i.cause and i.cause & prefix) - for i in scope.d_flow_gen()): - dag.add_edge(cg0, cg1) - - # Clearly, output dependences must be honored - elif any(scope.d_output_gen()): + any(c.is_fence for c in flatten(cgroups[n:n1+1]))) or any(not (i.cause and i.cause & prefix) + for i in scope.d_flow_gen()) or any(scope.d_output_gen()): dag.add_edge(cg0, cg1) return dag diff --git a/devito/passes/clusters/utils.py b/devito/passes/clusters/utils.py index 7a48f3c486..ff9cc2d95e 100644 --- a/devito/passes/clusters/utils.py +++ b/devito/passes/clusters/utils.py @@ -2,7 +2,7 @@ from devito.tools import as_tuple from devito.types import CriticalRegion, Eq, Symbol -__all__ = ['is_memcpy', 'make_critical_sequence', 'in_critical_region'] +__all__ = ['in_critical_region', 'is_memcpy', 'make_critical_sequence'] def is_memcpy(expr): diff --git a/devito/passes/iet/languages/openacc.py b/devito/passes/iet/languages/openacc.py index a6c33992ad..706b45bb17 100644 --- a/devito/passes/iet/languages/openacc.py +++ b/devito/passes/iet/languages/openacc.py @@ -15,7 +15,7 @@ from devito.tools import filter_ordered, UnboundTuple from devito.types import Symbol -__all__ = ['DeviceAccizer', 'DeviceAccDataManager', 'AccOrchestrator'] +__all__ = ['AccOrchestrator', 'DeviceAccDataManager', 'DeviceAccizer'] class DeviceAccIteration(PragmaIteration): diff --git a/devito/passes/iet/languages/openmp.py b/devito/passes/iet/languages/openmp.py index bdbee7fe3e..c12de9a617 100644 --- a/devito/passes/iet/languages/openmp.py +++ b/devito/passes/iet/languages/openmp.py @@ -20,10 +20,20 @@ from devito.symbolics import CondEq, DefFunction from devito.tools import filter_ordered 
-__all__ = ['SimdOmpizer', 'Ompizer', 'OmpIteration', 'OmpRegion', - 'DeviceOmpizer', 'DeviceOmpIteration', 'DeviceOmpDataManager', - 'OmpDataManager', 'OmpOrchestrator', 'DeviceOmpOrchestrator', - 'CXXOmpDataManager', 'CXXOmpOrchestrator'] +__all__ = [ + 'CXXOmpDataManager', + 'CXXOmpOrchestrator', + 'DeviceOmpDataManager', + 'DeviceOmpIteration', + 'DeviceOmpOrchestrator', + 'DeviceOmpizer', + 'OmpDataManager', + 'OmpIteration', + 'OmpOrchestrator', + 'OmpRegion', + 'Ompizer', + 'SimdOmpizer', +] class OmpRegion(ParallelBlock): diff --git a/devito/passes/iet/languages/targets.py b/devito/passes/iet/languages/targets.py index 09fd05b661..2993a5b3e6 100644 --- a/devito/passes/iet/languages/targets.py +++ b/devito/passes/iet/languages/targets.py @@ -9,8 +9,16 @@ AccOrchestrator, AccPrinter) from devito.passes.iet.instrument import instrument -__all__ = ['CTarget', 'OmpTarget', 'COmpTarget', 'DeviceOmpTarget', 'DeviceAccTarget', - 'CXXTarget', 'CXXOmpTarget', 'DeviceCXXOmpTarget'] +__all__ = [ + 'COmpTarget', + 'CTarget', + 'CXXOmpTarget', + 'CXXTarget', + 'DeviceAccTarget', + 'DeviceCXXOmpTarget', + 'DeviceOmpTarget', + 'OmpTarget', +] class Target: diff --git a/devito/passes/iet/languages/utils.py b/devito/passes/iet/languages/utils.py index b37e6364ca..e22e345791 100644 --- a/devito/passes/iet/languages/utils.py +++ b/devito/passes/iet/languages/utils.py @@ -4,7 +4,7 @@ from devito.exceptions import InvalidOperator from devito.ir import List -__all__ = ['joins', '_atomic_add_split'] +__all__ = ['_atomic_add_split', 'joins'] def joins(*symbols): diff --git a/devito/passes/iet/misc.py b/devito/passes/iet/misc.py index 000763332e..dd1bf2814b 100644 --- a/devito/passes/iet/misc.py +++ b/devito/passes/iet/misc.py @@ -17,8 +17,13 @@ from devito.tools import Bunch, as_mapper, filter_ordered, split, as_tuple from devito.types import FIndexed -__all__ = ['avoid_denormals', 'hoist_prodders', 'relax_incr_dimensions', - 'generate_macros', 'minimize_symbols'] +__all__ = [ + 
'avoid_denormals', + 'generate_macros', + 'hoist_prodders', + 'minimize_symbols', + 'relax_incr_dimensions', +] @iet_pass diff --git a/devito/passes/iet/parpragma.py b/devito/passes/iet/parpragma.py index c8443ceb79..880d819e2e 100644 --- a/devito/passes/iet/parpragma.py +++ b/devito/passes/iet/parpragma.py @@ -18,8 +18,13 @@ from devito.tools import as_tuple, flatten, is_integer, prod from devito.types import Symbol -__all__ = ['PragmaSimdTransformer', 'PragmaShmTransformer', - 'PragmaDeviceAwareTransformer', 'PragmaLangBB', 'PragmaTransfer'] +__all__ = [ + 'PragmaDeviceAwareTransformer', + 'PragmaLangBB', + 'PragmaShmTransformer', + 'PragmaSimdTransformer', + 'PragmaTransfer', +] class PragmaTransformer(LangTransformer): diff --git a/devito/symbolics/manipulation.py b/devito/symbolics/manipulation.py index e43494974d..70a4f49cd0 100644 --- a/devito/symbolics/manipulation.py +++ b/devito/symbolics/manipulation.py @@ -25,10 +25,21 @@ from devito.types.equation import Eq from devito.types.relational import Le, Lt, Gt, Ge -__all__ = ['xreplace_indices', 'pow_to_mul', 'indexify', 'subs_op_args', - 'normalize_args', 'uxreplace', 'Uxmapper', 'subs_if_composite', - 'reuse_if_untouched', 'evalrel', 'flatten_args', 'unevaluate', - 'as_long'] +__all__ = [ + 'Uxmapper', + 'as_long', + 'evalrel', + 'flatten_args', + 'indexify', + 'normalize_args', + 'pow_to_mul', + 'reuse_if_untouched', + 'subs_if_composite', + 'subs_op_args', + 'unevaluate', + 'uxreplace', + 'xreplace_indices', +] def uxreplace(expr, rule): diff --git a/devito/symbolics/queries.py b/devito/symbolics/queries.py index 2496a0aeb9..dec4728254 100644 --- a/devito/symbolics/queries.py +++ b/devito/symbolics/queries.py @@ -10,10 +10,26 @@ from devito.types.object import AbstractObject -__all__ = ['q_leaf', 'q_indexed', 'q_terminal', 'q_function', 'q_routine', - 'q_terminalop', 'q_indirect', 'q_constant', 'q_affine', 'q_linear', - 'q_identity', 'q_symbol', 'q_comp_acc', 'q_multivar', 'q_monoaffine', - 'q_dimension', 
'q_positive', 'q_negative'] +__all__ = [ + 'q_affine', + 'q_comp_acc', + 'q_constant', + 'q_dimension', + 'q_function', + 'q_identity', + 'q_indexed', + 'q_indirect', + 'q_leaf', + 'q_linear', + 'q_monoaffine', + 'q_multivar', + 'q_negative', + 'q_positive', + 'q_routine', + 'q_symbol', + 'q_terminal', + 'q_terminalop', +] # The following SymPy objects are considered tree leaves: diff --git a/devito/symbolics/search.py b/devito/symbolics/search.py index 9c801470ac..94072520ef 100644 --- a/devito/symbolics/search.py +++ b/devito/symbolics/search.py @@ -9,9 +9,16 @@ q_symbol, q_dimension, q_derivative) from devito.tools import as_tuple -__all__ = ['retrieve_indexed', 'retrieve_functions', 'retrieve_function_carriers', - 'retrieve_terminals', 'retrieve_symbols', 'retrieve_dimensions', - 'retrieve_derivatives', 'search'] +__all__ = [ + 'retrieve_derivatives', + 'retrieve_dimensions', + 'retrieve_function_carriers', + 'retrieve_functions', + 'retrieve_indexed', + 'retrieve_symbols', + 'retrieve_terminals', + 'search', +] Expression = sympy.Basic | np.number | int | float diff --git a/devito/tools/abc.py b/devito/tools/abc.py index 162b3287d3..4fe295ee07 100644 --- a/devito/tools/abc.py +++ b/devito/tools/abc.py @@ -2,7 +2,7 @@ from hashlib import sha1 -__all__ = ['Tag', 'Signer', 'Reconstructable', 'Pickable', 'Singleton', 'Stamp'] +__all__ = ['Pickable', 'Reconstructable', 'Signer', 'Singleton', 'Stamp', 'Tag'] class Tag(abc.ABC): diff --git a/devito/tools/data_structures.py b/devito/tools/data_structures.py index bf411900cd..1e7e46ad54 100644 --- a/devito/tools/data_structures.py +++ b/devito/tools/data_structures.py @@ -10,9 +10,19 @@ from devito.tools.utils import as_tuple, filter_ordered, humanbytes from devito.tools.algorithms import toposort -__all__ = ['Bunch', 'EnrichedTuple', 'ReducerMap', 'DefaultOrderedDict', - 'OrderedSet', 'Ordering', 'DAG', 'frozendict', - 'UnboundTuple', 'UnboundedMultiTuple', 'MemoryEstimate'] +__all__ = [ + 'DAG', + 'Bunch', + 
'DefaultOrderedDict', + 'EnrichedTuple', + 'MemoryEstimate', + 'OrderedSet', + 'Ordering', + 'ReducerMap', + 'UnboundTuple', + 'UnboundedMultiTuple', + 'frozendict', +] class Bunch: diff --git a/devito/tools/memoization.py b/devito/tools/memoization.py index b0733dac56..b24a9166f6 100644 --- a/devito/tools/memoization.py +++ b/devito/tools/memoization.py @@ -3,7 +3,7 @@ from itertools import tee from typing import TypeVar -__all__ = ['memoized_func', 'memoized_meth', 'memoized_generator', 'CacheInstances'] +__all__ = ['CacheInstances', 'memoized_func', 'memoized_generator', 'memoized_meth'] class memoized_func: diff --git a/devito/tools/threading.py b/devito/tools/threading.py index 6a0beb59f2..5e661644c7 100644 --- a/devito/tools/threading.py +++ b/devito/tools/threading.py @@ -1,6 +1,6 @@ import threading -__all__ = ['sympy_mutex', 'safe_dict_copy'] +__all__ = ['safe_dict_copy', 'sympy_mutex'] sympy_mutex = threading.RLock() diff --git a/devito/tools/utils.py b/devito/tools/utils.py index 546c5cd49f..51adefd158 100644 --- a/devito/tools/utils.py +++ b/devito/tools/utils.py @@ -8,12 +8,36 @@ import numpy as np import sympy -__all__ = ['prod', 'as_tuple', 'is_integer', 'generator', 'grouper', 'split', - 'roundm', 'powerset', 'invert', 'flatten', 'single_or', 'filter_ordered', - 'as_mapper', 'filter_sorted', 'pprint', 'sweep', 'all_equal', 'as_list', - 'indices_to_slices', 'indices_to_sections', 'transitive_closure', - 'humanbytes', 'contains_val', 'sorted_priority', 'as_set', 'is_number', - 'smart_lt', 'smart_gt'] +__all__ = [ + 'all_equal', + 'as_list', + 'as_mapper', + 'as_set', + 'as_tuple', + 'contains_val', + 'filter_ordered', + 'filter_sorted', + 'flatten', + 'generator', + 'grouper', + 'humanbytes', + 'indices_to_sections', + 'indices_to_slices', + 'invert', + 'is_integer', + 'is_number', + 'powerset', + 'pprint', + 'prod', + 'roundm', + 'single_or', + 'smart_gt', + 'smart_lt', + 'sorted_priority', + 'split', + 'sweep', + 'transitive_closure', +] def 
prod(iterable, initial=1): diff --git a/devito/types/array.py b/devito/types/array.py index cb1815f644..845eef1976 100644 --- a/devito/types/array.py +++ b/devito/types/array.py @@ -9,8 +9,16 @@ from devito.types.basic import AbstractFunction, LocalType from devito.types.utils import CtypesFactory, DimensionTuple -__all__ = ['Array', 'ArrayMapped', 'ArrayObject', 'PointerArray', 'Bundle', - 'ComponentAccess', 'Bag', 'BundleView'] +__all__ = [ + 'Array', + 'ArrayMapped', + 'ArrayObject', + 'Bag', + 'Bundle', + 'BundleView', + 'ComponentAccess', + 'PointerArray', +] class ArrayBasic(AbstractFunction, LocalType): diff --git a/devito/types/basic.py b/devito/types/basic.py index db455e8924..b49d030031 100644 --- a/devito/types/basic.py +++ b/devito/types/basic.py @@ -19,8 +19,14 @@ from devito.types.lazy import Evaluable from devito.types.utils import DimensionTuple, Offset, Size -__all__ = ['Symbol', 'Scalar', 'Indexed', 'IndexedData', 'DeviceMap', - 'IrregularFunctionInterface'] +__all__ = [ + 'DeviceMap', + 'Indexed', + 'IndexedData', + 'IrregularFunctionInterface', + 'Scalar', + 'Symbol', +] class CodeSymbol: @@ -1587,7 +1593,7 @@ def flat(self): return self._mat def __init_finalize__(self, *args, **kwargs): - self._name = kwargs.get('name', None) + self._name = kwargs.get('name') __hash__ = sympy.ImmutableDenseMatrix.__hash__ diff --git a/devito/types/caching.py b/devito/types/caching.py index 948ae09e88..a1e359bd4d 100644 --- a/devito/types/caching.py +++ b/devito/types/caching.py @@ -7,7 +7,7 @@ from devito.tools import safe_dict_copy -__all__ = ['Cached', 'Uncached', '_SymbolCache', 'CacheManager'] +__all__ = ['CacheManager', 'Cached', 'Uncached', '_SymbolCache'] _SymbolCache = {} """The symbol cache.""" diff --git a/devito/types/dense.py b/devito/types/dense.py index f36d4ba98f..eef4682c65 100644 --- a/devito/types/dense.py +++ b/devito/types/dense.py @@ -30,7 +30,7 @@ Buffer, DimensionTuple, NODE, CELL, Size, Staggering, host_layer ) -__all__ = ['Function', 
'TimeFunction', 'SubFunction', 'TempFunction'] +__all__ = ['Function', 'SubFunction', 'TempFunction', 'TimeFunction'] RegionMeta = namedtuple('RegionMeta', 'offset size') @@ -341,12 +341,9 @@ def _size_outhalo(self): if self._distributor.is_parallel and (any(left) or any(right)): try: - warning_msg = """A space order of {0} and a halo size of {1} has been - set but the current rank ({2}) has a domain size of - only {3}""".format(self._space_order, - max(self._size_inhalo), - self._distributor.myrank, - min(self.grid.shape_local)) + warning_msg = f"""A space order of {self._space_order} and a halo size of {max(self._size_inhalo)} has been + set but the current rank ({self._distributor.myrank}) has a domain size of + only {min(self.grid.shape_local)}""" if not self._distributor.is_boundary_rank: warning(warning_msg) else: diff --git a/devito/types/dimension.py b/devito/types/dimension.py index 6e000349e9..d83efece3c 100644 --- a/devito/types/dimension.py +++ b/devito/types/dimension.py @@ -17,11 +17,24 @@ from devito.types.relational import relational_min, relational_max -__all__ = ['Dimension', 'SpaceDimension', 'TimeDimension', 'DefaultDimension', - 'CustomDimension', 'SteppingDimension', 'SubDimension', - 'MultiSubDimension', 'ConditionalDimension', 'ModuloDimension', - 'IncrDimension', 'BlockDimension', 'StencilDimension', - 'VirtualDimension', 'Spacing', 'dimensions'] +__all__ = [ + 'BlockDimension', + 'ConditionalDimension', + 'CustomDimension', + 'DefaultDimension', + 'Dimension', + 'IncrDimension', + 'ModuloDimension', + 'MultiSubDimension', + 'SpaceDimension', + 'Spacing', + 'StencilDimension', + 'SteppingDimension', + 'SubDimension', + 'TimeDimension', + 'VirtualDimension', + 'dimensions', +] SubDimensionThickness = namedtuple('SubDimensionThickness', 'left right') diff --git a/devito/types/grid.py b/devito/types/grid.py index 6107799ce0..005d92dded 100644 --- a/devito/types/grid.py +++ b/devito/types/grid.py @@ -20,7 +20,7 @@ MultiSubDimension, 
DefaultDimension) from devito.deprecations import deprecations -__all__ = ['Grid', 'SubDomain', 'SubDomainSet', 'Border'] +__all__ = ['Border', 'Grid', 'SubDomain', 'SubDomainSet'] GlobalLocal = namedtuple('GlobalLocal', 'glb loc') @@ -621,7 +621,7 @@ def __subdomain_finalize_legacy__(self, grid): except ValueError: side, thickness = v constructor = {'left': SubDimension.left, - 'right': SubDimension.right}.get(side, None) + 'right': SubDimension.right}.get(side) if constructor is None: raise ValueError(f"Expected sides 'left|right', not `{side}`") @@ -820,7 +820,7 @@ class SubDomainSet(MultiSubDomain): def __init__(self, **kwargs): self._n_domains = kwargs.get('N', 1) - self._global_bounds = kwargs.get('bounds', None) + self._global_bounds = kwargs.get('bounds') super().__init__(**kwargs) try: diff --git a/devito/types/misc.py b/devito/types/misc.py index 8cdad91b07..599d606b60 100644 --- a/devito/types/misc.py +++ b/devito/types/misc.py @@ -12,10 +12,27 @@ from devito.types.basic import IndexedData from devito.tools import CustomDtype, Pickable, as_tuple, frozendict -__all__ = ['Timer', 'Pointer', 'VolatileInt', 'FIndexed', 'Wildcard', 'Fence', - 'Global', 'Hyperplane', 'Indirection', 'Temp', 'TempArray', 'Jump', - 'nop', 'WeakFence', 'CriticalRegion', 'Auto', 'AutoRef', 'auto', - 'size_t'] +__all__ = [ + 'Auto', + 'AutoRef', + 'CriticalRegion', + 'FIndexed', + 'Fence', + 'Global', + 'Hyperplane', + 'Indirection', + 'Jump', + 'Pointer', + 'Temp', + 'TempArray', + 'Timer', + 'VolatileInt', + 'WeakFence', + 'Wildcard', + 'auto', + 'nop', + 'size_t', +] class Timer(CompositeObject): diff --git a/devito/types/object.py b/devito/types/object.py index 637e19dea0..05fb6e1069 100644 --- a/devito/types/object.py +++ b/devito/types/object.py @@ -8,7 +8,7 @@ from devito.types.basic import Basic, LocalType from devito.types.utils import CtypesFactory -__all__ = ['Object', 'LocalObject', 'CompositeObject'] +__all__ = ['CompositeObject', 'LocalObject', 'Object'] class 
AbstractObject(Basic, sympy.Basic, Pickable): diff --git a/devito/types/parallel.py b/devito/types/parallel.py index 4383e6c208..89ebaab520 100644 --- a/devito/types/parallel.py +++ b/devito/types/parallel.py @@ -20,10 +20,26 @@ from devito.types.dimension import CustomDimension from devito.types.misc import Fence, VolatileInt -__all__ = ['NThreads', 'NThreadsNested', 'NThreadsNonaffine', 'NThreadsBase', - 'DeviceID', 'ThreadID', 'Lock', 'ThreadArray', 'PThreadArray', - 'SharedData', 'NPThreads', 'DeviceRM', 'QueueID', 'Barrier', 'TBArray', - 'ThreadPoolSync', 'ThreadCommit', 'ThreadWait'] +__all__ = [ + 'Barrier', + 'DeviceID', + 'DeviceRM', + 'Lock', + 'NPThreads', + 'NThreads', + 'NThreadsBase', + 'NThreadsNested', + 'NThreadsNonaffine', + 'PThreadArray', + 'QueueID', + 'SharedData', + 'TBArray', + 'ThreadArray', + 'ThreadCommit', + 'ThreadID', + 'ThreadPoolSync', + 'ThreadWait', +] class NThreadsAbstract(Scalar): diff --git a/devito/types/relational.py b/devito/types/relational.py index 47d9768c54..731ec29bc7 100644 --- a/devito/types/relational.py +++ b/devito/types/relational.py @@ -3,7 +3,7 @@ import sympy -__all__ = ['Le', 'Lt', 'Ge', 'Gt', 'Ne', 'relational_min', 'relational_max'] +__all__ = ['Ge', 'Gt', 'Le', 'Lt', 'Ne', 'relational_max', 'relational_min'] class AbstractRel: diff --git a/devito/types/sparse.py b/devito/types/sparse.py index afedbaaee3..eaee37528a 100644 --- a/devito/types/sparse.py +++ b/devito/types/sparse.py @@ -21,8 +21,13 @@ from devito.types.utils import IgnoreDimSort -__all__ = ['SparseFunction', 'SparseTimeFunction', 'PrecomputedSparseFunction', - 'PrecomputedSparseTimeFunction', 'MatrixSparseTimeFunction'] +__all__ = [ + 'MatrixSparseTimeFunction', + 'PrecomputedSparseFunction', + 'PrecomputedSparseTimeFunction', + 'SparseFunction', + 'SparseTimeFunction', +] _interpolators = {'linear': LinearInterpolator, 'sinc': SincInterpolator} diff --git a/devito/types/tensor.py b/devito/types/tensor.py index e83679d11d..f8bc533a3c 100644 --- 
a/devito/types/tensor.py +++ b/devito/types/tensor.py @@ -114,7 +114,7 @@ def __subfunc_setup__(cls, *args, **kwargs): raise TypeError("Need either `grid` or `dimensions`") else: dims = grid.dimensions - stagg = kwargs.get("staggered", None) + stagg = kwargs.get("staggered") name = kwargs.get("name") symm = kwargs.get('symmetric', True) diag = kwargs.get('diagonal', False) @@ -347,7 +347,7 @@ def __subfunc_setup__(cls, *args, **kwargs): raise TypeError("Need either `grid` or `dimensions`") else: dims = grid.dimensions - stagg = kwargs.get("staggered", None) + stagg = kwargs.get("staggered") name = kwargs.get("name") for i, d in enumerate(dims): sub_kwargs = cls._component_kwargs(i, **kwargs) diff --git a/devito/types/utils.py b/devito/types/utils.py index 53b0d0b928..d8a7877cf1 100644 --- a/devito/types/utils.py +++ b/devito/types/utils.py @@ -5,9 +5,22 @@ from devito.tools import EnrichedTuple, Tag # Additional Function-related APIs -__all__ = ['Buffer', 'DimensionTuple', 'NODE', 'CELL', 'Size', 'Offset', - 'IgnoreDimSort', 'HierarchyLayer', 'HostLayer', 'DeviceLayer', - 'DiskLayer', 'host_layer', 'device_layer', 'disk_layer'] +__all__ = [ + 'CELL', + 'NODE', + 'Buffer', + 'DeviceLayer', + 'DimensionTuple', + 'DiskLayer', + 'HierarchyLayer', + 'HostLayer', + 'IgnoreDimSort', + 'Offset', + 'Size', + 'device_layer', + 'disk_layer', + 'host_layer', +] class Buffer(Tag): diff --git a/examples/cfd/01_convection.ipynb b/examples/cfd/01_convection.ipynb index 8cdbd9dc60..7a7fe3b6c6 100644 --- a/examples/cfd/01_convection.ipynb +++ b/examples/cfd/01_convection.ipynb @@ -112,17 +112,17 @@ "for n in range(nt + 1):\n", " # Copy previous result into a new buffer\n", " un = u.copy()\n", - " \n", + "\n", " # Update the new result with a 3-point stencil\n", " u[1:, 1:] = (un[1:, 1:] - (c * dt / dy * (un[1:, 1:] - un[1:, :-1])) -\n", " (c * dt / dx * (un[1:, 1:] - un[:-1, 1:])))\n", "\n", - " # Apply boundary conditions. \n", + " # Apply boundary conditions.\n", " u[0, :] = 1. 
# left\n", " u[-1, :] = 1. # right\n", " u[:, 0] = 1. # bottom\n", " u[:, -1] = 1. # top\n", - " # Note that in the above expressions the NumPy index -1 corresponds to the final point of the array along the indexed dimension, \n", + " # Note that in the above expressions the NumPy index -1 corresponds to the final point of the array along the indexed dimension,\n", " # i.e. here u[-1, :] is equivalent to u[80, :].\n" ] }, diff --git a/examples/cfd/01_convection_revisited.ipynb b/examples/cfd/01_convection_revisited.ipynb index 1b734099e7..e7be39a448 100644 --- a/examples/cfd/01_convection_revisited.ipynb +++ b/examples/cfd/01_convection_revisited.ipynb @@ -30,7 +30,7 @@ "metadata": {}, "outputs": [], "source": [ - "from examples.cfd import plot_field, init_hat, init_smooth\n", + "from examples.cfd import plot_field, init_smooth\n", "import numpy as np\n", "%matplotlib inline\n", "\n", @@ -106,12 +106,12 @@ "for n in range(nt + 1):\n", " # Copy previous result into a new buffer\n", " un = u.copy()\n", - " \n", + "\n", " # Update the new result with a 3-point stencil\n", " u[1:, 1:] = (un[1:, 1:] - (c * dt / dy * (un[1:, 1:] - un[1:, :-1])) -\n", " (c * dt / dx * (un[1:, 1:] - un[:-1, 1:])))\n", - " \n", - " # Apply boundary conditions. \n", + "\n", + " # Apply boundary conditions.\n", " # Note: -1 here is the last index in the array, not the one at x=-1 or y=-1.\n", " u[0, :] = 1. # left\n", " u[-1, :] = 1. 
# right\n", @@ -238,7 +238,7 @@ } ], "source": [ - "from devito import solve \n", + "from devito import solve\n", "from sympy import nsimplify, pprint\n", "\n", "stencil = solve(eq, u.forward)\n", @@ -287,13 +287,13 @@ "init_smooth(field=u.data[1], dx=dx, dy=dy)\n", "\n", "# Apply boundary conditions.\n", - "# Note that as the u.data method is from numpy, we can use the \n", + "# Note that as the u.data method is from numpy, we can use the\n", "# -1 syntax to represent the last item in the array.\n", "u.data[:, 0, :] = 1.\n", "u.data[:, -1, :] = 1.\n", "u.data[:, :, 0] = 1.\n", "u.data[:, :, -1] = 1.\n", - " \n", + "\n", "# Create an Operator that updates the forward stencil\n", "# point in the interior subdomain only.\n", "op = Operator(Eq(u.forward, stencil, subdomain=grid.interior))\n", @@ -346,8 +346,8 @@ "init_smooth(field=u.data[1], dx=dx, dy=dy)\n", "\n", "# For defining BCs, we generally to explicitly set rows/columns\n", - "# in our field using an expression. We can use Devito's \"indexed\" \n", - "# notation to do this. A u in this instance is a sympy function \n", + "# in our field using an expression. We can use Devito's \"indexed\"\n", + "# notation to do this. 
A u in this instance is a sympy function\n", "# we cannot use the numpy shortcut u[-1] to refer to the last location:\n", "x, y = grid.dimensions\n", "t = grid.stepping_dim\n", diff --git a/examples/cfd/02_convection_nonlinear.ipynb b/examples/cfd/02_convection_nonlinear.ipynb index e77326274f..bd3559d268 100644 --- a/examples/cfd/02_convection_nonlinear.ipynb +++ b/examples/cfd/02_convection_nonlinear.ipynb @@ -36,7 +36,6 @@ "source": [ "from examples.cfd import plot_field, init_hat\n", "import numpy as np\n", - "import sympy\n", "%matplotlib inline\n", "\n", "# Some variable declarations\n", @@ -114,23 +113,23 @@ "for n in range(nt + 1): ##loop across number of time steps\n", " un = u.copy()\n", " vn = v.copy()\n", - " u[1:, 1:] = (un[1:, 1:] - \n", + " u[1:, 1:] = (un[1:, 1:] -\n", " (un[1:, 1:] * c * dt / dy * (un[1:, 1:] - un[1:, :-1])) -\n", " vn[1:, 1:] * c * dt / dx * (un[1:, 1:] - un[:-1, 1:]))\n", " v[1:, 1:] = (vn[1:, 1:] -\n", " (un[1:, 1:] * c * dt / dy * (vn[1:, 1:] - vn[1:, :-1])) -\n", " vn[1:, 1:] * c * dt / dx * (vn[1:, 1:] - vn[:-1, 1:]))\n", - " \n", + "\n", " u[0, :] = 1\n", " u[-1, :] = 1\n", " u[:, 0] = 1\n", " u[:, -1] = 1\n", - " \n", + "\n", " v[0, :] = 1\n", " v[-1, :] = 1\n", " v[:, 0] = 1\n", " v[:, -1] = 1\n", - " \n", + "\n", "plot_field(u)" ] }, diff --git a/examples/cfd/03_diffusion.ipynb b/examples/cfd/03_diffusion.ipynb index 2861815528..0fbd64c580 100644 --- a/examples/cfd/03_diffusion.ipynb +++ b/examples/cfd/03_diffusion.ipynb @@ -57,9 +57,9 @@ "outputs": [], "source": [ "def diffuse(u, nt):\n", - " for n in range(nt + 1): \n", + " for n in range(nt + 1):\n", " un = u.copy()\n", - " u[1:-1, 1:-1] = (un[1:-1,1:-1] + \n", + " u[1:-1, 1:-1] = (un[1:-1,1:-1] +\n", " nu * dt / dy**2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", " nu * dt / dx**2 * (un[2:,1: -1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", " u[0, :] = 1\n", @@ -139,7 +139,7 @@ "u = np.empty((nx, ny))\n", "init_hat(field=u, dx=dx, dy=dy, value=1)\n", 
"\n", - "# Field initialization. \n", + "# Field initialization.\n", "# This will create 4 equally spaced 10x10 hat functions of various values.\n", "u[ nx//4:nx//4+10 , ny//4:ny//4+10 ] = 2\n", "u[ 3*nx//4:3*nx//4+10 , ny//4:ny//4+10 ] = 3\n", @@ -187,7 +187,6 @@ "source": [ "from devito import Grid, TimeFunction, Eq, solve\n", "from sympy.abc import a\n", - "from sympy import nsimplify\n", "\n", "# Initialize `u` for space order 2\n", "grid = Grid(shape=(nx, ny), extent=(2., 2.))\n", diff --git a/examples/cfd/03_diffusion_nonuniform.ipynb b/examples/cfd/03_diffusion_nonuniform.ipynb index 5a6d5337fd..e42a2c6a26 100644 --- a/examples/cfd/03_diffusion_nonuniform.ipynb +++ b/examples/cfd/03_diffusion_nonuniform.ipynb @@ -44,8 +44,8 @@ "\n", "visc = np.full((nx, ny), nu) # Initialize viscosity\n", "visc[nx//4-offset:nx//4+offset, 1:-1] = 0.0001 # Adding a material with different viscosity\n", - "visc[1:-1,nx//4-offset:nx//4+offset ] = 0.0001 \n", - "visc[3*nx//4-offset:3*nx//4+offset, 1:-1] = 0.0001 \n", + "visc[1:-1,nx//4-offset:nx//4+offset ] = 0.0001\n", + "visc[3*nx//4-offset:3*nx//4+offset, 1:-1] = 0.0001\n", "\n", "visc_nb = visc[1:-1,1:-1]\n", "\n", @@ -80,9 +80,9 @@ "outputs": [], "source": [ "def diffuse(u, nt ,visc):\n", - " for n in range(nt + 1): \n", + " for n in range(nt + 1):\n", " un = u.copy()\n", - " u[1:-1, 1:-1] = (un[1:-1,1:-1] + \n", + " u[1:-1, 1:-1] = (un[1:-1,1:-1] +\n", " visc*dt / dy**2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", " visc*dt / dx**2 * (un[2:,1: -1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", " u[0, :] = 1\n", @@ -341,7 +341,7 @@ ], "source": [ "#NBVAL_IGNORE_OUTPUT\n", - "from devito import Operator, Constant, Eq, solve, Function\n", + "from devito import Operator, Eq, solve, Function\n", "\n", "\n", "# Reset our data field and ICs\n", diff --git a/examples/cfd/04_burgers.ipynb b/examples/cfd/04_burgers.ipynb index e2a70610e7..1ad8811331 100644 --- a/examples/cfd/04_burgers.ipynb +++ 
b/examples/cfd/04_burgers.ipynb @@ -200,36 +200,36 @@ " vn = v.copy()\n", "\n", " u[1:-1, 1:-1] = (un[1:-1, 1:-1] -\n", - " dt / dy * un[1:-1, 1:-1] * \n", - " (un[1:-1, 1:-1] - un[1:-1, 0:-2]) - \n", - " dt / dx * vn[1:-1, 1:-1] * \n", - " (un[1:-1, 1:-1] - un[0:-2, 1:-1]) + \n", - " nu * dt / dy**2 * \n", - " (un[1:-1,2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) + \n", - " nu * dt / dx**2 * \n", + " dt / dy * un[1:-1, 1:-1] *\n", + " (un[1:-1, 1:-1] - un[1:-1, 0:-2]) -\n", + " dt / dx * vn[1:-1, 1:-1] *\n", + " (un[1:-1, 1:-1] - un[0:-2, 1:-1]) +\n", + " nu * dt / dy**2 *\n", + " (un[1:-1,2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", + " nu * dt / dx**2 *\n", " (un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", - " \n", - " v[1:-1, 1:-1] = (vn[1:-1, 1:-1] - \n", + "\n", + " v[1:-1, 1:-1] = (vn[1:-1, 1:-1] -\n", " dt / dy * un[1:-1, 1:-1] *\n", " (vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) -\n", - " dt / dx * vn[1:-1, 1:-1] * \n", - " (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) + \n", - " nu * dt / dy**2 * \n", + " dt / dx * vn[1:-1, 1:-1] *\n", + " (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) +\n", + " nu * dt / dy**2 *\n", " (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) +\n", " nu * dt / dx**2 *\n", " (vn[2:, 1:-1] - 2 * vn[1:-1, 1:-1] + vn[0:-2, 1:-1]))\n", - " \n", + "\n", " u[0, :] = 1\n", " u[-1, :] = 1\n", " u[:, 0] = 1\n", " u[:, -1] = 1\n", - " \n", + "\n", " v[0, :] = 1\n", " v[-1, :] = 1\n", " v[:, 0] = 1\n", " v[:, -1] = 1\n", - " \n", - " \n", + "\n", + "\n", " # A figure of the wave state will be produced for each batch\n", " if (n%batch_size) == 0:\n", " print (\"Batch:\",n/(batch_size))\n", @@ -486,9 +486,9 @@ } ], "source": [ - "from devito import VectorTimeFunction, grad, div, NODE\n", + "from devito import VectorTimeFunction, grad\n", "x, y = grid.dimensions\n", - "# Reinitialise \n", + "# Reinitialise\n", "U = VectorTimeFunction(name='U', grid=grid, space_order=2)\n", "init_hat(field=U[0].data[0], dx=dx, dy=dy, value=2.)\n", "init_hat(field=U[1].data[0], 
dx=dx, dy=dy, value=2.)\n", diff --git a/examples/cfd/05_laplace.ipynb b/examples/cfd/05_laplace.ipynb index 9700c65c80..74ee48608e 100644 --- a/examples/cfd/05_laplace.ipynb +++ b/examples/cfd/05_laplace.ipynb @@ -65,14 +65,14 @@ " p[1:-1, 1:-1] = ((dx**2 * (pn[2:, 1:-1] + pn[0:-2, 1:-1]) +\n", " dy**2 * (pn[1:-1, 2:] + pn[1:-1, 0:-2])) /\n", " (2 * (dx**2 + dy**2)))\n", - " \n", + "\n", " p[0, :] = 0 # p = 0 @ x = 0\n", " p[-1, :] = bc_y # p = y @ x = 2\n", " p[:, 0] = p[:, 1] # dp/dy = 0 @ y = 0\n", " p[:, -1] = p[:, -2] # dp/dy = 0 @ y = 1\n", " l1norm = (np.sum(np.abs(p[:]) - np.abs(pn[:])) /\n", " np.sum(np.abs(pn[:])))\n", - " \n", + "\n", " return p" ] }, @@ -294,7 +294,7 @@ " # This call implies a deep data copy\n", " pn.data[:] = p.data[:]\n", " op(p=p, pn=pn)\n", - " \n", + "\n", " l1norm = (np.sum(np.abs(p.data[:]) - np.abs(pn.data[:])) /\n", " np.sum(np.abs(pn.data[:])))\n", "\n", @@ -371,12 +371,12 @@ "\n", " # Apply operator\n", " op(p=_p, pn=_pn)\n", - " \n", + "\n", " # Compute L1 norm\n", " l1norm = (np.sum(np.abs(_p.data[:]) - np.abs(_pn.data[:])) /\n", " np.sum(np.abs(_pn.data[:])))\n", " counter += 1\n", - " \n", + "\n", "plot_field(p.data, ymax=1.0, view=(30, 225))" ] } diff --git a/examples/cfd/06_poisson.ipynb b/examples/cfd/06_poisson.ipynb index a3accb5865..8fd2e7875e 100644 --- a/examples/cfd/06_poisson.ipynb +++ b/examples/cfd/06_poisson.ipynb @@ -31,7 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "from examples.cfd import plot_field, init_hat\n", + "from examples.cfd import plot_field\n", "import numpy as np\n", "%matplotlib inline\n", "\n", @@ -86,14 +86,14 @@ " pd = p.copy()\n", " p[1:-1,1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +\n", " (pd[2:, 1:-1] + pd[:-2, 1:-1]) * dx**2 -\n", - " b[1:-1, 1:-1] * dx**2 * dy**2) / \n", + " b[1:-1, 1:-1] * dx**2 * dy**2) /\n", " (2 * (dx**2 + dy**2)))\n", "\n", " p[0, :] = 0\n", " p[nx-1, :] = 0\n", " p[:, 0] = 0\n", " p[:, ny-1] = 0\n", - " \n" + "\n" ] }, { @@ -237,11 +237,11 @@ 
"metadata": {}, "outputs": [], "source": [ - "# Now with Devito we will turn `p` into `TimeFunction` object \n", + "# Now with Devito we will turn `p` into `TimeFunction` object\n", "# to make all the buffer switching implicit\n", "p = TimeFunction(name='p', grid=grid, space_order=2)\n", "p.data[:] = 0.\n", - " \n", + "\n", "\n", "# Initialise the source term `b`\n", "b = Function(name='b', grid=grid)\n", diff --git a/examples/cfd/07_cavity_flow.ipynb b/examples/cfd/07_cavity_flow.ipynb index 15bf225c88..de5b61acaa 100644 --- a/examples/cfd/07_cavity_flow.ipynb +++ b/examples/cfd/07_cavity_flow.ipynb @@ -130,7 +130,7 @@ "\n", "u = np.zeros((nx, ny))\n", "v = np.zeros((nx, ny))\n", - "p = np.zeros((nx, ny)) " + "p = np.zeros((nx, ny))" ] }, { @@ -147,9 +147,9 @@ "outputs": [], "source": [ "def build_up_b(b, rho, dt, u, v, dx, dy):\n", - " \n", - " b[1:-1, 1:-1] = (rho * (1 / dt * \n", - " ((u[2:, 1:-1] - u[0:-2, 1:-1]) / \n", + "\n", + " b[1:-1, 1:-1] = (rho * (1 / dt *\n", + " ((u[2:, 1:-1] - u[0:-2, 1:-1]) /\n", " (2 * dx) + (v[1:-1, 2:] - v[1:-1, 0:-2]) / (2 * dy)) -\n", " ((u[2:, 1:-1] - u[0:-2, 1:-1]) / (2 * dx))**2 -\n", " 2 * ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dy) *\n", @@ -175,21 +175,21 @@ "def pressure_poisson(p, dx, dy, b):\n", " pn = np.empty_like(p)\n", " pn = p.copy()\n", - " \n", + "\n", " for q in range(nit):\n", " pn = p.copy()\n", - " p[1:-1, 1:-1] = (((pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dy**2 + \n", + " p[1:-1, 1:-1] = (((pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dy**2 +\n", " (pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dx**2) /\n", " (2 * (dx**2 + dy**2)) -\n", - " dx**2 * dy**2 / (2 * (dx**2 + dy**2)) * \n", + " dx**2 * dy**2 / (2 * (dx**2 + dy**2)) *\n", " b[1:-1,1:-1])\n", - " \n", + "\n", " p[-1, :] = p[-2, :] # dp/dx = 0 at x = 2\n", " p[:, 0] = p[:, 1] # dp/dy = 0 at y = 0\n", " p[0, :] = p[1, :] # dp/dx = 0 at x = 0\n", " p[:, -1] = p[:, -2] # p = 0 at y = 2\n", - " p[0, 0] = 0 \n", - " \n", + " p[0, 0] = 0\n", + "\n", " return p, pn" ] }, @@ -210,15 
+210,15 @@ " un = np.empty_like(u)\n", " vn = np.empty_like(v)\n", " b = np.zeros((nx, ny))\n", - " \n", + "\n", " for n in range(0,nt):\n", " un = u.copy()\n", " vn = v.copy()\n", - " \n", + "\n", " b = build_up_b(b, rho, dt, u, v, dx, dy)\n", " p = pressure_poisson(p, dx, dy, b)[0]\n", " pn = pressure_poisson(p, dx, dy, b)[1]\n", - " \n", + "\n", " u[1:-1, 1:-1] = (un[1:-1, 1:-1]-\n", " un[1:-1, 1:-1] * dt / dx *\n", " (un[1:-1, 1:-1] - un[0:-2, 1:-1]) -\n", @@ -245,12 +245,12 @@ " u[0, :] = 0\n", " u[-1, :] = 0\n", " u[:, -1] = 1 # Set velocity on cavity lid equal to 1\n", - " \n", + "\n", " v[:, 0] = 0\n", " v[:, -1] = 0\n", " v[0, :] = 0\n", " v[-1, :] = 0\n", - " \n", + "\n", " return u, v, p, pn" ] }, @@ -450,8 +450,7 @@ } ], "source": [ - "from devito import TimeFunction, Function, \\\n", - "Eq, solve, Operator, configuration\n", + "from devito import TimeFunction, Eq, solve, Operator, configuration\n", "\n", "# Build Required Functions and derivatives:\n", "# --------------------------------------\n", @@ -476,20 +475,20 @@ "stencil_u =solve(eq_u , u.forward)\n", "stencil_v =solve(eq_v , v.forward)\n", "stencil_p=solve(eq_p, p)\n", - " \n", + "\n", "update_u =Eq(u.forward, stencil_u)\n", "update_v =Eq(v.forward, stencil_v)\n", - "update_p =Eq(p.forward, stencil_p) \n", + "update_p =Eq(p.forward, stencil_p)\n", "\n", "# Boundary Conds. 
u=v=0 for all sides\n", - "bc_u = [Eq(u[t+1, 0, y], 0)] \n", - "bc_u += [Eq(u[t+1, nx-1, y], 0)] \n", - "bc_u += [Eq(u[t+1, x, 0], 0)] \n", + "bc_u = [Eq(u[t+1, 0, y], 0)]\n", + "bc_u += [Eq(u[t+1, nx-1, y], 0)]\n", + "bc_u += [Eq(u[t+1, x, 0], 0)]\n", "bc_u += [Eq(u[t+1, x, ny-1], 1)] # except u=1 for y=2\n", - "bc_v = [Eq(v[t+1, 0, y], 0)] \n", - "bc_v += [Eq(v[t+1, nx-1, y], 0)] \n", - "bc_v += [Eq(v[t+1, x, ny-1], 0)] \n", - "bc_v += [Eq(v[t+1, x, 0], 0)] \n", + "bc_v = [Eq(v[t+1, 0, y], 0)]\n", + "bc_v += [Eq(v[t+1, nx-1, y], 0)]\n", + "bc_v += [Eq(v[t+1, x, ny-1], 0)]\n", + "bc_v += [Eq(v[t+1, x, 0], 0)]\n", "\n", "bc_p = [Eq(p[t+1, 0, y],p[t+1, 1,y])] # dpn/dx = 0 for x=0.\n", "bc_p += [Eq(p[t+1,nx-1, y],p[t+1,nx-2, y])] # dpn/dx = 0 for x=2.\n", @@ -541,12 +540,12 @@ "#NBVAL_IGNORE_OUTPUT\n", "fig = pyplot.figure(figsize=(11,7), dpi=100)\n", "# Plotting the pressure field as a contour.\n", - "pyplot.contourf(X, Y, p.data[0], alpha=0.5, cmap=cm.viridis) \n", + "pyplot.contourf(X, Y, p.data[0], alpha=0.5, cmap=cm.viridis)\n", "pyplot.colorbar()\n", "# Plotting the pressure field outlines.\n", - "pyplot.contour(X, Y, p.data[0], cmap=cm.viridis) \n", + "pyplot.contour(X, Y, p.data[0], cmap=cm.viridis)\n", "# Plotting velocity field.\n", - "pyplot.quiver(X[::2,::2], Y[::2,::2], u.data[0,::2,::2], v.data[0,::2,::2]) \n", + "pyplot.quiver(X[::2,::2], Y[::2,::2], u.data[0,::2,::2], v.data[0,::2,::2])\n", "pyplot.xlabel('X')\n", "pyplot.ylabel('Y');\n" ] diff --git a/examples/cfd/08_shallow_water_equation.ipynb b/examples/cfd/08_shallow_water_equation.ipynb index 090d01d934..60610c2cd6 100644 --- a/examples/cfd/08_shallow_water_equation.ipynb +++ b/examples/cfd/08_shallow_water_equation.ipynb @@ -112,9 +112,9 @@ " and is responsible for saving the snapshots required for the following\n", " animations.\n", " \"\"\"\n", - " \n", + "\n", " eps = np.finfo(grid.dtype).eps\n", - " \n", + "\n", " # Friction term expresses the loss of amplitude from the friction with the 
seafloor\n", " frictionTerm = g * alpha**2 * sqrt(M**2 + N**2) / D**(7./3.)\n", "\n", @@ -152,7 +152,6 @@ "outputs": [], "source": [ "from IPython.display import HTML, display\n", - "import matplotlib.pyplot as plt\n", "import matplotlib.animation as animation\n", "\n", "\n", @@ -163,7 +162,7 @@ "\n", " plt.xlabel('x')\n", " plt.ylabel('z')\n", - " plt.title(title) \n", + " plt.title(title)\n", "\n", " def update(i):\n", " matrice.set_array(eta.data[i, :, :].T)\n", diff --git a/examples/cfd/09_Darcy_flow_equation.ipynb b/examples/cfd/09_Darcy_flow_equation.ipynb index 7a6189c6a1..a4efc75a1f 100644 --- a/examples/cfd/09_Darcy_flow_equation.ipynb +++ b/examples/cfd/09_Darcy_flow_equation.ipynb @@ -118,7 +118,7 @@ "\n", " coeff = np.random.randn(N, *self.size)\n", " coeff = self.sqrt_eig * coeff\n", - " \n", + "\n", "\n", " return fft.ifftn(coeff).real" ] @@ -143,7 +143,7 @@ "# Silence the runtime performance logging\n", "configuration['log-level'] = 'ERROR'\n", "\n", - "# Number of grid points on [0,1]^2 \n", + "# Number of grid points on [0,1]^2\n", "s = 256\n", "\n", "# Create s x s grid with spacing 1\n", @@ -258,7 +258,7 @@ }, "outputs": [], "source": [ - "# Forcing function, f(x) = 1 \n", + "# Forcing function, f(x) = 1\n", "f = np.ones((s, s))\n", "\n", "# Create function on grid\n", @@ -374,16 +374,16 @@ " The forcing function f(x) = 1\n", " '''\n", "def darcy_flow_2d(perm, f):\n", - " \n", + "\n", " # a(x) is the coefficients\n", " # f is the forcing function\n", " # initialize a, f with inputs permeability and forcing\n", " f1.data[:] = f[:]\n", " initialize_function(a, perm, 0)\n", - " \n", + "\n", " # call operator for the 15,000th pseudo-timestep\n", " op(time= 15000)\n", - " \n", + "\n", " return np.array(u.data[0])" ] }, @@ -458,7 +458,7 @@ ], "source": [ "#NBVAL_IGNORE_OUTPUT\n", - "# plot to show the output: \n", + "# plot to show the output:\n", "ax1 = plt.subplot(221)\n", "ax2 = plt.subplot(222)\n", "ax3 = plt.subplot(212)\n", diff --git 
a/examples/cfd/example_diffusion.py b/examples/cfd/example_diffusion.py index cd9cffc6b2..4acdcdeeca 100644 --- a/examples/cfd/example_diffusion.py +++ b/examples/cfd/example_diffusion.py @@ -29,7 +29,7 @@ def ring_initial(spacing=0.01): np.linspace(0., 1., ny, dtype=np.float32)) ui = np.zeros((nx, ny), dtype=np.float32) r = (xx - .5)**2. + (yy - .5)**2. - ui[np.logical_and(.05 <= r, r <= .1)] = 1. + ui[np.logical_and(r >= .05, r <= .1)] = 1. return ui diff --git a/examples/compiler/02_indexification.ipynb b/examples/compiler/02_indexification.ipynb index b997d49c18..793d8b008f 100644 --- a/examples/compiler/02_indexification.ipynb +++ b/examples/compiler/02_indexification.ipynb @@ -272,7 +272,7 @@ } ], "source": [ - "a.function is b.function " + "a.function is b.function" ] }, { diff --git a/examples/compiler/04_iet-B.ipynb b/examples/compiler/04_iet-B.ipynb index e0da83cdff..5f49f72e78 100644 --- a/examples/compiler/04_iet-B.ipynb +++ b/examples/compiler/04_iet-B.ipynb @@ -70,11 +70,11 @@ "symbs = {'a': Scalar(name='a'),\n", " 'b': Constant(name='b'),\n", " 'c': Array(name='c', shape=(3,), dimensions=(dims['i'],)).indexify(),\n", - " 'd': Array(name='d', \n", - " shape=(3,3), \n", + " 'd': Array(name='d',\n", + " shape=(3,3),\n", " dimensions=(dims['j'],dims['k'])).indexify(),\n", - " 'e': Function(name='e', \n", - " shape=(3,3,3), \n", + " 'e': Function(name='e',\n", + " shape=(3,3,3),\n", " dimensions=(dims['t0'],dims['t1'],dims['i'])).indexify(),\n", " 'f': TimeFunction(name='f', grid=grid).indexify()}\n", "symbs" @@ -198,7 +198,7 @@ " # for k\n", " # expr0\n", " return iters[0](iters[1](iters[2](exprs[0])))\n", - " \n", + "\n", "def get_block2(exprs, iters):\n", " # Non-perfect simple loop nest:\n", " # for i\n", diff --git a/examples/finance/bs_ivbp.ipynb b/examples/finance/bs_ivbp.ipynb index e59d6ec2b0..3b1970ed92 100644 --- a/examples/finance/bs_ivbp.ipynb +++ b/examples/finance/bs_ivbp.ipynb @@ -36,14 +36,11 @@ } ], "source": [ - "from devito import (Eq, 
Grid, TimeFunction, Operator, solve, Constant, \n", - " SpaceDimension, configuration, SubDomain, centered)\n", + "from devito import (Eq, Grid, TimeFunction, Operator, solve, Constant,\n", + " SpaceDimension, configuration, centered)\n", "\n", - "from mpl_toolkits.mplot3d import Axes3D\n", - "from mpl_toolkits.mplot3d.axis3d import Axis\n", "import matplotlib.pyplot as plt\n", "import matplotlib as mpl\n", - "from matplotlib import cm\n", "\n", "from sympy.stats import Normal, cdf\n", "import numpy as np\n", @@ -94,7 +91,7 @@ "dt0 = 0.0005\n", "ds0 = 1.0\n", "nt = (int)(tmax / dt0) + 1\n", - "ns = int((smax - smin) / ds0) + 1 \n", + "ns = int((smax - smin) / ds0) + 1\n", "\n", "shape = (ns, )\n", "origin =(smin, )\n", @@ -263,7 +260,7 @@ "\n", "# Run our operators\n", "startDevito = timer.time()\n", - " \n", + "\n", "# Apply operator\n", "op.apply(dt=dt0)\n", "\n", @@ -408,7 +405,6 @@ "source": [ "#NBVAL_IGNORE_OUTPUT\n", "\n", - "from mpl_toolkits.mplot3d import Axes3D\n", "\n", "# Trim the padding off smin and smax\n", "trim_data = v.data[:, padding:-padding]\n", @@ -497,14 +493,14 @@ "source": [ "#NBVAL_IGNORE_OUTPUT\n", "\n", - "# Derived formula for Black Scholes call from \n", + "# Derived formula for Black Scholes call from\n", "# https://aaronschlegel.me/black-scholes-formula-python.html\n", "def call_value_bs(S, K, T, r, sigma):\n", " N = Normal('x', 0.0, 1.0)\n", - " \n", + "\n", " d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n", " d2 = (np.log(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n", - " \n", + "\n", " call = (S * cdf(N)(d1) - K * np.exp(-r * T) * cdf(N)(d2))\n", " return call\n", "\n", @@ -524,7 +520,7 @@ "\n", "print(\"devito pde timesteps: %12.6s, %12.6fs runtime\" % (nt-1, endDevito - startDevito))\n", "print(\"call_value_bs timesteps: %12.6s, %12.6fs runtime\" % (len(time), endBF - startBF))\n", - " \n", + "\n", "s2 = np.linspace(smin, smax, shape[0])\n", "plt.figure(figsize=(12,10))\n", "\n", @@ 
-600,7 +596,7 @@ "\n", " rms = np.sqrt(np.float64(l2 / len(x_range)))\n", " vals.append(rms)\n", - " \n", + "\n", "plt.figure(figsize=(12,10))\n", "plt.plot(t_range, np.array(vals))" ] diff --git a/examples/misc/linalg.py b/examples/misc/linalg.py index fbe226940f..45aadfb41e 100644 --- a/examples/misc/linalg.py +++ b/examples/misc/linalg.py @@ -3,8 +3,13 @@ from devito import Inc, Operator, Function, dimensions, info from devito.tools import as_tuple -__all__ = ['mat_vec', 'transpose_mat_vec', 'mat_mat', 'mat_mat_sum', - 'chain_contractions'] +__all__ = [ + 'chain_contractions', + 'mat_mat', + 'mat_mat_sum', + 'mat_vec', + 'transpose_mat_vec', +] @click.group(chain=True) diff --git a/examples/performance/01_gpu.ipynb b/examples/performance/01_gpu.ipynb index 2bf4a3eeaf..54eeb2e9fc 100644 --- a/examples/performance/01_gpu.ipynb +++ b/examples/performance/01_gpu.ipynb @@ -18,7 +18,7 @@ "# Some imports we will need below\n", "import numpy as np\n", "from devito import *\n", - "import matplotlib.pyplot as plt \n", + "import matplotlib.pyplot as plt\n", "%matplotlib inline" ] }, @@ -123,7 +123,7 @@ " np.linspace(0., 1., ny, dtype=np.float32))\n", "r = (xx - .5)**2. + (yy - .5)**2.\n", "# Inserting the ring\n", - "u.data[0, np.logical_and(.05 <= r, r <= .1)] = 1." + "u.data[0, np.logical_and(r >= .05, r <= .1)] = 1." ] }, { @@ -204,7 +204,7 @@ "# We also need the `gpu-fit` option to tell Devito that `u` will definitely\n", "# fit in the GPU memory. This is necessary every time a TimeFunction with\n", "# `save != None` is used. 
Otherwise, Devito could generate code such that\n", - "# `u` gets streamed between the CPU and the GPU, but for this advanced \n", + "# `u` gets streamed between the CPU and the GPU, but for this advanced\n", "# feature you will need `devitopro`.\n", "op = Operator([step], platform='nvidiaX', opt=('advanced', {'gpu-fit': u}))" ] diff --git a/examples/performance/utils.py b/examples/performance/utils.py index 82e3294d46..c941f889fb 100644 --- a/examples/performance/utils.py +++ b/examples/performance/utils.py @@ -1,6 +1,6 @@ import difflib -__all__ = ['unidiff_output', 'print_kernel'] +__all__ = ['print_kernel', 'unidiff_output'] def unidiff_output(expected, actual): diff --git a/examples/seismic/abc_methods/01_introduction.ipynb b/examples/seismic/abc_methods/01_introduction.ipynb index 526c485eba..1d3b94f8e4 100644 --- a/examples/seismic/abc_methods/01_introduction.ipynb +++ b/examples/seismic/abc_methods/01_introduction.ipynb @@ -190,8 +190,7 @@ "\n", "import numpy as np\n", "import matplotlib.pyplot as plot\n", - "import math as mt\n", - "import matplotlib.ticker as mticker \n", + "import matplotlib.ticker as mticker\n", "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", "from matplotlib import cm" ] @@ -248,11 +247,11 @@ "nptx = 101\n", "nptz = 101\n", "x0 = 0.\n", - "x1 = 1000. \n", + "x1 = 1000.\n", "compx = x1-x0\n", "z0 = 0.\n", "z1 = 1000.\n", - "compz = z1-z0;\n", + "compz = z1-z0\n", "hx = (x1-x0)/(nptx-1)\n", "hz = (z1-z0)/(nptz-1)" ] @@ -333,10 +332,10 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = np.zeros((nptx,nptz)) \n", - "p0 = 0 \n", + "v0 = np.zeros((nptx,nptz))\n", + "p0 = 0\n", "p1 = int((1/2)*nptz)\n", - "p2 = nptz \n", + "p2 = nptz\n", "v0[0:nptx,p0:p1] = 1.5\n", "v0[0:nptx,p1:p2] = 2.5" ] @@ -435,9 +434,9 @@ "outputs": [], "source": [ "t0 = 0.\n", - "tn = 1000. 
\n", + "tn = 1000.\n", "CFL = 0.4\n", - "vmax = np.amax(v0) \n", + "vmax = np.amax(v0)\n", "dtmax = np.float64((min(hx,hz)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", "dt0 = np.float64((tn-t0)/ntmax)" @@ -488,8 +487,8 @@ "metadata": {}, "outputs": [], "source": [ - "(hxs,hzs) = grid.spacing_map \n", - "(x, z) = grid.dimensions \n", + "(hxs,hzs) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", "t = grid.stepping_dim\n", "dt = grid.stepping_dim.spacing" ] @@ -945,7 +944,7 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2d(U): \n", + "def graph2d(U):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscale = 1/10**(3)\n", @@ -1023,7 +1022,7 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2drec(rec): \n", + "def graph2drec(rec):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscaled = 1/10**(3)\n", diff --git a/examples/seismic/abc_methods/02_damping.ipynb b/examples/seismic/abc_methods/02_damping.ipynb index bff4447bc5..60114f8991 100644 --- a/examples/seismic/abc_methods/02_damping.ipynb +++ b/examples/seismic/abc_methods/02_damping.ipynb @@ -150,8 +150,7 @@ "\n", "import numpy as np\n", "import matplotlib.pyplot as plot\n", - "import math as mt\n", - "import matplotlib.ticker as mticker \n", + "import matplotlib.ticker as mticker\n", "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", "from matplotlib import cm" ] @@ -194,11 +193,11 @@ "nptx = 101\n", "nptz = 101\n", "x0 = 0.\n", - "x1 = 1000. 
\n", + "x1 = 1000.\n", "compx = x1-x0\n", "z0 = 0.\n", "z1 = 1000.\n", - "compz = z1-z0;\n", + "compz = z1-z0\n", "hxv = (x1-x0)/(nptx-1)\n", "hzv = (z1-z0)/(nptz-1)" ] @@ -393,32 +392,32 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = np.zeros((nptx,nptz)) \n", + "v0 = np.zeros((nptx,nptz))\n", "X0 = np.linspace(x0,x1,nptx)\n", "Z0 = np.linspace(z0,z1,nptz)\n", - " \n", + "\n", "x10 = x0+lx\n", "x11 = x1-lx\n", - " \n", + "\n", "z10 = z0\n", "z11 = z1 - lz\n", "\n", "xm = 0.5*(x10+x11)\n", "zm = 0.5*(z10+z11)\n", - " \n", + "\n", "pxm = 0\n", "pzm = 0\n", - " \n", + "\n", "for i in range(0,nptx):\n", " if(X0[i]==xm): pxm = i\n", - " \n", + "\n", "for j in range(0,nptz):\n", " if(Z0[j]==zm): pzm = j\n", - " \n", - "p0 = 0 \n", + "\n", + "p0 = 0\n", "p1 = pzm\n", "p2 = nptz\n", - " \n", + "\n", "v0[0:nptx,p0:p1] = 1.5\n", "v0[0:nptx,p1:p2] = 2.5" ] @@ -507,9 +506,9 @@ "outputs": [], "source": [ "t0 = 0.\n", - "tn = 1000. \n", + "tn = 1000.\n", "CFL = 0.4\n", - "vmax = np.amax(v0) \n", + "vmax = np.amax(v0)\n", "dtmax = np.float64((min(hxv,hzv)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", "dt0 = np.float64((tn-t0)/ntmax)" @@ -545,8 +544,8 @@ "metadata": {}, "outputs": [], "source": [ - "(hx,hz) = grid.spacing_map \n", - "(x, z) = grid.dimensions \n", + "(hx,hz) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", "t = grid.stepping_dim\n", "dt = grid.stepping_dim.spacing" ] @@ -724,10 +723,10 @@ "metadata": {}, "outputs": [], "source": [ - "x0pml = x0 + npmlx*hxv \n", - "x1pml = x1 - npmlx*hxv \n", - "z0pml = z0 \n", - "z1pml = z1 - npmlz*hzv " + "x0pml = x0 + npmlx*hxv\n", + "x1pml = x1 - npmlx*hxv\n", + "z0pml = z0\n", + "z1pml = z1 - npmlz*hzv" ] }, { @@ -754,7 +753,7 @@ "\n", " quibar = 1.5*np.log(1.0/0.001)/(40)\n", " cte = 1./vmax\n", - " \n", + "\n", " a = np.where(x<=x0pml,(np.abs(x-x0pml)/lx),np.where(x>=x1pml,(np.abs(x-x1pml)/lx),0.))\n", " b = np.where(z<=z0pml,(np.abs(z-z0pml)/lz),np.where(z>=z1pml,(np.abs(z-z1pml)/lz),0.))\n", " adamp = 
quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))/hxv\n", @@ -778,13 +777,13 @@ "outputs": [], "source": [ "def generatemdamp():\n", - " \n", - " X0 = np.linspace(x0,x1,nptx) \n", - " Z0 = np.linspace(z0,z1,nptz) \n", - " X0grid,Z0grid = np.meshgrid(X0,Z0) \n", - " D0 = np.zeros((nptx,nptz)) \n", + "\n", + " X0 = np.linspace(x0,x1,nptx)\n", + " Z0 = np.linspace(z0,z1,nptz)\n", + " X0grid,Z0grid = np.meshgrid(X0,Z0)\n", + " D0 = np.zeros((nptx,nptz))\n", " D0 = np.transpose(fdamp(X0grid,Z0grid))\n", - " \n", + "\n", " return D0" ] }, @@ -817,7 +816,7 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2damp(D): \n", + "def graph2damp(D):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscale = 1/10**(-3)\n", @@ -1076,7 +1075,7 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2d(U): \n", + "def graph2d(U):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscale = 1/10**(3)\n", @@ -1141,7 +1140,7 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2drec(rec): \n", + "def graph2drec(rec):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscaled = 1/10**(3)\n", diff --git a/examples/seismic/abc_methods/03_pml.ipynb b/examples/seismic/abc_methods/03_pml.ipynb index 6fff176e8d..3be3fc9deb 100644 --- a/examples/seismic/abc_methods/03_pml.ipynb +++ b/examples/seismic/abc_methods/03_pml.ipynb @@ -155,8 +155,7 @@ "\n", "import numpy as np\n", "import matplotlib.pyplot as plot\n", - "import math as mt\n", - "import matplotlib.ticker as mticker \n", + "import matplotlib.ticker as mticker\n", "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", "from matplotlib import cm" ] @@ -197,7 +196,7 @@ "nptx = 101\n", "nptz = 101\n", "x0 = 0.\n", - "x1 = 1000. 
\n", + "x1 = 1000.\n", "compx = x1-x0\n", "z0 = 0.\n", "z1 = 1000.\n", @@ -393,32 +392,32 @@ "v1 = np.zeros((nptx-1,nptz-1))\n", "X0 = np.linspace(x0,x1,nptx)\n", "Z0 = np.linspace(z0,z1,nptz)\n", - " \n", + "\n", "x10 = x0+lx\n", "x11 = x1-lx\n", - " \n", + "\n", "z10 = z0\n", "z11 = z1 - lz\n", "\n", "xm = 0.5*(x10+x11)\n", "zm = 0.5*(z10+z11)\n", - " \n", + "\n", "pxm = 0\n", "pzm = 0\n", - " \n", + "\n", "for i in range(0,nptx):\n", " if(X0[i]==xm): pxm = i\n", - " \n", + "\n", "for j in range(0,nptz):\n", " if(Z0[j]==zm): pzm = j\n", - " \n", - "p0 = 0 \n", + "\n", + "p0 = 0\n", "p1 = pzm\n", "p2 = nptz\n", "v0[0:nptx,p0:p1] = 1.5\n", "v0[0:nptx,p1:p2] = 2.5\n", "\n", - "p0 = 0 \n", + "p0 = 0\n", "p1 = pzm\n", "p2 = nptz-1\n", "v1[0:nptx-1,p0:p1] = 1.5\n", @@ -509,9 +508,9 @@ "outputs": [], "source": [ "t0 = 0.\n", - "tn = 1000. \n", + "tn = 1000.\n", "CFL = 0.4\n", - "vmax = np.amax(v0) \n", + "vmax = np.amax(v0)\n", "dtmax = np.float64((min(hxv,hzv)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", "dt0 = np.float64((tn-t0)/ntmax)" @@ -540,8 +539,8 @@ "metadata": {}, "outputs": [], "source": [ - "(hx,hz) = grid.spacing_map \n", - "(x, z) = grid.dimensions \n", + "(hx,hz) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", "t = grid.stepping_dim\n", "dt = grid.stepping_dim.spacing" ] @@ -765,10 +764,10 @@ "metadata": {}, "outputs": [], "source": [ - "x0pml = x0 + npmlx*hxv \n", - "x1pml = x1 - npmlx*hxv \n", - "z0pml = z0 \n", - "z1pml = z1 - npmlz*hzv " + "x0pml = x0 + npmlx*hxv\n", + "x1pml = x1 - npmlx*hxv\n", + "z0pml = z0\n", + "z1pml = z1 - npmlz*hzv" ] }, { @@ -785,16 +784,16 @@ "outputs": [], "source": [ "def fdamp(x,z,i):\n", - " \n", + "\n", " quibar = 0.05\n", - " \n", + "\n", " if(i==1):\n", " a = np.where(x<=x0pml,(np.abs(x-x0pml)/lx),np.where(x>=x1pml,(np.abs(x-x1pml)/lx),0.))\n", " fdamp = quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))\n", " if(i==2):\n", " a = 
np.where(z<=z0pml,(np.abs(z-z0pml)/lz),np.where(z>=z1pml,(np.abs(z-z1pml)/lz),0.))\n", " fdamp = quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))\n", - " \n", + "\n", " return fdamp" ] }, @@ -816,25 +815,25 @@ "outputs": [], "source": [ "def generatemdamp():\n", - " \n", - " X0 = np.linspace(x0,x1,nptx) \n", + "\n", + " X0 = np.linspace(x0,x1,nptx)\n", " Z0 = np.linspace(z0,z1,nptz)\n", " X0grid,Z0grid = np.meshgrid(X0,Z0)\n", " X1 = np.linspace((x0+0.5*hxv),(x1-0.5*hxv),nptx-1)\n", " Z1 = np.linspace((z0+0.5*hzv),(z1-0.5*hzv),nptz-1)\n", " X1grid,Z1grid = np.meshgrid(X1,Z1)\n", - " \n", + "\n", " D01 = np.zeros((nptx,nptz))\n", " D02 = np.zeros((nptx,nptz))\n", " D11 = np.zeros((nptx,nptz))\n", " D12 = np.zeros((nptx,nptz))\n", - " \n", + "\n", " D01 = np.transpose(fdamp(X0grid,Z0grid,1))\n", " D02 = np.transpose(fdamp(X0grid,Z0grid,2))\n", - " \n", + "\n", " D11 = np.transpose(fdamp(X1grid,Z1grid,1))\n", " D12 = np.transpose(fdamp(X1grid,Z1grid,2))\n", - " \n", + "\n", " return D01, D02, D11, D12" ] }, @@ -860,7 +859,7 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2damp(D): \n", + "def graph2damp(D):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscale = 1/10**(-3)\n", @@ -1057,23 +1056,23 @@ "outputs": [], "source": [ "# White Region\n", - "pde01 = Eq(u.dt2-u.laplace*vel0**2) \n", + "pde01 = Eq(u.dt2-u.laplace*vel0**2)\n", "\n", "# Blue Region\n", - "pde02a = u.dt2 + (dampx0+dampz0)*u.dtc + (dampx0*dampz0)*u - u.laplace*vel0*vel0 \n", + "pde02a = u.dt2 + (dampx0+dampz0)*u.dtc + (dampx0*dampz0)*u - u.laplace*vel0*vel0\n", "pde02b = - (0.5/hx)*(phi1[t,x,z-1]+phi1[t,x,z]-phi1[t,x-1,z-1]-phi1[t,x-1,z])\n", "pde02c = - (0.5/hz)*(phi2[t,x-1,z]+phi2[t,x,z]-phi2[t,x-1,z-1]-phi2[t,x,z-1])\n", "pde02 = Eq(pde02a + pde02b + pde02c)\n", "\n", "pde10 = phi1.dt + dampx1*0.5*(phi1.forward+phi1)\n", - "a1 = u[t+1,x+1,z] + u[t+1,x+1,z+1] - u[t+1,x,z] - u[t+1,x,z+1] \n", - "a2 = u[t,x+1,z] + u[t,x+1,z+1] - u[t,x,z] - u[t,x,z+1] \n", + "a1 = u[t+1,x+1,z] + 
u[t+1,x+1,z+1] - u[t+1,x,z] - u[t+1,x,z+1]\n", + "a2 = u[t,x+1,z] + u[t,x+1,z+1] - u[t,x,z] - u[t,x,z+1]\n", "pde11 = -(dampz1-dampx1)*0.5*(0.5/hx)*(a1+a2)*vel1**2\n", "pde1 = Eq(pde10+pde11)\n", - " \n", - "pde20 = phi2.dt + dampz1*0.5*(phi2.forward+phi2) \n", - "b1 = u[t+1,x,z+1] + u[t+1,x+1,z+1] - u[t+1,x,z] - u[t+1,x+1,z] \n", - "b2 = u[t,x,z+1] + u[t,x+1,z+1] - u[t,x,z] - u[t,x+1,z] \n", + "\n", + "pde20 = phi2.dt + dampz1*0.5*(phi2.forward+phi2)\n", + "b1 = u[t+1,x,z+1] + u[t+1,x+1,z+1] - u[t+1,x,z] - u[t+1,x+1,z]\n", + "b2 = u[t,x,z+1] + u[t,x+1,z+1] - u[t,x,z] - u[t,x+1,z]\n", "pde21 = -(dampx1-dampz1)*0.5*(0.5/hz)*(b1+b2)*vel1**2\n", "pde2 = Eq(pde20+pde21)" ] @@ -1245,7 +1244,7 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2d(U): \n", + "def graph2d(U):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscale = 1/10**(3)\n", @@ -1310,7 +1309,7 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2drec(rec): \n", + "def graph2drec(rec):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscaled = 1/10**(3)\n", diff --git a/examples/seismic/abc_methods/04_habc.ipynb b/examples/seismic/abc_methods/04_habc.ipynb index 607c0dbe3f..cf338f8fba 100644 --- a/examples/seismic/abc_methods/04_habc.ipynb +++ b/examples/seismic/abc_methods/04_habc.ipynb @@ -204,8 +204,7 @@ "\n", "import numpy as np\n", "import matplotlib.pyplot as plot\n", - "import math as mt\n", - "import matplotlib.ticker as mticker \n", + "import matplotlib.ticker as mticker\n", "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", "from matplotlib import cm" ] @@ -229,7 +228,6 @@ "from examples.seismic import TimeAxis\n", "from examples.seismic import RickerSource\n", "from examples.seismic import Receiver\n", - "from examples.seismic import plot_velocity\n", "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" ] }, @@ -249,11 +247,11 @@ "nptx = 101\n", "nptz = 101\n", "x0 = 0.\n", - "x1 = 1000. 
\n", + "x1 = 1000.\n", "compx = x1-x0\n", "z0 = 0.\n", "z1 = 1000.\n", - "compz = z1-z0;\n", + "compz = z1-z0\n", "hxv = (x1-x0)/(nptx-1)\n", "hzv = (z1-z0)/(nptz-1)" ] @@ -440,32 +438,32 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = np.zeros((nptx,nptz)) \n", + "v0 = np.zeros((nptx,nptz))\n", "X0 = np.linspace(x0,x1,nptx)\n", "Z0 = np.linspace(z0,z1,nptz)\n", - " \n", + "\n", "x10 = x0+lx\n", "x11 = x1-lx\n", - " \n", + "\n", "z10 = z0\n", "z11 = z1 - lz\n", "\n", "xm = 0.5*(x10+x11)\n", "zm = 0.5*(z10+z11)\n", - " \n", + "\n", "pxm = 0\n", "pzm = 0\n", - " \n", + "\n", "for i in range(0,nptx):\n", " if(X0[i]==xm): pxm = i\n", - " \n", + "\n", "for j in range(0,nptz):\n", " if(Z0[j]==zm): pzm = j\n", - " \n", - "p0 = 0 \n", + "\n", + "p0 = 0\n", "p1 = pzm\n", "p2 = nptz\n", - " \n", + "\n", "v0[0:nptx,p0:p1] = 1.5\n", "v0[0:nptx,p1:p2] = 2.5" ] @@ -554,9 +552,9 @@ "outputs": [], "source": [ "t0 = 0.\n", - "tn = 1000. \n", + "tn = 1000.\n", "CFL = 0.4\n", - "vmax = np.amax(v0) \n", + "vmax = np.amax(v0)\n", "dtmax = np.float64((min(hxv,hzv)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", "dt0 = np.float64((tn-t0)/ntmax)" @@ -592,8 +590,8 @@ "metadata": {}, "outputs": [], "source": [ - "(hx,hz) = grid.spacing_map \n", - "(x, z) = grid.dimensions \n", + "(hx,hz) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", "t = grid.stepping_dim\n", "dt = grid.stepping_dim.spacing" ] @@ -758,69 +756,69 @@ "outputs": [], "source": [ "def generateweights():\n", - " \n", + "\n", " weightsx = np.zeros(npmlx)\n", " weightsz = np.zeros(npmlz)\n", " Mweightsx = np.zeros((nptx,nptz))\n", " Mweightsz = np.zeros((nptx,nptz))\n", - " \n", + "\n", " if(habcw==1):\n", - " \n", + "\n", " for i in range(0,npmlx):\n", " weightsx[i] = (npmlx-i)/(npmlx)\n", - " \n", + "\n", " for i in range(0,npmlz):\n", " weightsz[i] = (npmlz-i)/(npmlz)\n", - " \n", + "\n", " if(habcw==2):\n", - " \n", + "\n", " mx = 2\n", " mz = 2\n", - " \n", + "\n", " if(habctype==3):\n", - " \n", - " alphax = 
1.0 + 0.15*(npmlx-mx) \n", + "\n", + " alphax = 1.0 + 0.15*(npmlx-mx)\n", " alphaz = 1.0 + 0.15*(npmlz-mz)\n", - " \n", + "\n", " else:\n", - " \n", - " alphax = 1.5 + 0.07*(npmlx-mx) \n", + "\n", + " alphax = 1.5 + 0.07*(npmlx-mx)\n", " alphaz = 1.5 + 0.07*(npmlz-mz)\n", - " \n", + "\n", " for i in range(0,npmlx):\n", - " \n", + "\n", " if(0<=i<=(mx)):\n", " weightsx[i] = 1\n", " elif((mx+1)<=i<=npmlx-1):\n", " weightsx[i] = ((npmlx-i)/(npmlx-mx))**(alphax)\n", " else:\n", " weightsx[i] = 0\n", - " \n", + "\n", " for i in range(0,npmlz):\n", - " \n", + "\n", " if(0<=i<=(mz)):\n", " weightsz[i] = 1\n", " elif((mz+1)<=i<=npmlz-1):\n", " weightsz[i] = ((npmlz-i)/(npmlz-mz))**(alphaz)\n", " else:\n", " weightsz[i] = 0\n", - " \n", + "\n", " for k in range(0,npmlx):\n", - " \n", + "\n", " ai = k\n", - " af = nptx - k - 1 \n", + " af = nptx - k - 1\n", " bi = 0\n", " bf = nptz - k\n", " Mweightsx[ai,bi:bf] = weightsx[k]\n", " Mweightsx[af,bi:bf] = weightsx[k]\n", - " \n", + "\n", " for k in range(0,npmlz):\n", - " \n", + "\n", " ai = k\n", - " af = nptx - k \n", - " bf = nptz - k - 1 \n", + " af = nptx - k\n", + " bf = nptz - k - 1\n", " Mweightsz[ai:af,bf] = weightsz[k]\n", - " \n", + "\n", " return Mweightsx,Mweightsz" ] }, @@ -853,7 +851,7 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2dweight(D): \n", + "def graph2dweight(D):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscale = 1/10**(-3)\n", @@ -1116,10 +1114,10 @@ "outputs": [], "source": [ "if(habctype==2):\n", - " \n", + "\n", " # Region B_{1}\n", " cte11 = (1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z]\n", - " cte21 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z] - (1/(2*hz**2))*vel[x,z]*vel[x,z] \n", + " cte21 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z] - (1/(2*hz**2))*vel[x,z]*vel[x,z]\n", " cte31 = -(1/(2*dt**2)) - (1/(2*dt*hx))*vel[x,z]\n", " cte41 = (1/(dt**2))\n", " cte51 = (1/(4*hz**2))*vel[x,z]**2\n", @@ -1127,14 +1125,14 @@ " aux1 = (cte21*(u3[x+1,z] + u1[x,z]) + cte31*u1[x+1,z] + 
cte41*(u2[x,z]+u2[x+1,z]) + cte51*(u3[x+1,z+1] + u3[x+1,z-1] + u1[x,z+1] + u1[x,z-1]))/cte11\n", " pde1 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux1\n", " stencil1 = Eq(u.forward,pde1,subdomain = grid.subdomains['d1'])\n", - " \n", + "\n", " # Region B_{3}\n", " cte12 = (1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z]\n", " cte22 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z] - (1/(2*hz**2))*vel[x,z]**2\n", " cte32 = -(1/(2*dt**2)) - (1/(2*dt*hx))*vel[x,z]\n", " cte42 = (1/(dt**2))\n", " cte52 = (1/(4*hz**2))*vel[x,z]*vel[x,z]\n", - " \n", + "\n", " aux2 = (cte22*(u3[x-1,z] + u1[x,z]) + cte32*u1[x-1,z] + cte42*(u2[x,z]+u2[x-1,z]) + cte52*(u3[x-1,z+1] + u3[x-1,z-1] + u1[x,z+1] + u1[x,z-1]))/cte12\n", " pde2 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux2\n", " stencil2 = Eq(u.forward,pde2,subdomain = grid.subdomains['d2'])\n", @@ -1151,26 +1149,26 @@ " stencil3 = Eq(u.forward,pde3,subdomain = grid.subdomains['d3'])\n", "\n", " # Red point rigth side\n", - " stencil4 = [Eq(u[t+1,nptx-1-k,nptz-1-k],(1-weightsz[nptx-1-k,nptz-1-k])*u3[nptx-1-k,nptz-1-k] + \n", - " weightsz[nptx-1-k,nptz-1-k]*(((-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-1-k,nptz-2-k] \n", - " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-2-k,nptz-1-k] \n", - " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-2-k,nptz-2-k] \n", - " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-1-k,nptz-1-k] \n", - " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-1-k,nptz-2-k] \n", - " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-2-k,nptz-1-k] \n", + " stencil4 = [Eq(u[t+1,nptx-1-k,nptz-1-k],(1-weightsz[nptx-1-k,nptz-1-k])*u3[nptx-1-k,nptz-1-k] +\n", + " weightsz[nptx-1-k,nptz-1-k]*(((-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-1-k,nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) - 
(np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-2-k,nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-2-k,nptz-2-k]\n", + " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-1-k,nptz-1-k]\n", + " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-1-k,nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-2-k,nptz-1-k]\n", " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-2-k,nptz-2-k])\n", - " / (((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))))) for k in range(0,npmlz)] \n", + " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt)))) for k in range(0,npmlz)]\n", "\n", " # Red point left side\n", - " stencil5 = [Eq(u[t+1,k,nptz-1-k],(1-weightsx[k,nptz-1-k] )*u3[k,nptz-1-k] \n", - " + weightsx[k,nptz-1-k]*(( (-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k,nptz-2-k] \n", - " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k+1,nptz-1-k] \n", - " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k+1,nptz-2-k] \n", - " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k,nptz-1-k] \n", - " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k,nptz-2-k] \n", - " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k+1,nptz-1-k] \n", + " stencil5 = [Eq(u[t+1,k,nptz-1-k],(1-weightsx[k,nptz-1-k] )*u3[k,nptz-1-k]\n", + " + weightsx[k,nptz-1-k]*(( (-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k,nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k+1,nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k+1,nptz-2-k]\n", + " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k,nptz-1-k]\n", + " + (-(1/(4*hx)) + (1/(4*hz)) + 
(np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k,nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k+1,nptz-1-k]\n", " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k+1,nptz-2-k])\n", - " / (((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))))) for k in range(0,npmlx)]" + " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt)))) for k in range(0,npmlx)]" ] }, { @@ -1200,23 +1198,23 @@ " gama121 = np.cos(alpha1)*(a1)*(1/dt)\n", " gama131 = np.cos(alpha1)*(1-b1)*(1/hx)*vel[x,z]\n", " gama141 = np.cos(alpha1)*(b1)*(1/hx)*vel[x,z]\n", - " \n", + "\n", " gama211 = np.cos(alpha2)*(1-a2)*(1/dt)\n", " gama221 = np.cos(alpha2)*(a2)*(1/dt)\n", " gama231 = np.cos(alpha2)*(1-b2)*(1/hx)*vel[x,z]\n", " gama241 = np.cos(alpha2)*(b2)*(1/hx)*vel[x,z]\n", - " \n", + "\n", " c111 = gama111 + gama131\n", " c121 = -gama111 + gama141\n", " c131 = gama121 - gama131\n", " c141 = -gama121 - gama141\n", - " \n", + "\n", " c211 = gama211 + gama231\n", " c221 = -gama211 + gama241\n", " c231 = gama221 - gama231\n", " c241 = -gama221 - gama241\n", "\n", - " aux1 = ( u2[x,z]*(-c111*c221-c121*c211) + u3[x+1,z]*(-c111*c231-c131*c211) + u2[x+1,z]*(-c111*c241-c121*c231-c141*c211-c131*c221) \n", + " aux1 = ( u2[x,z]*(-c111*c221-c121*c211) + u3[x+1,z]*(-c111*c231-c131*c211) + u2[x+1,z]*(-c111*c241-c121*c231-c141*c211-c131*c221)\n", " + u1[x,z]*(-c121*c221) + u1[x+1,z]*(-c121*c241-c141*c221) + u3[x+2,z]*(-c131*c231) +u2[x+2,z]*(-c131*c241-c141*c231)\n", " + u1[x+2,z]*(-c141*c241))/(c111*c211)\n", " pde1 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux1\n", @@ -1227,23 +1225,23 @@ " gama122 = np.cos(alpha1)*(a1)*(1/dt)\n", " gama132 = np.cos(alpha1)*(1-b1)*(1/hx)*vel[x,z]\n", " gama142 = np.cos(alpha1)*(b1)*(1/hx)*vel[x,z]\n", - " \n", + "\n", " gama212 = np.cos(alpha2)*(1-a2)*(1/dt)\n", " gama222 = np.cos(alpha2)*(a2)*(1/dt)\n", " gama232 = np.cos(alpha2)*(1-b2)*(1/hx)*vel[x,z]\n", " gama242 = 
np.cos(alpha2)*(b2)*(1/hx)*vel[x,z]\n", - " \n", + "\n", " c112 = gama112 + gama132\n", " c122 = -gama112 + gama142\n", " c132 = gama122 - gama132\n", " c142 = -gama122 - gama142\n", - " \n", + "\n", " c212 = gama212 + gama232\n", " c222 = -gama212 + gama242\n", " c232 = gama222 - gama232\n", " c242 = -gama222 - gama242\n", "\n", - " aux2 = ( u2[x,z]*(-c112*c222-c122*c212) + u3[x-1,z]*(-c112*c232-c132*c212) + u2[x-1,z]*(-c112*c242-c122*c232-c142*c212-c132*c222) \n", + " aux2 = ( u2[x,z]*(-c112*c222-c122*c212) + u3[x-1,z]*(-c112*c232-c132*c212) + u2[x-1,z]*(-c112*c242-c122*c232-c142*c212-c132*c222)\n", " + u1[x,z]*(-c122*c222) + u1[x-1,z]*(-c122*c242-c142*c222) + u3[x-2,z]*(-c132*c232) +u2[x-2,z]*(-c132*c242-c142*c232)\n", " + u1[x-2,z]*(-c142*c242))/(c112*c212)\n", " pde2 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux2\n", @@ -1254,23 +1252,23 @@ " gama123 = np.cos(alpha1)*(a1)*(1/dt)\n", " gama133 = np.cos(alpha1)*(1-b1)*(1/hz)*vel[x,z]\n", " gama143 = np.cos(alpha1)*(b1)*(1/hz)*vel[x,z]\n", - " \n", + "\n", " gama213 = np.cos(alpha2)*(1-a2)*(1/dt)\n", " gama223 = np.cos(alpha2)*(a2)*(1/dt)\n", " gama233 = np.cos(alpha2)*(1-b2)*(1/hz)*vel[x,z]\n", " gama243 = np.cos(alpha2)*(b2)*(1/hz)*vel[x,z]\n", - " \n", + "\n", " c113 = gama113 + gama133\n", " c123 = -gama113 + gama143\n", " c133 = gama123 - gama133\n", " c143 = -gama123 - gama143\n", - " \n", + "\n", " c213 = gama213 + gama233\n", " c223 = -gama213 + gama243\n", " c233 = gama223 - gama233\n", " c243 = -gama223 - gama243\n", "\n", - " aux3 = ( u2[x,z]*(-c113*c223-c123*c213) + u3[x,z-1]*(-c113*c233-c133*c213) + u2[x,z-1]*(-c113*c243-c123*c233-c143*c213-c133*c223) \n", + " aux3 = ( u2[x,z]*(-c113*c223-c123*c213) + u3[x,z-1]*(-c113*c233-c133*c213) + u2[x,z-1]*(-c113*c243-c123*c233-c143*c213-c133*c223)\n", " + u1[x,z]*(-c123*c223) + u1[x,z-1]*(-c123*c243-c143*c223) + u3[x,z-2]*(-c133*c233) +u2[x,z-2]*(-c133*c243-c143*c233)\n", " + u1[x,z-2]*(-c143*c243))/(c113*c213)\n", " pde3 = (1-weightsz[x,z])*u3[x,z] + 
weightsz[x,z]*aux3\n", @@ -1412,13 +1410,13 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2d(U,i): \n", + "def graph2d(U,i):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscale = 1/10**(3)\n", " x0pml = x0 + npmlx*hxv\n", " x1pml = x1 - npmlx*hxv\n", - " z0pml = z0 \n", + " z0pml = z0\n", " z1pml = z1 - npmlz*hzv\n", " scale = np.amax(U[npmlx:-npmlx,0:-npmlz])/10.\n", " extent = [fscale*x0pml,fscale*x1pml,fscale*z1pml,fscale*z0pml]\n", @@ -1483,7 +1481,7 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2drec(rec,i): \n", + "def graph2drec(rec,i):\n", " plot.figure()\n", " plot.figure(figsize=(16,8))\n", " fscaled = 1/10**(3)\n", diff --git a/examples/seismic/acoustic/accuracy.ipynb b/examples/seismic/acoustic/accuracy.ipynb index aef7fd876b..b9e4aa6533 100644 --- a/examples/seismic/acoustic/accuracy.ipynb +++ b/examples/seismic/acoustic/accuracy.ipynb @@ -9,11 +9,10 @@ "import numpy as np\n", "from scipy.special import hankel2\n", "from examples.seismic.acoustic import AcousticWaveSolver\n", - "from examples.seismic import Model, RickerSource, Receiver, TimeAxis, AcquisitionGeometry\n", + "from examples.seismic import Model, AcquisitionGeometry\n", "from devito import set_log_level\n", "\n", "import matplotlib.pyplot as plt\n", - "from matplotlib import cm\n", "%matplotlib inline" ] }, @@ -146,9 +145,9 @@ " \"and a physical extent of (%sm, %sm)\" % (*model.grid.shape, *model.grid.extent))\n", "print(\"Source is at the center with coordinates (%sm, %sm)\" % tuple(src_coordinates[0]))\n", "print(\"Receiver (single receiver) is located at (%sm, %sm) \" % tuple(rec_coordinates[0]))\n", - " \n", + "\n", "# Note: gets time sampling from model.critical_dt\n", - "geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates, \n", + "geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,\n", " t0=t0, tn=tn, src_type='Ricker', f0=f0, t0w=1.5/f0)" ] }, @@ -225,12 +224,12 @@ " U_a = np.zeros((nf), dtype=complex)\n", 
" for a in range(1, nf-1):\n", " k = 2 * np.pi * faxis[a] / c0\n", - " tmp = k * np.sqrt(((rx - sx))**2 + ((rz - sz))**2)\n", + " tmp = k * np.sqrt(((rx - sx))**2 + (rz - sz)**2)\n", " U_a[a] = -1j * np.pi * hankel2(0.0, tmp) * R[a]\n", "\n", " # Do inverse fft on 0:dt:T and you have analytical solution\n", " U_t = 1.0/(2.0 * np.pi) * np.real(np.fft.ifft(U_a[:], nt))\n", - " \n", + "\n", " # The analytic solution needs be scaled by dx^2 to convert to pressure\n", " return np.real(U_t) * (model.spacing[0]**2)" ] @@ -262,9 +261,9 @@ ], "source": [ "#NBVAL_IGNORE_OUTPUT\n", - "print(\"Numerical data min,max,abs; %+.6e %+.6e %+.6e\" % \n", + "print(\"Numerical data min,max,abs; %+.6e %+.6e %+.6e\" %\n", " (np.min(ref_rec.data), np.max(ref_rec.data), np.max(np.abs(ref_rec.data)) ))\n", - "print(\"Analytic data min,max,abs; %+.6e %+.6e %+.6e\" % \n", + "print(\"Analytic data min,max,abs; %+.6e %+.6e %+.6e\" %\n", " (np.min(U_t), np.max(U_t), (np.max(np.abs(U_t)))))" ] }, @@ -409,14 +408,14 @@ " rec_coordinates = np.empty((1, 2))\n", " rec_coordinates[:, :] = 260.\n", "\n", - " geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates, \n", + " geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,\n", " t0=t0, tn=tn, src_type='Ricker', f0=f0, t0w=1.5/f0)\n", "\n", - " # Note: incorrect data size will be generated here due to AcquisitionGeometry bug ... 
\n", + " # Note: incorrect data size will be generated here due to AcquisitionGeometry bug ...\n", " # temporarily fixed below by resizing the output from the solver\n", " geometry.resample(dt[i])\n", " print(\"geometry.time_axes; \", geometry.time_axis)\n", - " \n", + "\n", " solver = AcousticWaveSolver(model, geometry, time_order=2, space_order=8)\n", " ref_rec1, ref_u1, _ = solver.forward(dt=dt[i])\n", " ref_rec1_data = ref_rec1.data[0:nnt[i],:]\n", @@ -429,7 +428,7 @@ "\n", " ratio_d = dt[i-1]/dt[i] if i > 0 else 1.0\n", " ratio_e = error_time[i-1]/error_time[i] if i > 0 else 1.0\n", - " print(\"error for dt=%.4f is %12.6e -- ratio dt^2,ratio err; %12.6f %12.6f \\n\" % \n", + " print(\"error for dt=%.4f is %12.6e -- ratio dt^2,ratio err; %12.6f %12.6f \\n\" %\n", " (dt[i], error_time[i], ratio_d**2, ratio_e))\n", " errors_plot.append((geometry.time_axis.time_values, U_t1[:-1] - ref_rec1_data[:-1, 0]))" ] @@ -594,19 +593,19 @@ " rec_coordinates = np.empty((1, 2))\n", " rec_coordinates[:, :] = 260.\n", "\n", - " geometry = AcquisitionGeometry(model_space, rec_coordinates, src_coordinates, \n", + " geometry = AcquisitionGeometry(model_space, rec_coordinates, src_coordinates,\n", " t0=t0, tn=tn, src_type='Ricker', f0=f0, t0w=1.5/f0)\n", "\n", " solver = AcousticWaveSolver(model_space, geometry, time_order=2, space_order=spc)\n", " loc_rec, loc_u, summary = solver.forward()\n", "\n", - " # Note: we need to correct for fixed spacing pressure corrections in both analytic \n", + " # Note: we need to correct for fixed spacing pressure corrections in both analytic\n", " # (run at the old model spacing) and numerical (run at the new model spacing) solutions\n", " c_ana = 1 / model.spacing[0]**2\n", " c_num = 1 / model_space.spacing[0]**2\n", "\n", " # Compare to reference solution\n", - " # Note: we need to normalize by the factor of grid spacing squared \n", + " # Note: we need to normalize by the factor of grid spacing squared\n", " errorl2[ind_o, ind_spc] = 
np.linalg.norm(loc_rec.data[:-1, 0] * c_num - U_t[:-1] * c_ana, 2) / np.sqrt(U_t.shape[0] - 1)\n", " timing[ind_o, ind_spc] = np.max([v for _, v in summary.timings.items()])\n", " print(\"starting space order %s with (%s, %s) grid points the error is %s for %s seconds runtime\" %\n", diff --git a/examples/seismic/model.py b/examples/seismic/model.py index c40ee232fe..16d8f7a3a6 100644 --- a/examples/seismic/model.py +++ b/examples/seismic/model.py @@ -10,8 +10,13 @@ from devito.builtins import initialize_function, gaussian_smooth, mmax, mmin from devito.tools import as_tuple -__all__ = ['SeismicModel', 'Model', 'ModelElastic', - 'ModelViscoelastic', 'ModelViscoacoustic'] +__all__ = [ + 'Model', + 'ModelElastic', + 'ModelViscoacoustic', + 'ModelViscoelastic', + 'SeismicModel', +] def initialize_damp(damp, padsizes, spacing, abc_type="damp", fs=False): diff --git a/examples/seismic/preset_models.py b/examples/seismic/preset_models.py index 5748bc383f..a2ab9fbf28 100644 --- a/examples/seismic/preset_models.py +++ b/examples/seismic/preset_models.py @@ -277,7 +277,7 @@ def demo_model(preset, **kwargs): nbl = kwargs.pop('nbl', 20) # Read 2D Marmousi model from devitocodes/data repo - data_path = kwargs.get('data_path', None) + data_path = kwargs.get('data_path') if data_path is None: raise ValueError("Path to devitocodes/data not found! 
Please specify with " "'data_path='") diff --git a/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb b/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb index 13abfdfceb..7cd6905201 100644 --- a/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb +++ b/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb @@ -146,16 +146,13 @@ "source": [ "import numpy as np\n", "from examples.seismic import RickerSource, Receiver, TimeAxis\n", - "from devito import (Grid, Function, TimeFunction, SpaceDimension, Constant, \n", - " Eq, Operator, solve, configuration, norm)\n", - "from devito.finite_differences import Derivative\n", - "from devito.builtins import gaussian_smooth\n", + "from devito import (Grid, Function, TimeFunction, SpaceDimension, Constant,\n", + " Eq, Operator, configuration, norm)\n", "from examples.seismic.self_adjoint import setup_w_over_q\n", "import matplotlib as mpl\n", "import matplotlib.pyplot as plt\n", "from matplotlib import cm\n", - "from timeit import default_timer as timer\n", - "# These lines force images to be displayed in the notebook, and scale up fonts \n", + "# These lines force images to be displayed in the notebook, and scale up fonts\n", "%matplotlib inline\n", "mpl.rc('font', size=14)\n", "\n", @@ -230,18 +227,18 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "# Make 1D grid to test derivatives \n", + "# Make 1D grid to test derivatives\n", "n = 101\n", "d = 1.0\n", "shape = (n, )\n", - "spacing = (1 / (n-1), ) \n", + "spacing = (1 / (n-1), )\n", "origin = (0., )\n", "extent = (d * (n-1), )\n", "dtype = np.float64\n", "\n", "# Initialize Devito grid and Functions for input(f1,g1) and output(f2,g2)\n", - "# Note that space_order=8 allows us to use an 8th order finite difference \n", - "# operator by properly setting up grid accesses with halo cells \n", + "# Note that space_order=8 allows us to use an 8th order finite difference\n", + "# operator by properly setting up grid accesses with halo 
cells\n", "grid1d = Grid(shape=shape, extent=extent, origin=origin, dtype=dtype)\n", "x = grid1d.dimensions[0]\n", "f1 = Function(name='f1', grid=grid1d, space_order=8)\n", @@ -367,13 +364,13 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "# Show the FD coefficients generated by Devito \n", + "# Show the FD coefficients generated by Devito\n", "# for the forward 1/2 cell shifted first derivative operator\n", "print(\"\\n\\nForward +1/2 cell shift;\")\n", "print(\"..................................\")\n", "print(f1.dx(x0=x+0.5*x.spacing).evaluate)\n", "\n", - "# Show the FD coefficients generated by Devito \n", + "# Show the FD coefficients generated by Devito\n", "# for the backward 1/2 cell shifted first derivative operator\n", "print(\"\\n\\nBackward -1/2 cell shift;\")\n", "print(\"..................................\")\n", @@ -523,15 +520,15 @@ "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)])\n", "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])\n", "\n", - "# Define the dimensions \n", + "# Define the dimensions\n", "# Note if you do not specify dimensions, you get in order x,y,z\n", - "x = SpaceDimension(name='x', spacing=Constant(name='h_x', \n", + "x = SpaceDimension(name='x', spacing=Constant(name='h_x',\n", " value=extent_pad[0]/(shape_pad[0]-1)))\n", - "z = SpaceDimension(name='z', spacing=Constant(name='h_z', \n", + "z = SpaceDimension(name='z', spacing=Constant(name='h_z',\n", " value=extent_pad[1]/(shape_pad[1]-1)))\n", "\n", - "# Initialize the Devito grid \n", - "grid = Grid(extent=extent_pad, shape=shape_pad, origin=origin_pad, \n", + "# Initialize the Devito grid\n", + "grid = Grid(extent=extent_pad, shape=shape_pad, origin=origin_pad,\n", " dimensions=(x, z), dtype=dtype)\n", "\n", "print(\"shape; \", shape)\n", @@ -578,11 +575,11 @@ } ], "source": [ - "# Create the velocity and buoyancy fields. 
\n", + "# Create the velocity and buoyancy fields.\n", "# - We use a wholespace velocity of 1500 m/s\n", "# - We use a wholespace density of 1 g/cm^3\n", "# - These are scalar fields so we use Function to define them\n", - "# - We specify space_order to establish the appropriate size halo on the edges \n", + "# - We specify space_order to establish the appropriate size halo on the edges\n", "space_order = 8\n", "\n", "# Wholespace velocity\n", @@ -768,7 +765,7 @@ "plt.figure(figsize=(12,8))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(np.transpose(m.data), cmap=cm.jet, \n", + "plt.imshow(np.transpose(m.data), cmap=cm.jet,\n", " vmin=vmin, vmax=vmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", @@ -916,7 +913,7 @@ "plt.figure(figsize=(12,8))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(np.transpose(q025.data), cmap=cm.jet, \n", + "plt.imshow(np.transpose(q025.data), cmap=cm.jet,\n", " vmin=lmin, vmax=lmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='log10(Q)')\n", "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", @@ -975,7 +972,7 @@ "# Define the TimeFunction\n", "u = TimeFunction(name=\"u\", grid=grid, time_order=2, space_order=space_order)\n", "\n", - "# Get the symbols for dimensions for t, x, z \n", + "# Get the symbols for dimensions for t, x, z\n", "# We need these below in order to write the source injection and the\n", "t,x,z = u.dimensions" ] @@ -1000,7 +997,7 @@ "# Source injection, with appropriate scaling\n", "src_term = src.inject(field=u.forward, expr=src * t.spacing**2 * m**2 / b)\n", "\n", - "# Receiver extraction \n", + "# Receiver extraction\n", "rec_term = rec.interpolate(expr=u.forward)" ] }, @@ -1429,7 +1426,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Continuous integration hooks \n", + "# Continuous integration hooks\n", "# We ensure the norm of these computed 
wavefields is repeatable\n", "assert np.isclose(norm(uQ25), 26.749, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(u), 161.131, atol=0, rtol=1e-3)\n", @@ -1480,7 +1477,7 @@ "plt.figure(figsize=(12,8))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(np.transpose(uQ25.data[1,:,:] / amax_Q100), cmap=\"seismic\", \n", + "plt.imshow(np.transpose(uQ25.data[1,:,:] / amax_Q100), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", @@ -1552,7 +1549,7 @@ "plt.figure(figsize=(12,8))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(recQ25.data[:,:] / amax_Q100, cmap=\"seismic\", \n", + "plt.imshow(recQ25.data[:,:] / amax_Q100, cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -1591,7 +1588,7 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Define the partial_differential equation\n", - "# Note the backward shifted time derivative is obtained via u.dt(x0=t-0.5*t.spacing) \n", + "# Note the backward shifted time derivative is obtained via u.dt(x0=t-0.5*t.spacing)\n", "pde = (b / m**2) * (wOverQ_100 * u.dt(x0=t-0.5*t.spacing) + u.dt2) -\\\n", " (b * u.dx(x0=x+0.5*x.spacing)).dx(x0=x-0.5*x.spacing) -\\\n", " (b * u.dz(x0=z+0.5*z.spacing)).dz(x0=z-0.5*z.spacing)\n", @@ -1600,7 +1597,7 @@ "# t1 = timer()\n", "# stencil = Eq(u.forward, solve(pde, u.forward))\n", "# t2 = timer()\n", - "# print(\"solve ran in %.4f seconds.\" % (t2-t1)) \n", + "# print(\"solve ran in %.4f seconds.\" % (t2-t1))\n", "# stencil" ] }, diff --git a/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb b/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb index 9796ff40ad..e70aca879b 100644 --- a/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb +++ 
b/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb @@ -258,24 +258,21 @@ "source": [ "import numpy as np\n", "from examples.seismic import RickerSource, Receiver, TimeAxis\n", - "from devito import (Grid, Function, TimeFunction, SpaceDimension, Constant, \n", - " Eq, Operator, solve, configuration, norm)\n", - "from devito.finite_differences import Derivative\n", - "from devito.builtins import gaussian_smooth\n", + "from devito import (Grid, Function, TimeFunction, SpaceDimension, Constant,\n", + " Eq, Operator, configuration, norm)\n", "from examples.seismic.self_adjoint import setup_w_over_q\n", "import matplotlib as mpl\n", "import matplotlib.pyplot as plt\n", "from matplotlib import cm\n", - "from timeit import default_timer as timer\n", "\n", - "# These lines force images to be displayed in the notebook, and scale up fonts \n", + "# These lines force images to be displayed in the notebook, and scale up fonts\n", "%matplotlib inline\n", "mpl.rc('font', size=14)\n", "\n", "# Make white background for plots, not transparent\n", "plt.rcParams['figure.facecolor'] = 'white'\n", "\n", - "# We define 32 bit floating point as the precision type \n", + "# We define 32 bit floating point as the precision type\n", "dtype = np.float32\n", "\n", "# Set logging to debug, captures statistics on the performance of operators\n", @@ -332,15 +329,15 @@ "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)])\n", "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])\n", "\n", - "# Define the dimensions \n", + "# Define the dimensions\n", "# Note if you do not specify dimensions, you get in order x,y,z\n", - "x = SpaceDimension(name='x', spacing=Constant(name='h_x', \n", + "x = SpaceDimension(name='x', spacing=Constant(name='h_x',\n", " value=extent_pad[0]/(shape_pad[0]-1)))\n", - "z = SpaceDimension(name='z', spacing=Constant(name='h_z', \n", + "z = SpaceDimension(name='z', spacing=Constant(name='h_z',\n", " 
value=extent_pad[1]/(shape_pad[1]-1)))\n", "\n", - "# Initialize the Devito grid \n", - "grid = Grid(extent=extent_pad, shape=shape_pad, origin=origin_pad, \n", + "# Initialize the Devito grid\n", + "grid = Grid(extent=extent_pad, shape=shape_pad, origin=origin_pad,\n", " dimensions=(x, z), dtype=dtype)\n", "\n", "print(\"shape; \", shape)\n", @@ -391,7 +388,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "# Create the velocity and buoyancy fields as in the nonlinear notebook \n", + "# Create the velocity and buoyancy fields as in the nonlinear notebook\n", "space_order = 8\n", "\n", "# Wholespace velocity\n", @@ -567,7 +564,7 @@ "plt.figure(figsize=(12,12))\n", "\n", "plt.subplot(2, 2, 1)\n", - "plt.imshow(np.transpose(m0.data), cmap=cm.jet, \n", + "plt.imshow(np.transpose(m0.data), cmap=cm.jet,\n", " vmin=vmin, vmax=vmax, extent=plt_extent)\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", @@ -593,7 +590,7 @@ "plt.title(\"Background Density\")\n", "\n", "plt.subplot(2, 2, 3)\n", - "plt.imshow(np.transpose(dm.data), cmap=\"seismic\", \n", + "plt.imshow(np.transpose(dm.data), cmap=\"seismic\",\n", " vmin=pmin, vmax=pmax, extent=plt_extent)\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", @@ -645,7 +642,7 @@ "duFwd = TimeFunction(name=\"duFwd\", grid=grid, time_order=2, space_order=space_order, save=None)\n", "duAdj = TimeFunction(name=\"duAdj\", grid=grid, time_order=2, space_order=space_order, save=None)\n", "\n", - "# Get the dimensions for t, x, z \n", + "# Get the dimensions for t, x, z\n", "t,x,z = u0.dimensions" ] }, @@ -740,7 +737,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Continuous integration hooks \n", + "# Continuous integration hooks\n", "# We ensure the norm of these computed wavefields is 
repeatable\n", "# print(norm(u0))\n", "assert np.isclose(norm(u0), 3098.012, atol=0, rtol=1e-2)" @@ -796,7 +793,7 @@ "# The linearized forward time update equation\n", "eq_time_update_ln_fwd = (t.spacing**2 * m0**2 / b) * \\\n", " ((b * duFwd.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", - " (b * duFwd.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) + \n", + " (b * duFwd.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) +\n", " 2 * b * dm * m0**-3 * (wOverQ * u0.dt(x0=t-t.spacing/2) + u0.dt2)) +\\\n", " (2 - t.spacing * wOverQ) * duFwd + \\\n", " (t.spacing * wOverQ - 1) * duFwd.backward\n", @@ -821,7 +818,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Continuous integration hooks \n", + "# Continuous integration hooks\n", "# We ensure the norm of these computed wavefields is repeatable\n", "# print(norm(duFwd))\n", "assert np.isclose(norm(duFwd), 227.063, atol=0, rtol=1e-3)" @@ -875,7 +872,7 @@ "plt.figure(figsize=(12,12))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(np.transpose(u0.data[kt,:,:]), cmap=\"seismic\", \n", + "plt.imshow(np.transpose(u0.data[kt,:,:]), cmap=\"seismic\",\n", " vmin=-amax_nl, vmax=+amax_nl, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", @@ -1017,7 +1014,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Continuous integration hooks \n", + "# Continuous integration hooks\n", "# We ensure the norm of these computed wavefields is repeatable\n", "# print(norm(duAdj))\n", "assert np.isclose(norm(duAdj), 19218.924, atol=0, rtol=1e-3)" @@ -1073,7 +1070,7 @@ "plt.figure(figsize=(12,8))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(np.transpose(dm.data), cmap=\"seismic\", \n", + "plt.imshow(np.transpose(dm.data), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", 
"plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", @@ -1086,7 +1083,7 @@ "plt.title(\"Velocity Perturbation\")\n", "\n", "plt.subplot(1, 2, 2)\n", - "plt.imshow(np.transpose(dmAdj.data), cmap=\"seismic\", \n", + "plt.imshow(np.transpose(dmAdj.data), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", diff --git a/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb b/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb index abd3524132..3756ac364e 100644 --- a/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb +++ b/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb @@ -209,19 +209,14 @@ "source": [ "from scipy.special import hankel2\n", "import numpy as np\n", - "from examples.seismic import RickerSource, Receiver, TimeAxis, Model, AcquisitionGeometry\n", - "from devito import (Grid, Function, TimeFunction, SpaceDimension, Constant, \n", - " Eq, Operator, solve, configuration, norm)\n", - "from devito.finite_differences import Derivative\n", - "from devito.builtins import gaussian_smooth\n", + "from examples.seismic import RickerSource, TimeAxis, Model, AcquisitionGeometry\n", + "from devito import (Grid, Function, Eq, Operator, configuration, norm)\n", "from examples.seismic.self_adjoint import (acoustic_sa_setup, setup_w_over_q,\n", " SaIsoAcousticWaveSolver)\n", "import matplotlib as mpl\n", "import matplotlib.pyplot as plt\n", - "from matplotlib import cm\n", - "from timeit import default_timer as timer\n", "\n", - "# These lines force images to be displayed in the notebook, and scale up fonts \n", + "# These lines force images to be displayed in the notebook, and scale up fonts\n", "%matplotlib inline\n", "mpl.rc('font', size=14)\n", "\n", @@ -274,7 +269,7 @@ " timepad = np.linspace(tmin, tmaxpad, ntpad)\n", " 
print(time_axis)\n", " print(time_axis_pad)\n", - " srcpad = RickerSource(name='srcpad', grid=v.grid, f0=fpeak, npoint=1, \n", + " srcpad = RickerSource(name='srcpad', grid=v.grid, f0=fpeak, npoint=1,\n", " time_range=time_axis_pad, t0w=t0w)\n", " nf = int(ntpad / 2 + 1)\n", " fnyq = 1.0 / (2 * dt)\n", @@ -289,7 +284,7 @@ " # Compute the Hankel function and multiply by the source spectrum\n", " U_a = np.zeros((nf), dtype=complex)\n", " for a in range(1, nf - 1):\n", - " w = 2 * np.pi * faxis[a] \n", + " w = 2 * np.pi * faxis[a]\n", " r = np.sqrt((rx - sx)**2 + (rz - sz)**2)\n", " U_a[a] = -1j * np.pi * hankel2(0.0, w * r / v0) * R[a]\n", "\n", @@ -361,7 +356,7 @@ "model = Model(origin=origin, shape=shape, vp=v0, b=b0, spacing=spacing, nbl=npad,\n", " space_order=space_order, bcs=init_damp, dtype=dtype, dt=dt)\n", "\n", - "# Source and reciver coordinates \n", + "# Source and reciver coordinates\n", "src_coords = np.empty((1, 2), dtype=dtype)\n", "rec_coords = np.empty((1, 2), dtype=dtype)\n", "src_coords[:, :] = np.array(model.domain_size) * .5\n", @@ -406,7 +401,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Continuous integration hooks \n", + "# Continuous integration hooks\n", "# We ensure the norm of these computed wavefields is repeatable\n", "assert np.isclose(np.linalg.norm(recAna), 0.0524, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(recNum), 0.0524, atol=0, rtol=1e-3)\n", @@ -438,14 +433,14 @@ "z1 = origin[1] - model.nbl * model.spacing[1]\n", "z2 = model.domain_size[1] + model.nbl * model.spacing[1]\n", "\n", - "xABC1 = origin[0] \n", + "xABC1 = origin[0]\n", "xABC2 = model.domain_size[0]\n", "zABC1 = origin[1]\n", "zABC2 = model.domain_size[1]\n", "\n", "plt_extent = [x1, x2, z2, z1]\n", - "abc_pairsX = [xABC1, xABC1, xABC2, xABC2, xABC1] \n", - "abc_pairsZ = [zABC1, zABC2, zABC2, zABC1, zABC1] \n", + "abc_pairsX = [xABC1, xABC1, xABC2, xABC2, xABC1]\n", + "abc_pairsZ = [zABC1, zABC2, zABC2, zABC1, zABC1]\n", "\n", 
"plt.figure(figsize=(12.5,12.5))\n", "\n", @@ -454,9 +449,9 @@ "amax = 1.1 * np.max(np.abs(recNum.data[:]))\n", "plt.imshow(uNum.data[1,:,:], vmin=-amax, vmax=+amax, cmap=\"seismic\",\n", " aspect=\"auto\", extent=plt_extent)\n", - "plt.plot(src_coords[0, 0], src_coords[0, 1], 'r*', markersize=15, label='Source') \n", - "plt.plot(rec_coords[0, 0], rec_coords[0, 1], 'k^', markersize=11, label='Receiver') \n", - "plt.plot(abc_pairsX, abc_pairsZ, 'black', linewidth=4, linestyle=':', \n", + "plt.plot(src_coords[0, 0], src_coords[0, 1], 'r*', markersize=15, label='Source')\n", + "plt.plot(rec_coords[0, 0], rec_coords[0, 1], 'k^', markersize=11, label='Receiver')\n", + "plt.plot(abc_pairsX, abc_pairsZ, 'black', linewidth=4, linestyle=':',\n", " label=\"ABC\")\n", "plt.legend(loc=\"upper left\", bbox_to_anchor=(0.0, 0.9, 0.35, .1), framealpha=1.0)\n", "plt.xlabel('x position (m)')\n", @@ -811,11 +806,11 @@ "\n", "msize = 10\n", "\n", - "plt.plot(np.log10(scale), np.log10(expected1) - np.log10(expected1[0]), '+', label='1st order expected', \n", + "plt.plot(np.log10(scale), np.log10(expected1) - np.log10(expected1[0]), '+', label='1st order expected',\n", " linestyle='solid', linewidth=1.5, markersize=10, color='black')\n", "plt.plot(np.log10(scale), np.log10(norm1) - np.log10(norm1[0]), 'o', label='1st order actual',\n", " linestyle='solid', linewidth=1.5, markersize=10, color='blue')\n", - "plt.plot(np.log10(scale), np.log10(expected2) - np.log10(expected2[0]), 'x', label='2nd order expected', \n", + "plt.plot(np.log10(scale), np.log10(expected2) - np.log10(expected2[0]), 'x', label='2nd order expected',\n", " linestyle='solid', linewidth=1.5, markersize=10, color='black')\n", "plt.plot(np.log10(scale), np.log10(norm2) - np.log10(norm2[0]), 'd', label='2nd order actual',\n", " linestyle='solid', linewidth=1.5, markersize=10, color='red')\n", @@ -1090,18 +1085,18 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "# Make 1D grid to test derivatives \n", + "# Make 1D grid 
to test derivatives\n", "n = 101\n", "d = 1.0\n", "shape = (n, )\n", - "spacing = (1 / (n-1), ) \n", + "spacing = (1 / (n-1), )\n", "origin = (0., )\n", "extent = (d * (n-1), )\n", "dtype = np.float64\n", "\n", "# Initialize Devito grid and Functions for input(f1,g1) and output(f2,g2)\n", - "# Note that space_order=8 allows us to use an 8th order finite difference \n", - "# operator by properly setting up grid accesses with halo cells \n", + "# Note that space_order=8 allows us to use an 8th order finite difference\n", + "# operator by properly setting up grid accesses with halo cells\n", "grid1d = Grid(shape=shape, extent=extent, origin=origin, dtype=dtype)\n", "x = grid1d.dimensions[0]\n", "f1 = Function(name='f1', grid=grid1d, space_order=8)\n", diff --git a/examples/seismic/source.py b/examples/seismic/source.py index 18fa1893aa..d0a44d612d 100644 --- a/examples/seismic/source.py +++ b/examples/seismic/source.py @@ -8,8 +8,16 @@ from devito.types import SparseTimeFunction -__all__ = ['PointSource', 'Receiver', 'Shot', 'WaveletSource', - 'RickerSource', 'GaborSource', 'DGaussSource', 'TimeAxis'] +__all__ = [ + 'DGaussSource', + 'GaborSource', + 'PointSource', + 'Receiver', + 'RickerSource', + 'Shot', + 'TimeAxis', + 'WaveletSource', +] class TimeAxis: diff --git a/examples/seismic/tti/wavesolver.py b/examples/seismic/tti/wavesolver.py index 6915744def..2534bef750 100644 --- a/examples/seismic/tti/wavesolver.py +++ b/examples/seismic/tti/wavesolver.py @@ -1,4 +1,3 @@ -# coding: utf-8 from devito import (Function, TimeFunction, warning, NODE, DevitoCheckpoint, CheckpointOperator, Revolver) from devito.tools import memoized_meth diff --git a/examples/seismic/tutorials/01_modelling.ipynb b/examples/seismic/tutorials/01_modelling.ipynb index 1ab98e0b50..5fe0e78a8c 100644 --- a/examples/seismic/tutorials/01_modelling.ipynb +++ b/examples/seismic/tutorials/01_modelling.ipynb @@ -320,8 +320,8 @@ } ], "source": [ - "# In order to represent the wavefield u and the square 
slowness we need symbolic objects \n", - "# corresponding to time-space-varying field (u, TimeFunction) and \n", + "# In order to represent the wavefield u and the square slowness we need symbolic objects\n", + "# corresponding to time-space-varying field (u, TimeFunction) and\n", "# space-varying field (m, Function)\n", "from devito import TimeFunction\n", "\n", @@ -356,7 +356,7 @@ ], "source": [ "# This discrete PDE can be solved in a time-marching way updating u(t+dt) from the previous time step\n", - "# Devito as a shortcut for u(t+dt) which is u.forward. We can then rewrite the PDE as \n", + "# Devito as a shortcut for u(t+dt) which is u.forward. We can then rewrite the PDE as\n", "# a time marching updating equation known as a stencil using customized SymPy functions\n", "from devito import Eq, solve\n", "\n", diff --git a/examples/seismic/tutorials/02_rtm.ipynb b/examples/seismic/tutorials/02_rtm.ipynb index 3489e20d73..350d98bab0 100644 --- a/examples/seismic/tutorials/02_rtm.ipynb +++ b/examples/seismic/tutorials/02_rtm.ipynb @@ -225,7 +225,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Compute synthetic data with forward operator \n", + "# Compute synthetic data with forward operator\n", "from examples.seismic.acoustic import AcousticWaveSolver\n", "\n", "solver = AcousticWaveSolver(model, geometry, space_order=4)\n", @@ -238,7 +238,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Compute initial data with forward operator \n", + "# Compute initial data with forward operator\n", "smooth_d, _, _ = solver.forward(vp=model0.vp)" ] }, @@ -352,18 +352,18 @@ "\n", " u = TimeFunction(name='u', grid=model.grid, time_order=2, space_order=4,\n", " save=geometry.nt)\n", - " \n", + "\n", " # Define the wave equation, but with a negated damping term\n", " eqn = model.m * v.dt2 - v.laplace + model.damp * v.dt.T\n", "\n", " # Use `solve` to rearrange the equation into a stencil expression\n", " stencil = Eq(v.backward, solve(eqn, v.backward))\n", - " \n", + 
"\n", " # Define residual injection at the location of the forward receivers\n", " dt = model.critical_dt\n", " residual = PointSource(name='residual', grid=model.grid,\n", " time_range=geometry.time_axis,\n", - " coordinates=geometry.rec_positions) \n", + " coordinates=geometry.rec_positions)\n", " res_term = residual.inject(field=v.backward, expr=residual * dt**2 / model.m)\n", "\n", " # Correlate u and v for the current time step and add it to the image\n", @@ -500,20 +500,20 @@ "\n", "for i in range(nshots):\n", " print('Imaging source %d out of %d' % (i+1, nshots))\n", - " \n", + "\n", " # Update source location\n", " geometry.src_positions[0, :] = source_locations[i, :]\n", "\n", " # Generate synthetic data from true model\n", " true_d, _, _ = solver.forward(vp=model.vp)\n", - " \n", + "\n", " # Compute smooth data and full forward wavefield u0\n", " smooth_d, u0, _ = solver.forward(vp=model0.vp, save=True)\n", - " \n", - " # Compute gradient from the data residual \n", + "\n", + " # Compute gradient from the data residual\n", " v = TimeFunction(name='v', grid=model.grid, time_order=2, space_order=4)\n", " residual = smooth_d.data - true_d.data\n", - " op_imaging(u=u0, v=v, vp=model0.vp, dt=model0.critical_dt, \n", + " op_imaging(u=u0, v=v, vp=model0.vp, dt=model0.critical_dt,\n", " residual=residual)\n" ] }, diff --git a/examples/seismic/tutorials/03_fwi.ipynb b/examples/seismic/tutorials/03_fwi.ipynb index ae0616f5b6..83bf629448 100644 --- a/examples/seismic/tutorials/03_fwi.ipynb +++ b/examples/seismic/tutorials/03_fwi.ipynb @@ -54,7 +54,7 @@ "outputs": [], "source": [ "nshots = 9 # Number of shots to create gradient from\n", - "nreceivers = 101 # Number of receiver locations per shot \n", + "nreceivers = 101 # Number of receiver locations per shot\n", "fwi_iterations = 5 # Number of outer FWI iterations" ] }, @@ -165,7 +165,7 @@ "from examples.seismic import AcquisitionGeometry\n", "\n", "t0 = 0.\n", - "tn = 1000. 
\n", + "tn = 1000.\n", "f0 = 0.010\n", "# First, position source centrally in all dimensions, then set depth\n", "src_coordinates = np.empty((1, 2))\n", @@ -225,7 +225,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Compute synthetic data with forward operator \n", + "# Compute synthetic data with forward operator\n", "from examples.seismic.acoustic import AcousticWaveSolver\n", "\n", "solver = AcousticWaveSolver(model, geometry, space_order=4)\n", @@ -238,7 +238,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Compute initial data with forward operator \n", + "# Compute initial data with forward operator\n", "smooth_d, _, _ = solver.forward(vp=model0.vp)" ] }, @@ -379,7 +379,7 @@ "def compute_residual(residual, dobs, dsyn):\n", " if residual.grid.distributor.is_parallel:\n", " # If we run with MPI, we have to compute the residual via an operator\n", - " # First make sure we can take the difference and that receivers are at the \n", + " # First make sure we can take the difference and that receivers are at the\n", " # same position\n", " assert np.allclose(dobs.coordinates.data[:], dsyn.coordinates.data)\n", " assert np.allclose(residual.coordinates.data[:], dsyn.coordinates.data)\n", @@ -390,7 +390,7 @@ " else:\n", " # A simple data difference is enough in serial\n", " residual.data[:] = dsyn.data[:] - dobs.data[:]\n", - " \n", + "\n", " return residual" ] }, @@ -400,41 +400,40 @@ "metadata": {}, "outputs": [], "source": [ - "# Create FWI gradient kernel \n", - "from devito import Function, TimeFunction, norm\n", + "# Create FWI gradient kernel\n", + "from devito import Function, norm\n", "from examples.seismic import Receiver\n", "\n", - "import scipy\n", - "def fwi_gradient(vp_in): \n", + "def fwi_gradient(vp_in):\n", " # Create symbols to hold the gradient\n", " grad = Function(name=\"grad\", grid=model.grid)\n", " # Create placeholders for the data residual and data\n", " residual = Receiver(name='residual', grid=model.grid,\n", - " 
time_range=geometry.time_axis, \n", + " time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", " d_obs = Receiver(name='d_obs', grid=model.grid,\n", - " time_range=geometry.time_axis, \n", + " time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", " d_syn = Receiver(name='d_syn', grid=model.grid,\n", - " time_range=geometry.time_axis, \n", + " time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", " objective = 0.\n", " for i in range(nshots):\n", " # Update source location\n", " geometry.src_positions[0, :] = source_locations[i, :]\n", - " \n", + "\n", " # Generate synthetic data from true model\n", " _, _, _ = solver.forward(vp=model.vp, rec=d_obs)\n", - " \n", + "\n", " # Compute smooth data and full forward wavefield u0\n", " _, u0, _ = solver.forward(vp=vp_in, save=True, rec=d_syn)\n", - " \n", - " # Compute gradient from data residual and update objective function \n", + "\n", + " # Compute gradient from data residual and update objective function\n", " compute_residual(residual, d_obs, d_syn)\n", - " \n", + "\n", " objective += .5*norm(residual)**2\n", " solver.gradient(rec=residual, u=u0, vp=vp_in, grad=grad)\n", - " \n", + "\n", " return objective, grad" ] }, @@ -590,19 +589,19 @@ " # Compute the functional value and gradient for the current\n", " # model estimate\n", " phi, direction = fwi_gradient(model0.vp)\n", - " \n", + "\n", " # Store the history of the functional values\n", " history[i] = phi\n", - " \n", + "\n", " # Artificial Step length for gradient descent\n", " # In practice this would be replaced by a Linesearch (Wolfe, ...)\n", " # that would guarantee functional decrease Phi(m-alpha g) <= epsilon Phi(m)\n", " # where epsilon is a minimum decrease constant\n", " alpha = .05 / mmax(direction)\n", - " \n", + "\n", " # Update the model estimate and enforce minimum/maximum values\n", " update_with_box(model0.vp , alpha , direction)\n", - " \n", + "\n", " # Log the progress made\n", 
" print('Objective value is %f at iteration %d' % (phi, i+1))" ] diff --git a/examples/seismic/tutorials/04_dask.ipynb b/examples/seismic/tutorials/04_dask.ipynb index a7becc9e1b..9084f2a3de 100644 --- a/examples/seismic/tutorials/04_dask.ipynb +++ b/examples/seismic/tutorials/04_dask.ipynb @@ -133,7 +133,7 @@ "\n", "# Set up acquisiton geometry\n", "t0 = 0.\n", - "tn = 1000. \n", + "tn = 1000.\n", "f0 = 0.010\n", "\n", "# Set up source geometry, but define 5 sources instead of just one.\n", @@ -203,9 +203,9 @@ " for i in range(geometry.nsrc):\n", "\n", " # Geometry for current shot\n", - " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i,:], \n", + " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i,:],\n", " geometry.t0, geometry.tn, f0=geometry.f0, src_type=geometry.src_type)\n", - " \n", + "\n", " # Call serial modeling function for each index\n", " futures.append(client.submit(forward_modeling_single_shot, model, geometry_i, save=save, dt=dt))\n", "\n", @@ -264,11 +264,11 @@ "# Start Dask cluster\n", "if USE_GPU_AWARE_DASK:\n", " from dask_cuda import LocalCUDACluster\n", - " cluster = LocalCUDACluster(threads_per_worker=1, death_timeout=600) \n", + " cluster = LocalCUDACluster(threads_per_worker=1, death_timeout=600)\n", "else:\n", " from distributed import LocalCluster\n", " cluster = LocalCluster(n_workers=nsources, death_timeout=600)\n", - " \n", + "\n", "client = Client(cluster)\n", "\n", "# Compute observed data in parallel (inverse crime). 
In real life we would read the SEG-Y data here.\n", @@ -338,7 +338,7 @@ " # Devito objects for gradient and data residual\n", " grad = Function(name=\"grad\", grid=model.grid)\n", " residual = Receiver(name='rec', grid=model.grid,\n", - " time_range=geometry.time_axis, \n", + " time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", " solver = AcousticWaveSolver(model, geometry, space_order=4)\n", "\n", @@ -346,13 +346,13 @@ " d_pred, u0 = solver.forward(vp=model.vp, save=True)[0:2]\n", " residual.data[:] = d_pred.data[:] - d_obs.resample(geometry.dt).data[:][0:d_pred.data.shape[0], :]\n", "\n", - " # Function value and gradient \n", + " # Function value and gradient\n", " fval = .5*np.linalg.norm(residual.data.flatten())**2\n", " solver.gradient(rec=residual, u=u0, vp=model.vp, grad=grad)\n", - " \n", + "\n", " # Convert to numpy array and remove absorbing boundaries\n", " grad_crop = np.array(grad.data[:])[model.nbl:-model.nbl, model.nbl:-model.nbl]\n", - " \n", + "\n", " return fval, grad_crop" ] }, @@ -376,9 +376,9 @@ " for i in range(geometry.nsrc):\n", "\n", " # Geometry for current shot\n", - " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i,:], \n", + " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i,:],\n", " geometry.t0, geometry.tn, f0=geometry.f0, src_type=geometry.src_type)\n", - " \n", + "\n", " # Call serial FWI objective function for each shot location\n", " futures.append(client.submit(fwi_objective_single_shot, model, geometry_i, d_obs[i]))\n", "\n", @@ -519,14 +519,14 @@ "source": [ "# Wrapper for scipy optimizer: x is current model in squared slowness [s^2/km^2]\n", "def loss(x, model, geometry, d_obs):\n", - " \n", + "\n", " # Convert x to velocity\n", " v_curr = 1.0/np.sqrt(x.reshape(model.shape))\n", - " \n", + "\n", " # Overwrite current velocity in geometry (don't update boundary region)\n", " model.update('vp', 
v_curr.reshape(model.shape))\n", - " \n", - " # Evaluate objective function \n", + "\n", + " # Evaluate objective function\n", " fval, grad = fwi_objective_multi_shots(model, geometry, d_obs)\n", " return fval, grad.flatten().astype(np.float64) # scipy expects double precision vector" ] @@ -888,7 +888,7 @@ "# FWI with L-BFGS\n", "ftol = 0.1\n", "maxiter = 5\n", - "result = optimize.minimize(loss, m0, args=(model0, geometry0, d_obs), method='L-BFGS-B', jac=True, \n", + "result = optimize.minimize(loss, m0, args=(model0, geometry0, d_obs), method='L-BFGS-B', jac=True,\n", " callback=fwi_callback, bounds=bounds, options={'ftol':ftol, 'maxiter':maxiter, 'disp':True})" ] }, diff --git a/examples/seismic/tutorials/04_dask_pickling.ipynb b/examples/seismic/tutorials/04_dask_pickling.ipynb index d8fbf4d456..d1de2fd35a 100644 --- a/examples/seismic/tutorials/04_dask_pickling.ipynb +++ b/examples/seismic/tutorials/04_dask_pickling.ipynb @@ -92,30 +92,26 @@ "\n", "import numpy as np\n", "\n", - "import scipy\n", - "from scipy import signal, optimize\n", + "from scipy import optimize\n", "\n", - "from devito import Grid\n", "\n", "from distributed import Client, LocalCluster, wait\n", "\n", "import cloudpickle as pickle\n", "\n", "# Import acoustic solver, source and receiver modules.\n", - "from examples.seismic import Model, demo_model, AcquisitionGeometry, Receiver\n", + "from examples.seismic import demo_model, AcquisitionGeometry, Receiver\n", "from examples.seismic.acoustic import AcousticWaveSolver\n", - "from examples.seismic import AcquisitionGeometry\n", "\n", "# Import convenience function for plotting results\n", "from examples.seismic import plot_image\n", - "from examples.seismic import plot_shotrecord\n", "\n", "\n", "def get_true_model():\n", " ''' Define the test phantom; in this case we are using\n", " a simple circle so we can easily see what is going on.\n", " '''\n", - " return demo_model('circle-isotropic', vp_circle=3.0, vp_background=2.5, \n", + " 
return demo_model('circle-isotropic', vp_circle=3.0, vp_background=2.5,\n", " origin=param['origin'], shape=param['shape'],\n", " spacing=param['spacing'], nbl=param['nbl'])\n", "\n", @@ -124,7 +120,7 @@ " '''\n", " # Make sure both model are on the same grid\n", " grid = get_true_model().grid\n", - " return demo_model('circle-isotropic', vp_circle=2.5, vp_background=2.5, \n", + " return demo_model('circle-isotropic', vp_circle=2.5, vp_background=2.5,\n", " origin=param['origin'], shape=param['shape'],\n", " spacing=param['spacing'], nbl=param['nbl'],\n", " grid=grid)\n", @@ -134,7 +130,7 @@ " '''\n", " model = get_initial_model()\n", " v_curr = 1.0/np.sqrt(x.reshape(model.shape))\n", - " \n", + "\n", " if astype:\n", " model.update('vp', v_curr.astype(astype).reshape(model.shape))\n", " else:\n", @@ -146,26 +142,26 @@ " worker to get the current model.\n", " \"\"\"\n", " pkl = pickle.load(open(filename, \"rb\"))\n", - " \n", + "\n", " return pkl['model']\n", "\n", "def dump_model(filename, model):\n", " ''' Dump model to disk.\n", " '''\n", " pickle.dump({'model':model}, open(filename, \"wb\"))\n", - " \n", + "\n", "def load_shot_data(shot_id, dt):\n", " ''' Load shot data from disk, resampling to the model time step.\n", " '''\n", " pkl = pickle.load(open(\"shot_%d.p\"%shot_id, \"rb\"))\n", - " \n", + "\n", " return pkl['geometry'], pkl['rec'].resample(dt)\n", "\n", "def dump_shot_data(shot_id, rec, geometry):\n", " ''' Dump shot data to disk.\n", " '''\n", " pickle.dump({'rec':rec, 'geometry': geometry}, open('shot_%d.p'%shot_id, \"wb\"))\n", - " \n", + "\n", "def generate_shotdata_i(param):\n", " \"\"\" Inversion crime alert! Here the worker is creating the\n", " 'observed' data using the real model. 
For a real case\n", @@ -174,12 +170,12 @@ " # Reconstruct objects\n", " with open(\"arguments.pkl\", \"rb\") as cp_file:\n", " cp = pickle.load(cp_file)\n", - " \n", + "\n", " solver = cp['solver']\n", "\n", " # source position changes according to the index\n", " shot_id=param['shot_id']\n", - " \n", + "\n", " solver.geometry.src_positions[0,:]=[20, shot_id*1000./(param['nshots']-1)]\n", " true_d = solver.forward()[0]\n", " dump_shot_data(shot_id, true_d.resample(4.0), solver.geometry.src_positions)\n", @@ -188,10 +184,10 @@ " # Pick devito objects (save on disk)\n", " cp = {'solver': solver}\n", " with open(\"arguments.pkl\", \"wb\") as cp_file:\n", - " pickle.dump(cp, cp_file) \n", + " pickle.dump(cp, cp_file)\n", "\n", " work = [dict(param) for i in range(param['nshots'])]\n", - " # synthetic data is generated here twice: serial(loop below) and parallel (via dask map functionality) \n", + " # synthetic data is generated here twice: serial(loop below) and parallel (via dask map functionality)\n", " for i in range(param['nshots']):\n", " work[i]['shot_id'] = i\n", " generate_shotdata_i(work[i])\n", @@ -283,7 +279,6 @@ ], "source": [ "#NBVAL_IGNORE_OUTPUT\n", - "from examples.seismic import plot_shotrecord\n", "\n", "# Client setup\n", "cluster = LocalCluster(n_workers=2, death_timeout=600)\n", @@ -299,7 +294,7 @@ "rec_coordinates = np.empty((nreceivers, len(param['shape'])))\n", "rec_coordinates[:, 1] = np.linspace(param['spacing'][0], true_model.domain_size[0] - param['spacing'][0], num=nreceivers)\n", "rec_coordinates[:, 0] = 980. 
# 20m from the right end\n", - "# Geometry \n", + "# Geometry\n", "geometry = AcquisitionGeometry(true_model, rec_coordinates, src_coordinates,\n", " param['t0'], param['tn'], src_type='Ricker',\n", " f0=param['f0'])\n", @@ -335,13 +330,13 @@ " def __init__(self, f, g):\n", " self.f = f\n", " self.g = g\n", - " \n", + "\n", " def __add__(self, other):\n", " f = self.f + other.f\n", " g = self.g + other.g\n", - " \n", + "\n", " return fg_pair(f, g)\n", - " \n", + "\n", " def __radd__(self, other):\n", " if other == 0:\n", " return self\n", @@ -378,25 +373,25 @@ " # it only worked reliably with Dask main. Therefore, the\n", " # the model is communicated via a file.\n", " model0 = load_model(param['model'])\n", - " \n", + "\n", " dt = model0.critical_dt\n", " nbl = model0.nbl\n", "\n", " # Get src_position and data\n", " src_positions, rec = load_shot_data(param['shot_id'], dt)\n", "\n", - " # Set up solver -- load the solver used above in the generation of the syntethic data. \n", + " # Set up solver -- load the solver used above in the generation of the syntethic data.\n", " with open(\"arguments.pkl\", \"rb\") as cp_file:\n", " cp = pickle.load(cp_file)\n", " solver = cp['solver']\n", - " \n", + "\n", " # Set attributes to solver\n", " solver.geometry.src_positions=src_positions\n", " solver.geometry.resample(dt)\n", "\n", " # Compute simulated data and full forward wavefield u0\n", " d, u0 = solver.forward(vp=model0.vp, dt=dt, save=True)[0:2]\n", - " \n", + "\n", " # Compute the data misfit (residual) and objective function\n", " residual = Receiver(name='rec', grid=model0.grid,\n", " time_range=solver.geometry.time_axis,\n", @@ -410,11 +405,11 @@ " # backpropagates the data misfit through the model.\n", " grad = Function(name=\"grad\", grid=model0.grid)\n", " solver.gradient(rec=residual, u=u0, vp=model0.vp, dt=dt, grad=grad)\n", - " \n", + "\n", " # Copying here to avoid a (probably overzealous) destructor deleting\n", " # the gradient before Dask has had a 
chance to communicate it.\n", - " g = np.array(grad.data[:])[nbl:-nbl, nbl:-nbl] \n", - " \n", + " g = np.array(grad.data[:])[nbl:-nbl, nbl:-nbl]\n", + "\n", " # return the objective functional and gradient.\n", " return fg_pair(f, g)" ] @@ -444,13 +439,13 @@ " work = [dict(param) for i in range(param['nshots'])]\n", " for i in range(param['nshots']):\n", " work[i]['shot_id'] = i\n", - " \n", + "\n", " # Distribute worklist to workers.\n", " fgi = c.map(fwi_gradient_i, work, retries=1)\n", - " \n", + "\n", " # Perform reduction.\n", " fg = c.submit(sum, fgi).result()\n", - " \n", + "\n", " # L-BFGS in scipy expects a flat array in 64-bit floats.\n", " return fg.f, fg.g.flatten().astype(np.float64)" ] @@ -469,13 +464,12 @@ "metadata": {}, "outputs": [], "source": [ - "from scipy import optimize\n", "\n", "# Many optimization methods in scipy.optimize.minimize accept a callback\n", "# function that can operate on the solution after every iteration. Here\n", "# we use this to monitor the true relative solution error.\n", "relative_error = []\n", - "def fwi_callbacks(x): \n", + "def fwi_callbacks(x):\n", " # Calculate true relative error\n", " true_vp = get_true_model().vp.data[param['nbl']:-param['nbl'], param['nbl']:-param['nbl']]\n", " true_m = 1.0 / (true_vp.reshape(-1).astype(np.float64))**2\n", @@ -489,12 +483,12 @@ " # Initial guess\n", " v0 = model.vp.data[param['nbl']:-param['nbl'], param['nbl']:-param['nbl']]\n", " m0 = 1.0 / (v0.reshape(-1).astype(np.float64))**2\n", - " \n", + "\n", " # Define bounding box constraints on the solution.\n", " vmin = 1.4 # do not allow velocities slower than water\n", " vmax = 4.0\n", " bounds = [(1.0/vmax**2, 1.0/vmin**2) for _ in range(np.prod(model.shape))] # in [s^2/km^2]\n", - " \n", + "\n", " result = optimize.minimize(fwi_gradient,\n", " m0, args=(param, ), method='L-BFGS-B', jac=True,\n", " bounds=bounds, callback=fwi_callbacks,\n", @@ -879,7 +873,6 @@ "#NBVAL_SKIP\n", "\n", "# Plot FWI result\n", - "from 
examples.seismic import plot_image\n", "\n", "slices = tuple(slice(param['nbl'],-param['nbl']) for _ in range(2))\n", "vp = 1.0/np.sqrt(result['x'].reshape(true_model.shape))\n", diff --git a/examples/seismic/tutorials/05_staggered_acoustic.ipynb b/examples/seismic/tutorials/05_staggered_acoustic.ipynb index 1d1ce958f7..03f6c62b45 100644 --- a/examples/seismic/tutorials/05_staggered_acoustic.ipynb +++ b/examples/seismic/tutorials/05_staggered_acoustic.ipynb @@ -18,7 +18,7 @@ "from examples.seismic import plot_image\n", "import numpy as np\n", "\n", - "from sympy import init_printing, latex\n", + "from sympy import init_printing\n", "init_printing(use_latex='mathjax')" ] }, diff --git a/examples/seismic/tutorials/06_elastic.ipynb b/examples/seismic/tutorials/06_elastic.ipynb index 0c4c7163b0..4ee235fc8f 100644 --- a/examples/seismic/tutorials/06_elastic.ipynb +++ b/examples/seismic/tutorials/06_elastic.ipynb @@ -28,11 +28,11 @@ "outputs": [], "source": [ "from devito import *\n", - "from examples.seismic.source import WaveletSource, RickerSource, GaborSource, TimeAxis\n", + "from examples.seismic.source import WaveletSource, RickerSource, TimeAxis\n", "from examples.seismic import plot_image\n", "import numpy as np\n", "\n", - "from sympy import init_printing, latex\n", + "from sympy import init_printing\n", "init_printing(use_latex='mathjax')" ] }, @@ -57,7 +57,7 @@ "outputs": [], "source": [ "class DGaussSource(WaveletSource):\n", - " \n", + "\n", " def wavelet(self, f0, t):\n", " a = 0.004\n", " return -2.*a*(t - 1/f0) * np.exp(-a * (t - 1/f0)**2)\n", diff --git a/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb b/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb index 58468db8cd..4358debc87 100644 --- a/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb +++ b/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb @@ -23,17 +23,17 @@ "source": [ "from devito import *\n", "from examples.seismic.source import 
RickerSource, Receiver, TimeAxis\n", - "from examples.seismic import plot_image, demo_model\n", + "from examples.seismic import demo_model\n", "import numpy as np\n", "\n", "import matplotlib.pyplot as plt\n", "\n", - "from sympy import init_printing, latex\n", + "from sympy import init_printing\n", "init_printing(use_latex='mathjax')\n", "\n", "# Some ploting setup\n", "plt.rc('font', family='serif')\n", - "plt.rc('xtick', labelsize=20) \n", + "plt.rc('xtick', labelsize=20)\n", "plt.rc('ytick', labelsize=20)" ] }, @@ -938,7 +938,7 @@ "from devito import div45, grad45\n", "\n", "all_node = [[NODE for _ in range(model.grid.dim)] for _ in range(model.grid.dim)]\n", - "all_vert = [model.grid.dimensions for _ in range(model.grid.dim)] \n", + "all_vert = [model.grid.dimensions for _ in range(model.grid.dim)]\n", "\n", "so = 8\n", "v_rsfd = VectorTimeFunction(name='vr', grid=model.grid, space_order=so, time_order=1, staggered=all_vert)\n", diff --git a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb index 9970615f1b..9a16f497d6 100644 --- a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb +++ b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb @@ -790,7 +790,7 @@ " if not isinstance(first_arrival, np.ndarray):\n", " arrival = time[np.argmax(np.abs(data)>0.01, axis=0)]\n", " ax.plot(space, arrival, c='red', lw=1)\n", - " ax.annotate(f'first arrival',\n", + " ax.annotate('first arrival',\n", " xy=((extents[1] - extents[0])/2, arrival[arrival.size//2]), xycoords='data',\n", " xytext=(5, 5), textcoords='offset points', fontsize=12, color='red',\n", " path_effects=[patheffects.withStroke(linewidth=2, foreground=\"k\")]\n", @@ -815,7 +815,7 @@ " ax.set_aspect((extent[1] - extent[0])/(2*clip))\n", " if first_arrival is not None:\n", " ax.axvline(first_arrival, c='red', ls='--', lw=1)\n", - " ax.annotate(f'first arrival',\n", + " ax.annotate('first arrival',\n", " xy=(first_arrival, 0), 
xycoords='data',\n", " xytext=(-70, 5), textcoords='offset points', fontsize=12, color='red'\n", " )\n", diff --git a/examples/seismic/tutorials/07_DRP_schemes.ipynb b/examples/seismic/tutorials/07_DRP_schemes.ipynb index c990b517a0..7eae8a7019 100644 --- a/examples/seismic/tutorials/07_DRP_schemes.ipynb +++ b/examples/seismic/tutorials/07_DRP_schemes.ipynb @@ -242,7 +242,7 @@ "pde = model.m * u.dt2 - H + model.damp * u.dt\n", "\n", "# This discrete PDE can be solved in a time-marching way updating u(t+dt) from the previous time step\n", - "# Devito as a shortcut for u(t+dt) which is u.forward. We can then rewrite the PDE as \n", + "# Devito as a shortcut for u(t+dt) which is u.forward. We can then rewrite the PDE as\n", "# a time marching updating equation known as a stencil using customized SymPy functions\n", "from devito import solve\n", "\n", @@ -387,7 +387,7 @@ " # but the bottom 80 (+boundary layer) cells in the z-direction, which is achieved via\n", " # the following notation:\n", " return {x: x, z: ('left', 80+nbl)}\n", - " \n", + "\n", "class Lower(SubDomain):\n", " name = 'lower'\n", " def define(self, dimensions):\n", @@ -424,7 +424,7 @@ "source": [ "#NBVAL_IGNORE_OUTPUT\n", "\n", - "# Create our model passing it our 'upper' and 'lower' subdomains: \n", + "# Create our model passing it our 'upper' and 'lower' subdomains:\n", "model = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n", " space_order=order, nbl=nbl, bcs=\"damp\")\n", "\n", @@ -482,14 +482,14 @@ "# Define our custom FD coefficients:\n", "x, z = model.grid.dimensions\n", "# Upper layer\n", - "weights_u = np.array([ 2.00462e-03, -1.63274e-02, 7.72781e-02, \n", - " -3.15476e-01, 1.77768e+00, -3.05033e+00, \n", - " 1.77768e+00, -3.15476e-01, 7.72781e-02, \n", + "weights_u = np.array([ 2.00462e-03, -1.63274e-02, 7.72781e-02,\n", + " -3.15476e-01, 1.77768e+00, -3.05033e+00,\n", + " 1.77768e+00, -3.15476e-01, 7.72781e-02,\n", " -1.63274e-02, 2.00462e-03])\n", "# Lower layer\n", - 
"weights_l = np.array([ 0. , 0. , 0.0274017, \n", - " -0.223818, 1.64875 , -2.90467, \n", - " 1.64875 , -0.223818, 0.0274017, \n", + "weights_l = np.array([ 0. , 0. , 0.0274017,\n", + " -0.223818, 1.64875 , -2.90467,\n", + " 1.64875 , -0.223818, 0.0274017,\n", " 0. , 0. ])\n", "# Create the Devito Coefficient objects:\n", "ux_u_coeffs = weights_u/x.spacing**2\n", diff --git a/examples/seismic/tutorials/08_snapshotting.ipynb b/examples/seismic/tutorials/08_snapshotting.ipynb index fd84501ac7..5474ff2a72 100644 --- a/examples/seismic/tutorials/08_snapshotting.ipynb +++ b/examples/seismic/tutorials/08_snapshotting.ipynb @@ -27,7 +27,7 @@ "#NBVAL_IGNORE_OUTPUT\n", "%reset -f\n", "import numpy as np\n", - "import matplotlib.pyplot as plt \n", + "import matplotlib.pyplot as plt\n", "%matplotlib inline" ] }, @@ -123,7 +123,7 @@ " name='src',\n", " grid=model.grid,\n", " f0=f0,\n", - " time_range=time_range) \n", + " time_range=time_range)\n", "\n", "src.coordinates.data[0, :] = np.array(model.domain_size) * .5\n", "src.coordinates.data[0, -1] = 20. # Depth is 20m\n", @@ -142,10 +142,10 @@ " receiver=rec.coordinates.data[::4, :])\n", "\n", "#Used for reshaping\n", - "vnx = nx+20 \n", + "vnx = nx+20\n", "vnz = nz+20\n", "\n", - "# Set symbolics for the wavefield object `u`, setting save on all time steps \n", + "# Set symbolics for the wavefield object `u`, setting save on all time steps\n", "# (which can occupy a lot of memory), to later collect snapshots (naive method):\n", "\n", "u = TimeFunction(name=\"u\", grid=model.grid, time_order=2,\n", @@ -250,8 +250,8 @@ ], "source": [ "#NBVAL_IGNORE_OUTPUT\n", - "fobj = open(\"naivsnaps.bin\", \"rb\") \n", - "snaps = np.fromfile(fobj, dtype = np.float32) \n", + "fobj = open(\"naivsnaps.bin\", \"rb\")\n", + "snaps = np.fromfile(fobj, dtype = np.float32)\n", "snaps = np.reshape(snaps, (nsnaps, vnx, vnz)) #reshape vec2mtx, devito format. 
nx first\n", "fobj.close()\n", "\n", @@ -261,11 +261,11 @@ "plot_num = 5 # Number of images to plot\n", "\n", "for i in range(0, nsnaps, int(nsnaps/plot_num)):\n", - " plt.subplot(1, plot_num+1, imcnt+1);\n", + " plt.subplot(1, plot_num+1, imcnt+1)\n", " imcnt = imcnt + 1\n", " plt.imshow(np.transpose(snaps[i,:,:]), vmin=-1, vmax=1, cmap=\"seismic\")\n", "\n", - "plt.show() " + "plt.show()" ] }, { @@ -362,7 +362,7 @@ "\n", "#Part 3 #############\n", "print(\"Saving snaps file\")\n", - "print(\"Dimensions: nz = {:d}, nx = {:d}\".format(nz + 2 * nb, nx + 2 * nb))\n", + "print(f\"Dimensions: nz = {nz + 2 * nb:d}, nx = {nx + 2 * nb:d}\")\n", "filename = \"snaps2.bin\"\n", "usave.data.tofile(filename)\n", "#####################" @@ -403,12 +403,12 @@ "imcnt = 1 # Image counter for plotting\n", "plot_num = 5 # Number of images to plot\n", "for i in range(0, plot_num):\n", - " plt.subplot(1, plot_num, i+1);\n", + " plt.subplot(1, plot_num, i+1)\n", " imcnt = imcnt + 1\n", " ind = i * int(nsnaps/plot_num)\n", " plt.imshow(np.transpose(snaps[ind,:,:]), vmin=-1, vmax=1, cmap=\"seismic\")\n", "\n", - "plt.show() " + "plt.show()" ] }, { @@ -3360,7 +3360,7 @@ "\n", "plt.xlabel('x')\n", "plt.ylabel('z')\n", - "plt.title('Modelling one shot over a 2-layer velocity model with Devito.') \n", + "plt.title('Modelling one shot over a 2-layer velocity model with Devito.')\n", "\n", "def update(i):\n", " matrice.set_array(snapsObj[i, :, :].T)\n", diff --git a/examples/seismic/tutorials/10_nmo_correction.ipynb b/examples/seismic/tutorials/10_nmo_correction.ipynb index 6fc9e23ce8..7abafd6aad 100644 --- a/examples/seismic/tutorials/10_nmo_correction.ipynb +++ b/examples/seismic/tutorials/10_nmo_correction.ipynb @@ -239,7 +239,7 @@ "mpl.rc('figure', figsize=(8, 6))\n", "\n", "def plot_traces(rec, xb, xe, t0, tn, colorbar=True):\n", - " scale = np.max(rec)/100 \n", + " scale = np.max(rec)/100\n", " extent = [xb, xe, 1e-3*tn, t0]\n", " plot = plt.imshow(rec, cmap=cm.gray, vmin=-scale, 
vmax=scale, extent=extent)\n", " plt.xlabel('X position (km)')\n", @@ -342,7 +342,7 @@ "source": [ "vnmo = 1500\n", "vguide = SparseFunction(name='v', grid=grid, npoint=ns)\n", - "vguide.data[:] = vnmo " + "vguide.data[:] = vnmo" ] }, { @@ -397,7 +397,7 @@ "t_0 = SparseFunction(name='t0', grid=grid, npoint=ns, dimensions=[sample], shape=[grid.shape[0]])\n", "tt = SparseFunction(name='tt', grid=grid, npoint=nrcv, dimensions=grid.dimensions, shape=grid.shape)\n", "snmo = SparseFunction(name='snmo', grid=grid, npoint=nrcv, dimensions=grid.dimensions, shape=grid.shape)\n", - "s = SparseFunction(name='s', grid=grid, dtype=np.intc, npoint=nrcv, dimensions=grid.dimensions, \n", + "s = SparseFunction(name='s', grid=grid, dtype=np.intc, npoint=nrcv, dimensions=grid.dimensions,\n", " shape=grid.shape)" ] }, @@ -484,7 +484,7 @@ "op2 = Operator([E4])\n", "op2()\n", "\n", - "stack = snmo.data.sum(axis=1) # We can stack traces and create a ZO section!!! \n", + "stack = snmo.data.sum(axis=1) # We can stack traces and create a ZO section!!!\n", "\n", "plot_traces(snmo.data, rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)" ] diff --git a/examples/seismic/tutorials/11_viscoacoustic.ipynb b/examples/seismic/tutorials/11_viscoacoustic.ipynb index d7471243d0..b883f31677 100644 --- a/examples/seismic/tutorials/11_viscoacoustic.ipynb +++ b/examples/seismic/tutorials/11_viscoacoustic.ipynb @@ -117,8 +117,8 @@ "import matplotlib.pyplot as plt\n", "\n", "from devito import *\n", - "from examples.seismic.source import RickerSource, WaveletSource, TimeAxis\n", - "from examples.seismic import ModelViscoacoustic, plot_image, setup_geometry, plot_velocity" + "from examples.seismic.source import RickerSource, TimeAxis\n", + "from examples.seismic import ModelViscoacoustic" ] }, { @@ -179,8 +179,8 @@ ], "source": [ "#NBVAL_IGNORE_OUTPUT\n", - "model = ModelViscoacoustic(space_order=space_order, vp=v, qp=qp, b=1/rho, \n", - " origin=origin, shape=shape, 
spacing=spacing, \n", + "model = ModelViscoacoustic(space_order=space_order, vp=v, qp=qp, b=1/rho,\n", + " origin=origin, shape=shape, spacing=spacing,\n", " nbl=nbl)" ] }, @@ -240,7 +240,7 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.005 # peak/dominant frequency \n", + "f0 = 0.005 # peak/dominant frequency\n", "b = model.b\n", "rho = 1./b\n", "\n", @@ -279,18 +279,18 @@ "def src_rec(p, model):\n", " src = RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range)\n", " src.coordinates.data[0, :] = np.array(model.domain_size) * .5\n", - " src.coordinates.data[0, -1] = 8. \n", + " src.coordinates.data[0, -1] = 8.\n", "\n", " # Create symbol for receivers\n", " rec = Receiver(name='rec', grid=model.grid, npoint=shape[0], time_range=time_range)\n", "\n", " # Prescribe even spacing for receivers along the x-axis\n", " rec.coordinates.data[:, 0] = np.linspace(0, model.domain_size[0], num=shape[0])\n", - " rec.coordinates.data[:, 1] = 8. \n", + " rec.coordinates.data[:, 1] = 8.\n", "\n", " src_term = src.inject(field=p.forward, expr=(s*src))\n", " rec_term = rec.interpolate(expr=p)\n", - " \n", + "\n", " return src_term + rec_term, src, rec" ] }, @@ -328,7 +328,7 @@ "outputs": [], "source": [ "def plot_v_and_p(model, v, p):\n", - " \n", + "\n", " slices = [slice(model.nbl, -model.nbl), slice(model.nbl, -model.nbl)]\n", " scale = .5*1e-3\n", "\n", @@ -392,7 +392,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Stencil created from Blanch and Symes (1995) / Dutta and Schuster (2014) \n", + "# Stencil created from Blanch and Symes (1995) / Dutta and Schuster (2014)\n", "def SLS(model, p, r, v):\n", "\n", " # Bulk modulus\n", @@ -400,7 +400,7 @@ "\n", " # Define PDE to v\n", " pde_v = v.dt + b * grad(p)\n", - " u_v = Eq(v.forward, damp * solve(pde_v, v.forward)) \n", + " u_v = Eq(v.forward, damp * solve(pde_v, v.forward))\n", "\n", " # Define PDE to r\n", " pde_r = r.dt + (1. 
/ t_s) * (r + tt * bm * div(v.forward))\n", @@ -409,7 +409,7 @@ " # Define PDE to p\n", " pde_p = p.dt + bm * (tt + 1.) * div(v.forward) + r.forward\n", " u_p = Eq(p.forward, damp * solve(pde_p, p.forward))\n", - " \n", + "\n", " return [u_v, u_r, u_p]" ] }, @@ -421,27 +421,27 @@ "source": [ "# Seismic Modelling from Blanch and Symes (1995) / Dutta and Schuster (2014) viscoacoustic wave equation.\n", "def modelling_SLS(model):\n", - " \n", + "\n", " # Create symbols for particle velocity, pressure field, memory variable, source and receivers\n", - " \n", + "\n", " v = VectorTimeFunction(name=\"v\", grid=model.grid, time_order=1, space_order=space_order)\n", "\n", - " p = TimeFunction(name=\"p\", grid=model.grid, time_order=1, space_order=space_order, \n", + " p = TimeFunction(name=\"p\", grid=model.grid, time_order=1, space_order=space_order,\n", " staggered=NODE)\n", "\n", - " r = TimeFunction(name=\"r\", grid=model.grid, time_order=1, space_order=space_order, \n", + " r = TimeFunction(name=\"r\", grid=model.grid, time_order=1, space_order=space_order,\n", " staggered=NODE)\n", - " \n", + "\n", " # define the source injection and create interpolation expression for receivers\n", - " \n", + "\n", " src_rec_expr, src, rec = src_rec(p, model)\n", - " \n", + "\n", " eqn = SLS(model, p, r, v)\n", - " \n", + "\n", " op = Operator(eqn + src_rec_expr, subs=model.spacing_map)\n", - " \n", + "\n", " op(time=time_range.num-1, dt=dt, src=src, rec=rec)\n", - " \n", + "\n", " return rec, v, p" ] }, @@ -564,7 +564,7 @@ "# Stencil created from Ren et al. (2014) viscoacoustic wave equation.\n", "def KV(model, p, v):\n", "\n", - " # Angular frequency \n", + " # Angular frequency\n", " w = 2. 
* np.pi * f0\n", "\n", " # Define PDE to v\n", @@ -575,7 +575,7 @@ " pde_p = p.dt + lam * div(v.forward) - (lam / (w * model.qp)) * div(b * grad(p, shift=.5), shift=-.5)\n", "\n", " u_p = Eq(p.forward, damp * solve(pde_p, p.forward))\n", - " \n", + "\n", " return [u_v, u_p]" ] }, @@ -587,24 +587,24 @@ "source": [ "# Seismic Modelling from Ren et al. (2014) viscoacoustic wave equation.\n", "def modelling_KV(model):\n", - " \n", + "\n", " # Create symbols for particle velocity, pressure field, source and receivers\n", "\n", " v = VectorTimeFunction(name=\"v\", grid=model.grid, time_order=1, space_order=space_order)\n", "\n", - " p = TimeFunction(name=\"p\", grid=model.grid, time_order=1, space_order=space_order, \n", + " p = TimeFunction(name=\"p\", grid=model.grid, time_order=1, space_order=space_order,\n", " staggered=NODE)\n", "\n", " # define the source injection and create interpolation expression for receivers\n", - " \n", + "\n", " src_rec_expr, src, rec = src_rec(p, model)\n", - " \n", + "\n", " eqn = KV(model, p, v)\n", - " \n", + "\n", " op = Operator(eqn + src_rec_expr, subs=model.spacing_map)\n", - " \n", + "\n", " op(time=time_range.num-1, dt=dt, src=src, rec=rec)\n", - " \n", + "\n", " return rec, v, p" ] }, @@ -725,7 +725,7 @@ "# Stencil created from Deng and McMechan (2007) viscoacoustic wave equation.\n", "def Maxwell(model, p, v):\n", "\n", - " # Angular frequency \n", + " # Angular frequency\n", " w = 2. 
* np.pi * f0\n", "\n", " # Define PDE to v\n", @@ -735,7 +735,7 @@ " # Define PDE to p\n", " pde_p = p.dt + lam * div(v.forward) + (w / model.qp) * p\n", " u_p = Eq(p.forward, damp * solve(pde_p, p.forward))\n", - " \n", + "\n", " return [u_v, u_p]" ] }, @@ -747,24 +747,24 @@ "source": [ "# Seismic Modelling from Deng and McMechan (2007) viscoacoustic wave equation.\n", "def modelling_Maxwell(model):\n", - " \n", + "\n", " # Create symbols for particle velocity, pressure field, source and receivers\n", - " \n", + "\n", " v = VectorTimeFunction(name=\"v\", grid=model.grid, time_order=1, space_order=space_order)\n", "\n", - " p = TimeFunction(name=\"p\", grid=model.grid, time_order=1, space_order=space_order, \n", + " p = TimeFunction(name=\"p\", grid=model.grid, time_order=1, space_order=space_order,\n", " staggered=NODE)\n", "\n", " # define the source injection and create interpolation expression for receivers\n", - " \n", + "\n", " src_rec_expr, src, rec = src_rec(p, model)\n", - " \n", + "\n", " eqn = Maxwell(model, p, v)\n", - " \n", + "\n", " op = Operator(eqn + src_rec_expr, subs=model.spacing_map)\n", - " \n", + "\n", " op(time=time_range.num-1, dt=dt, src=src, rec=rec)\n", - " \n", + "\n", " return rec, v, p" ] }, diff --git a/examples/seismic/tutorials/12_time_blocking.ipynb b/examples/seismic/tutorials/12_time_blocking.ipynb index 65df5f8507..5d3882cdc0 100644 --- a/examples/seismic/tutorials/12_time_blocking.ipynb +++ b/examples/seismic/tutorials/12_time_blocking.ipynb @@ -295,7 +295,6 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Install pyzfp package in the current Jupyter kernel\n", - "import sys\n", "!{sys.executable} -m pip install blosc\n", "import blosc" ] @@ -317,7 +316,7 @@ "source": [ "import numpy as np\n", "from examples.seismic import RickerSource, Receiver, TimeAxis\n", - "from devito import (Grid, Function, TimeFunction, SpaceDimension, Constant, \n", + "from devito import (Grid, Function, TimeFunction, SpaceDimension, Constant,\n", " Eq, 
Operator, configuration, norm, Buffer)\n", "from examples.seismic.self_adjoint import setup_w_over_q\n", "import matplotlib as mpl\n", @@ -326,7 +325,7 @@ "import copy\n", "import os\n", "\n", - "# These lines force images to be displayed in the notebook, and scale up fonts \n", + "# These lines force images to be displayed in the notebook, and scale up fonts\n", "%matplotlib inline\n", "mpl.rc('font', size=14)\n", "\n", @@ -410,11 +409,11 @@ "origin = (0., 0.) # Origin of coordinate system, specified in m.\n", "extent = tuple([s*(n-1) for s, n in zip(spacing, shape)])\n", "\n", - "# Define the dimensions \n", + "# Define the dimensions\n", "x = SpaceDimension(name='x', spacing=Constant(name='h_x', value=extent[0]/(shape[0]-1)))\n", "z = SpaceDimension(name='z', spacing=Constant(name='h_z', value=extent[1]/(shape[1]-1)))\n", "\n", - "# Initialize the Devito grid \n", + "# Initialize the Devito grid\n", "dtype = np.float32\n", "grid = Grid(extent=extent, shape=shape, origin=origin, dimensions=(x, z), dtype=dtype)\n", "\n", @@ -428,7 +427,7 @@ "print(\"grid.extent; \", grid.extent)\n", "print(\"grid.spacing_map;\", grid.spacing_map)\n", "\n", - "# Create velocity and buoyancy fields. 
\n", + "# Create velocity and buoyancy fields.\n", "space_order = 8\n", "m0 = Function(name='m0', grid=grid, space_order=space_order)\n", "b = Function(name='b', grid=grid, space_order=space_order)\n", @@ -523,9 +522,9 @@ "\n", "plt.figure(figsize=(12,14))\n", "\n", - "# plot velocity \n", + "# plot velocity\n", "plt.subplot(2, 2, 1)\n", - "plt.imshow(np.transpose(m0.data), cmap=cm.jet, \n", + "plt.imshow(np.transpose(m0.data), cmap=cm.jet,\n", " vmin=vmin, vmax=vmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1], \\\n", @@ -551,7 +550,7 @@ "\n", "# plot velocity perturbation\n", "plt.subplot(2, 2, 3)\n", - "plt.imshow(np.transpose(dm.data), cmap=\"seismic\", \n", + "plt.imshow(np.transpose(dm.data), cmap=\"seismic\",\n", " vmin=pmin, vmax=pmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1], \\\n", @@ -659,7 +658,7 @@ "v1 = TimeFunction(name=\"v1\", grid=grid, time_order=2, space_order=space_order, save=nt)\n", "v2 = TimeFunction(name=\"v2\", grid=grid, time_order=2, space_order=space_order, save=Buffer(M))\n", "\n", - "# get time and space dimensions \n", + "# get time and space dimensions\n", "t,x,z = u1.dimensions\n", "\n", "# Source terms (see notebooks linked above for more detail)\n", @@ -912,7 +911,7 @@ "# Number of time blocks\n", "N = int((nt-1) / M) + 1\n", "\n", - "# Open a binary file in append mode to save the wavefield chunks \n", + "# Open a binary file in append mode to save the wavefield chunks\n", "filename = \"timeblocking.nonlinear.bin\"\n", "\n", "if os.path.exists(filename):\n", @@ -924,7 +923,7 @@ "file_length = np.zeros(nt, dtype=np.int64)\n", "\n", "# The length of the data type, 4 bytes for float32\n", - "itemsize = v2.data[0,:,:].dtype.itemsize \n", + "itemsize = v2.data[0,:,:].dtype.itemsize\n", 
"\n", "# The length of a an uncompressed wavefield, used to compute compression ratio below\n", "len0 = 4.0 * np.prod(v2._data[0,:,:].shape)\n", @@ -944,15 +943,15 @@ "\n", " # assign\n", " v2_all[kt,:,:] = v2.data[(kt%M),:,:]\n", - " \n", + "\n", " # compression\n", - " c = blosc.compress_ptr(v2._data[(kt%M),:,:].__array_interface__['data'][0], \n", - " np.prod(v2._data[(kt%M),:,:].shape), \n", + " c = blosc.compress_ptr(v2._data[(kt%M),:,:].__array_interface__['data'][0],\n", + " np.prod(v2._data[(kt%M),:,:].shape),\n", " v2._data[(kt%M),:,:].dtype.itemsize, 9, True, 'zstd')\n", "\n", " # compression ratio\n", " cratio = len0 / (1.0 * len(c))\n", - " \n", + "\n", " # serialization\n", " file_offset[kt] = f.tell()\n", " f.write(c)\n", @@ -962,9 +961,9 @@ "# rms_v1 = np.linalg.norm(v1.data[kt,:,:].reshape(-1))\n", "# rms_v2 = np.linalg.norm(v2_all[kt,:,:].reshape(-1))\n", "# rms_12 = np.linalg.norm(v1.data[kt,:,:].reshape(-1) - v2_all[kt,:,:].reshape(-1))\n", - "# print(\"kt1,kt2,len,cratio,|u1|,|u2|,|v1-v2|; %3d %3d %3d %10.4f %12.6e %12.6e %12.6e\" % \n", + "# print(\"kt1,kt2,len,cratio,|u1|,|u2|,|v1-v2|; %3d %3d %3d %10.4f %12.6e %12.6e %12.6e\" %\n", "# (kt1, kt2, kt2 - kt1 + 1, cratio, rms_v1, rms_v2, rms_12), flush=True)\n", - " \n", + "\n", "# Close the binary file\n", "f.close()" ] @@ -1300,8 +1299,8 @@ "for kN in range(0,N,1):\n", " kt1 = max((kN + 0) * M, 1)\n", " kt2 = min((kN + 1) * M - 1, nt-2)\n", - " \n", - " # 1. Seek to file_offset[kt] \n", + "\n", + " # 1. Seek to file_offset[kt]\n", " # 2. Read file_length[kt1] bytes from file\n", " # 3. 
Decompress wavefield and assign to v2 Buffer\n", " for kt in range(kt1,kt2+1):\n", @@ -1312,13 +1311,13 @@ "\n", " # Run the operator for this time block\n", " lf_op2(time_m=kt1, time_M=kt2)\n", - " \n", + "\n", " # Uncomment these lines to see per time step outputs\n", "# for kt in range(kt1,kt2+1):\n", "# rms_du1 = np.linalg.norm(duFwd1.data[kt,:,:].reshape(-1))\n", "# rms_du2 = np.linalg.norm(duFwd2.data[kt,:,:].reshape(-1))\n", "# rms_d12 = np.linalg.norm(duFwd1.data[kt,:,:].reshape(-1) - duFwd2.data[kt,:,:].reshape(-1))\n", - "# print(\"kt1,kt2,len,cratio,|du1|,|du2|,|du1-du2|; %3d %3d %3d %10.4f %12.6e %12.6e %12.6e\" % \n", + "# print(\"kt1,kt2,len,cratio,|du1|,|du2|,|du1-du2|; %3d %3d %3d %10.4f %12.6e %12.6e %12.6e\" %\n", "# (kt1, kt2, kt2 - kt1 + 1, cratio[kt], rms_du1, rms_du2, rms_d12), flush=True)" ] }, @@ -1659,8 +1658,8 @@ "for kN in range(N-1,-1,-1):\n", " kt1 = max((kN + 0) * M, 1)\n", " kt2 = min((kN + 1) * M - 1, nt-2)\n", - " \n", - " # 1. Seek to file_offset[kt] \n", + "\n", + " # 1. Seek to file_offset[kt]\n", " # 2. Read file_length[kt1] bytes from file\n", " # 3. 
Decompress wavefield and assign to v2 Buffer\n", " for kt in range(kt1,kt2+1,+1):\n", @@ -1671,13 +1670,13 @@ "\n", " # Run the operator for this time block\n", " la_op2(time_m=kt1, time_M=kt2)\n", - " \n", + "\n", " # Uncomment these lines to see per time step outputs\n", "# for kt in range(kt2,kt1-1,-1):\n", "# rms_du1 = np.linalg.norm(duAdj1.data[kt,:,:].reshape(-1))\n", "# rms_du2 = np.linalg.norm(duAdj2.data[kt,:,:].reshape(-1))\n", "# rms_d12 = np.linalg.norm(duAdj1.data[kt,:,:].reshape(-1) - duAdj2.data[kt,:,:].reshape(-1))\n", - "# print(\"kt2,kt1,kt,cratio,|du1|,|du2|,|du1-du2|; %3d %3d %3d %10.4f %12.6e %12.6e %12.6e\" % \n", + "# print(\"kt2,kt1,kt,cratio,|du1|,|du2|,|du1-du2|; %3d %3d %3d %10.4f %12.6e %12.6e %12.6e\" %\n", "# (kt2, kt1, kt, cratio[kt], rms_du1, rms_du2, rms_d12), flush=True)" ] }, @@ -1738,7 +1737,7 @@ "norm_dm1 = np.linalg.norm(dm1.data.reshape(-1))\n", "norm_dm12 = np.linalg.norm(dm1.data.reshape(-1) - dm2.data.reshape(-1))\n", "\n", - "print(\"Relative norm of difference wavefield,gradient; %+.4e %+.4e\" % \n", + "print(\"Relative norm of difference wavefield,gradient; %+.4e %+.4e\" %\n", " (norm_du12 / norm_du1, norm_dm12 /norm_dm1))\n", "\n", "assert norm_du12 / norm_du1 < 1e-7\n", diff --git a/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb b/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb index 404de83f9c..26d3eb5cca 100644 --- a/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb +++ b/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb @@ -126,17 +126,15 @@ "%matplotlib inline\n", "import numpy as np\n", "\n", - "from devito import Operator,Eq,solve,Grid,SparseFunction,norm\n", - "from devito import TimeFunction,Function\n", + "from devito import Operator,Eq,norm\n", + "from devito import Function\n", "from devito import gaussian_smooth\n", "from devito import mmax\n", "\n", - "from devito.logger import info\n", "\n", "from examples.seismic import Model\n", - "from examples.seismic import plot_velocity,plot_shotrecord\n", 
+ "from examples.seismic import plot_velocity\n", "from examples.seismic import Receiver\n", - "from examples.seismic import PointSource\n", "from examples.seismic import plot_image,AcquisitionGeometry\n", "from examples.seismic import TimeAxis\n", "\n", @@ -144,9 +142,7 @@ "from examples.seismic.acoustic import AcousticWaveSolver\n", "\n", "import matplotlib.pyplot as plt\n", - "from mpl_toolkits.axes_grid1 import ImageGrid\n", "from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\n", - "import matplotlib.ticker as plticker\n", "\n", "from devito import configuration\n", "configuration['log-level'] = 'WARNING'" @@ -195,7 +191,7 @@ "# Define a velocity profile. The velocity is in km/s\n", "vp_top = 1.5\n", "\n", - "v[:] = vp_top # Top velocity \n", + "v[:] = vp_top # Top velocity\n", "v[:, 30:65]= vp_top +0.5\n", "v[:, 65:101]= vp_top +1.5\n", "v[40:60, 35:55]= vp_top+1\n", @@ -206,7 +202,7 @@ "model0 = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n", " space_order=8, bcs=init_damp,nbl=npad,dtype=dtype)\n", "\n", - "dt = model.critical_dt \n", + "dt = model.critical_dt\n", "s = model.grid.stepping_dim.spacing\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", "nt=time_range.num" @@ -263,7 +259,7 @@ "# First, position source centrally in all dimensions, then set depth\n", "src_coordinates = np.empty((1, 2))\n", "src_coordinates[0, :] = np.array(model.domain_size) * .5\n", - "src_coordinates[0, -1] = 30. 
\n", + "src_coordinates[0, -1] = 30.\n", "\n", "# Define acquisition geometry: receivers\n", "\n", @@ -303,57 +299,57 @@ "outputs": [], "source": [ "def lsrtm_gradient(dm):\n", - " \n", + "\n", " residual = Receiver(name='residual', grid=model.grid, time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", - " \n", + "\n", " d_obs = Receiver(name='d_obs', grid=model.grid,time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", "\n", " d_syn = Receiver(name='d_syn', grid=model.grid,time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", - " \n", + "\n", " grad_full = Function(name='grad_full', grid=model.grid)\n", - " \n", + "\n", " grad_illum = Function(name='grad_illum', grid=model.grid)\n", - " \n", + "\n", " src_illum = Function (name =\"src_illum\", grid = model.grid)\n", "\n", " # Using devito's reference of virtual source\n", " dm_true = (solver.model.vp.data**(-2) - model0.vp.data**(-2))\n", - " \n", + "\n", " objective = 0.\n", " u0 = None\n", " for i in range(nshots):\n", - " \n", + "\n", " #Observed Data using Born's operator\n", " geometry.src_positions[0, :] = source_locations[i, :]\n", "\n", " _, u0, _ = solver.forward(vp=model0.vp, save=True, u=u0)\n", - " \n", + "\n", " _, _, _,_ = solver.jacobian(dm_true, vp=model0.vp, rec = d_obs)\n", - " \n", + "\n", " #Calculated Data using Born's operator\n", " solver.jacobian(dm, vp=model0.vp, rec = d_syn)\n", - " \n", + "\n", " residual.data[:] = d_syn.data[:]- d_obs.data[:]\n", - " \n", + "\n", " grad_shot,_ = solver.gradient(rec=residual, u=u0, vp=model0.vp)\n", - " \n", + "\n", " src_illum_upd = Eq(src_illum, src_illum + u0**2)\n", " op_src = Operator([src_illum_upd])\n", " op_src.apply()\n", - " \n", + "\n", " grad_sum = Eq(grad_full, grad_full + grad_shot)\n", " op_grad = Operator([grad_sum])\n", " op_grad.apply()\n", - " \n", + "\n", " objective += .5*norm(residual)**2\n", - " \n", + "\n", " grad_f = Eq(grad_illum, 
grad_full/(src_illum+10**-9))\n", " op_gradf = Operator([grad_f])\n", " op_gradf.apply()\n", - " \n", + "\n", " return objective,grad_illum,d_obs,d_syn" ] }, @@ -388,31 +384,31 @@ "outputs": [], "source": [ "def get_alfa(grad_iter,image_iter,niter_lsrtm):\n", - " \n", - " \n", + "\n", + "\n", " term1 = np.dot(image_iter.reshape(-1), image_iter.reshape(-1))\n", - " \n", + "\n", " term2 = np.dot(image_iter.reshape(-1), grad_iter.reshape(-1))\n", - " \n", + "\n", " term3 = np.dot(grad_iter.reshape(-1), grad_iter.reshape(-1))\n", - " \n", + "\n", " if niter_lsrtm == 0:\n", - " \n", + "\n", " alfa = .05 / mmax(grad_full)\n", - " \n", + "\n", " else:\n", " abb1 = term1 / term2\n", - " \n", + "\n", " abb2 = term2 / term3\n", - " \n", + "\n", " abb3 = abb2 / abb1\n", - " \n", + "\n", " if abb3 > 0 and abb3 < 1:\n", " alfa = abb2\n", " else:\n", " alfa = abb1\n", - " \n", - " return alfa " + "\n", + " return alfa" ] }, { @@ -579,7 +575,7 @@ "history = np.zeros((niter, 1)) #objective function\n", "\n", "image_prev = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", - " \n", + "\n", "grad_prev = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", "\n", "yk = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", @@ -587,29 +583,29 @@ "sk = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", "\n", "for k in range(niter) :\n", - " \n", + "\n", " dm = image_up_dev # Reflectivity for Calculated data via Born\n", "\n", " print('LSRTM Iteration',k+1)\n", - " \n", + "\n", " objective,grad_full,d_obs,d_syn = lsrtm_gradient(dm)\n", - " \n", + "\n", " history[k] = objective\n", - " \n", + "\n", " yk = grad_full.data - grad_prev\n", - " \n", + "\n", " sk = image_up_dev - image_prev\n", "\n", " alfa = get_alfa(yk,sk,k)\n", - " \n", + "\n", " grad_prev = grad_full.data\n", "\n", " image_prev = image_up_dev\n", - " \n", + "\n", " image_up_dev = image_up_dev - alfa*grad_full.data\n", - " \n", + "\n", " if k == 0: # Saving the first migration using Born operator.\n", - " \n", + "\n", 
" image = image_up_dev" ] }, @@ -664,7 +660,7 @@ " vmin=-.05,\n", " vmax=.05,\n", " cmap=cmap,extent=extent)\n", - " \n", + "\n", " plt.xlabel('X position (km)')\n", " plt.ylabel('Depth (km)')\n", "\n", diff --git a/examples/seismic/tutorials/14_creating_synthetics.ipynb b/examples/seismic/tutorials/14_creating_synthetics.ipynb index 3dc601852a..bb27537db3 100644 --- a/examples/seismic/tutorials/14_creating_synthetics.ipynb +++ b/examples/seismic/tutorials/14_creating_synthetics.ipynb @@ -66,7 +66,7 @@ " ! pip install gempy==2.2.9\n", " # Import gempy\n", " import gempy as gp\n", - " \n", + "\n", "try:\n", " # Import jinja2 (used for colour coding geology)\n", " import jinja2\n", @@ -74,14 +74,12 @@ " # Install jinja2\n", " ! pip install jinja2\n", " # Import jinja2\n", - " import jinja2\n", - " \n", + "\n", "try:\n", " # Check vtk notebook backend is installed\n", " import ipyvtklink\n", "except ModuleNotFoundError:\n", - " ! pip install ipyvtklink\n", - " import ipyvtklink" + " ! pip install ipyvtklink" ] }, { @@ -1299,7 +1297,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "# This discrete PDE can be solved in a time-marching way updating u(t+dt) from the previous time step\n", - "# Devito as a shortcut for u(t+dt) which is u.forward. We can then rewrite the PDE as \n", + "# Devito as a shortcut for u(t+dt) which is u.forward. 
We can then rewrite the PDE as\n", "# a time marching updating equation known as a stencil using customized SymPy functions\n", "\n", "stencil = dv.Eq(u.forward, dv.solve(pde, u.forward))\n", diff --git a/examples/seismic/tutorials/15_tti_qp_pure.ipynb b/examples/seismic/tutorials/15_tti_qp_pure.ipynb index 4013a14e1e..c936aae461 100644 --- a/examples/seismic/tutorials/15_tti_qp_pure.ipynb +++ b/examples/seismic/tutorials/15_tti_qp_pure.ipynb @@ -47,7 +47,7 @@ "source": [ "import numpy as np\n", "from devito import (Function, TimeFunction, cos, sin, solve,\n", - " Eq, Operator, configuration, norm)\n", + " Eq, Operator)\n", "from examples.seismic import TimeAxis, RickerSource, Receiver, demo_model\n", "from matplotlib import pyplot as plt" ] @@ -67,11 +67,11 @@ "metadata": {}, "outputs": [], "source": [ - "# NBVAL_IGNORE_OUTPUT \n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "shape = (101,101) # 101x101 grid\n", "spacing = (10.,10.) # spacing of 10 meters\n", - "origin = (0.,0.) \n", + "origin = (0.,0.)\n", "nbl = 0 # number of pad points\n", "\n", "model = demo_model('layers-tti', spacing=spacing, space_order=8,\n", @@ -192,7 +192,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "# time stepping \n", + "# time stepping\n", "p = TimeFunction(name=\"p\", grid=model.grid, time_order=2, space_order=2)\n", "q = Function(name=\"q\", grid=model.grid, space_order=8)\n", "\n", diff --git a/examples/seismic/utils.py b/examples/seismic/utils.py index 6491d9ca5a..d691c101c8 100644 --- a/examples/seismic/utils.py +++ b/examples/seismic/utils.py @@ -7,7 +7,7 @@ from .source import * -__all__ = ['AcquisitionGeometry', 'setup_geometry', 'seismic_args'] +__all__ = ['AcquisitionGeometry', 'seismic_args', 'setup_geometry'] def setup_geometry(model, tn, f0=0.010, interpolation='linear', **kwargs): @@ -79,8 +79,8 @@ def __init__(self, model, rec_positions, src_positions, t0, tn, **kwargs): self._src_type = kwargs.get('src_type') assert (self.src_type in sources or self.src_type is None) 
self._f0 = kwargs.get('f0') - self._a = kwargs.get('a', None) - self._t0w = kwargs.get('t0w', None) + self._a = kwargs.get('a') + self._t0w = kwargs.get('t0w') if self._src_type is not None and self._f0 is None: error("Peak frequency must be provided in KHz" + " for source of type %s" % self._src_type) diff --git a/examples/timestepping/superstep.ipynb b/examples/timestepping/superstep.ipynb index c67b445087..d71a9cd7cc 100644 --- a/examples/timestepping/superstep.ipynb +++ b/examples/timestepping/superstep.ipynb @@ -54,13 +54,11 @@ "metadata": {}, "outputs": [], "source": [ - "from sympy import pprint\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", - "from time import perf_counter\n", "\n", - "from devito import Grid, Function, TimeFunction, VectorTimeFunction, Eq, solve, Operator, ConditionalDimension, exp, SparseTimeFunction\n", + "from devito import Grid, Function, TimeFunction, Eq, solve, Operator, ConditionalDimension, SparseTimeFunction\n", "from devito.timestepping.superstep import superstep_generator, superstep_solution_transfer" ] }, diff --git a/examples/userapi/00_sympy.ipynb b/examples/userapi/00_sympy.ipynb index 8a205b8c68..431cf31534 100644 --- a/examples/userapi/00_sympy.ipynb +++ b/examples/userapi/00_sympy.ipynb @@ -337,8 +337,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Type solution here\n", - "from sympy import solve" + "# Type solution here" ] }, { @@ -457,8 +456,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Solution here\n", - "from sympy import solve" + "# Solution here" ] }, { @@ -976,8 +974,8 @@ } ], "source": [ - "from IPython.core.display import Image \n", - "Image(filename='figures/comic.png') " + "from IPython.core.display import Image\n", + "Image(filename='figures/comic.png')" ] }, { diff --git a/examples/userapi/03_subdomains.ipynb b/examples/userapi/03_subdomains.ipynb index 34402f40ad..8c73671543 100644 --- a/examples/userapi/03_subdomains.ipynb +++ b/examples/userapi/03_subdomains.ipynb @@ 
-603,9 +603,9 @@ "outputs": [], "source": [ "import numpy as np\n", - "from devito import (TimeFunction, VectorTimeFunction, TensorTimeFunction,\n", - " div, grad, curl, diag)\n", - "from examples.seismic import ModelElastic, plot_velocity, TimeAxis, RickerSource, plot_image" + "from devito import (VectorTimeFunction, TensorTimeFunction,\n", + " div, grad, diag)\n", + "from examples.seismic import ModelElastic, TimeAxis, RickerSource, plot_image" ] }, { @@ -689,7 +689,7 @@ " def define(self, dimensions):\n", " x, y, z = dimensions\n", " return {x: x, y: y, z: ('left', l1+nbl)}\n", - " \n", + "\n", "class Lower(SubDomain):\n", " name = 'lower'\n", " def define(self, dimensions):\n", diff --git a/examples/userapi/04_boundary_conditions.ipynb b/examples/userapi/04_boundary_conditions.ipynb index 2ab20396b1..d154dc64ba 100644 --- a/examples/userapi/04_boundary_conditions.ipynb +++ b/examples/userapi/04_boundary_conditions.ipynb @@ -432,11 +432,11 @@ " The field being updated: 'pressure' or 'velocity'\n", " \"\"\"\n", " lhs, rhs = eq.evaluate.args\n", - " \n", + "\n", " # Get vertical subdimension and its parent\n", " yfs = subdomain.dimensions[-1]\n", " y = yfs.parent\n", - " \n", + "\n", " # Functions present in stencil\n", " funcs = retrieve_functions(rhs)\n", " mapper = {}\n", @@ -457,7 +457,7 @@ " # Substitute where index is negative for +ve where index is positive\n", " mapper.update({f: f.subs({yind: INT(abs(yind))})})\n", " return Eq(lhs, rhs.subs(mapper), subdomain=subdomain)\n", - " \n", + "\n", "fs_p = free_surface_top(eq_p, freesurface, 'pressure')\n", "fs_v = free_surface_top(eq_v, freesurface, 'velocity')" ] diff --git a/examples/userapi/05_conditional_dimension.ipynb b/examples/userapi/05_conditional_dimension.ipynb index 467a729d82..c8d50d629d 100644 --- a/examples/userapi/05_conditional_dimension.ipynb +++ b/examples/userapi/05_conditional_dimension.ipynb @@ -587,7 +587,7 @@ "ci = ConditionalDimension(name='ci', parent=i, factor=factor)\n", "\n", "g = 
Function(name='g', shape=(size,), dimensions=(i,))\n", - "# Intialize g \n", + "# Intialize g\n", "g.data[:,]= list(range(size))\n", "f = Function(name='f', shape=(int(size/factor),), dimensions=(ci,))\n", "\n", diff --git a/examples/userapi/06_sparse_operations.ipynb b/examples/userapi/06_sparse_operations.ipynb index b4c131e247..b150474ac8 100644 --- a/examples/userapi/06_sparse_operations.ipynb +++ b/examples/userapi/06_sparse_operations.ipynb @@ -36,7 +36,7 @@ "from devito import *\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", - "from matplotlib.ticker import AutoMinorLocator, FixedLocator" + "from matplotlib.ticker import FixedLocator" ] }, { @@ -693,7 +693,7 @@ "outputs": [], "source": [ "coeffs = np.ones((5, 2, 5))\n", - "s = PrecomputedSparseTimeFunction(name=\"s\", grid=grid, npoint=npoint, nt=nt, \n", + "s = PrecomputedSparseTimeFunction(name=\"s\", grid=grid, npoint=npoint, nt=nt,\n", " interpolation_coeffs=coeffs,\n", " coordinates=coords, r=2)\n", "\n", diff --git a/examples/userapi/07_functions_on_subdomains.ipynb b/examples/userapi/07_functions_on_subdomains.ipynb index c0aecdbb7b..4c8d1a50da 100644 --- a/examples/userapi/07_functions_on_subdomains.ipynb +++ b/examples/userapi/07_functions_on_subdomains.ipynb @@ -1051,7 +1051,7 @@ "nt = time_range.num # number of time steps\n", "\n", "f0 = 0.040 # Source peak frequency is 10Hz (0.010 kHz)\n", - "src = RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range) \n", + "src = RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range)\n", "\n", "src.coordinates.data[0, :] = np.array(model.domain_size) * .5\n", "src.coordinates.data[0, -1] = 20. 
# Depth is 20m" @@ -2770,7 +2770,7 @@ "nt = time_range.num # number of time steps\n", "\n", "f0 = 0.030 # Source peak frequency is 30Hz (0.030 kHz)\n", - "src = RickerSource(name='src', grid=grid1, f0=f0, time_range=time_range) \n", + "src = RickerSource(name='src', grid=grid1, f0=f0, time_range=time_range)\n", "\n", "src.coordinates.data[0, :] = 500.\n", "src.coordinates.data[0, -1] = 350. # Depth is 350m" diff --git a/tests/test_data.py b/tests/test_data.py index cf430dc744..eac3aa1c43 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -665,9 +665,7 @@ def test_getitem(self, mode): assert np.all(result[3] == [[3, 2, 1, 0]]) result1 = np.array(f.data[5, 6:1:-1]) - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]: - assert result1.size == 0 - elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert result1.size == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(result1 == [[46, 45]]) @@ -675,9 +673,7 @@ def test_getitem(self, mode): assert np.all(result1 == [[44, 43, 42]]) result2 = np.array(f.data[6:4:-1, 6:1:-1]) - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]: - assert result2.size == 0 - elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert result2.size == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(result2[0] == [[54, 53]]) @@ -687,9 +683,7 @@ def test_getitem(self, mode): assert np.all(result2[1] == [[44, 43, 42]]) result3 = np.array(f.data[6:4:-1, 2:7]) - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]: - assert result3.size == 0 - elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert result3.size == 0 elif RIGHT in glb_pos_map[x] 
and LEFT in glb_pos_map[y]: assert np.all(result3[0] == [[50, 51]]) @@ -784,9 +778,7 @@ def test_setitem(self, mode): [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) - elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: - assert np.all(np.array(g.data)) == 0 - elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: + elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y] or RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(np.array(g.data)) == 0 else: assert np.all(np.array(g.data)) == 0 @@ -921,11 +913,7 @@ def test_niche_slicing(self, mode): t.data[:] = b tdat0 = np.array(f.data[-2::, -2::]) - if LEFT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0]: - assert tdat0.size == 0 - elif LEFT in glb_pos_map0[x0] and RIGHT in glb_pos_map0[y0]: - assert tdat0.size == 0 - elif RIGHT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0]: + if LEFT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0] or LEFT in glb_pos_map0[x0] and RIGHT in glb_pos_map0[y0] or RIGHT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0]: assert tdat0.size == 0 else: assert np.all(tdat0 == [[54, 55], @@ -1091,9 +1079,7 @@ def test_neg_start_stop(self, mode): h.data[8:10, 0:4] = f.data[slices] - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]: - assert np.count_nonzero(h.data[:]) == 0 - elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert np.count_nonzero(h.data[:]) == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(np.array(h.data) == [[0, 0, 0, 0, 0, 0], diff --git a/tests/test_derivatives.py b/tests/test_derivatives.py index 3d654d7a84..b2140ccdbd 100644 --- a/tests/test_derivatives.py +++ b/tests/test_derivatives.py @@ -301,7 +301,7 @@ def test_fd_space_45(self, staggered, space_order, ndim): Dpolynome = diff(polynome) Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx_s], np.float32) # FD derivative, symbolic - 
u_deriv = getattr(u, 'dx45') + u_deriv = u.dx45 # Compute numerical FD stencil = Eq(du, u_deriv) op = Operator(stencil, subs={d.spacing: dx for d in grid.dimensions}) diff --git a/tests/test_operator.py b/tests/test_operator.py index 1ef4bfc7e3..c73a25664a 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -2302,7 +2302,7 @@ def test_to_json(self): summary.to_json("memory_estimate_output.json") - with open("memory_estimate_output.json", "r") as infile: + with open("memory_estimate_output.json") as infile: json_object = json.load(infile) assert json_object['name'] == summary.name diff --git a/tests/test_save.py b/tests/test_save.py index 995558572c..b3c8fd487b 100644 --- a/tests/test_save.py +++ b/tests/test_save.py @@ -8,7 +8,7 @@ def initial(nt, nx, ny): np.linspace(0., 1., ny, dtype=np.float32)) ui = np.zeros((nt, nx, ny), dtype=np.float32) r = (xx - .5)**2. + (yy - .5)**2. - ui[0, np.logical_and(.05 <= r, r <= .1)] = 1. + ui[0, np.logical_and(r >= .05, r <= .1)] = 1. 
return ui diff --git a/tests/test_subdomains.py b/tests/test_subdomains.py index 2924aa4c78..03d6d9ef9e 100644 --- a/tests/test_subdomains.py +++ b/tests/test_subdomains.py @@ -375,14 +375,14 @@ class MySubdomains2(SubDomainSet): bounds_xm = np.array([1, Nx/2+1], dtype=np.int32) bounds_xM = np.array([Nx/2+1, 1], dtype=np.int32) - bounds_ym = int(1) + bounds_ym = 1 bounds_yM = int(Ny/2+1) bounds1 = (bounds_xm, bounds_xM, bounds_ym, bounds_yM) bounds_xm = np.array([1, Nx/2+1], dtype=np.int32) bounds_xM = np.array([Nx/2+1, 1], dtype=np.int32) bounds_ym = int(Ny/2+1) - bounds_yM = int(1) + bounds_yM = 1 bounds2 = (bounds_xm, bounds_xM, bounds_ym, bounds_yM) grid = Grid(extent=(Nx, Ny), shape=(Nx, Ny)) @@ -474,14 +474,14 @@ class MySubdomains2(SubDomainSet): bounds_xm = np.array([1, Nx/2+1], dtype=np.int32) bounds_xM = np.array([Nx/2+1, 1], dtype=np.int32) - bounds_ym = int(1) + bounds_ym = 1 bounds_yM = int(Ny/2+1) bounds1 = (bounds_xm, bounds_xM, bounds_ym, bounds_yM) bounds_xm = np.array([1, Nx/2+1], dtype=np.int32) bounds_xM = np.array([Nx/2+1, 1], dtype=np.int32) bounds_ym = int(Ny/2+1) - bounds_yM = int(1) + bounds_yM = 1 bounds2 = (bounds_xm, bounds_xM, bounds_ym, bounds_yM) grid = Grid(extent=(Nx, Ny), shape=(Nx, Ny)) diff --git a/tests/test_tensors.py b/tests/test_tensors.py index 63b3f2bae5..2f3090fee4 100644 --- a/tests/test_tensors.py +++ b/tests/test_tensors.py @@ -253,7 +253,7 @@ def test_non_devito_tens(func1): f1 = func1(name="f1", grid=grid, components=comps) f2 = func1(name="f2", grid=grid) - assert f1.T == f1 + assert f1 == f1.T assert isinstance(f1.T, sympy.ImmutableDenseMatrix) # No devito object in the matrix components, should return a pure sympy Matrix assert ~isinstance(f1.T, func1) @@ -275,7 +275,7 @@ def test_partial_devito_tens(func1): f1 = func1(name="f1", grid=grid, components=comps) - assert f1.T == f1 + assert f1 == f1.T assert isinstance(f1.T, func1) # Should have original grid assert f1[0, 2].grid == grid From 
5674a30724a00bb4b16ff1596c57a5f75258429b Mon Sep 17 00:00:00 2001 From: JDBetteridge Date: Wed, 24 Dec 2025 17:01:58 +0000 Subject: [PATCH 20/42] lint: Re-sort all imports with new isort rules --- auto_lint.sh | 22 -------- benchmarks/regression/benchmarks/arguments.py | 3 +- benchmarks/regression/benchmarks/codegen.py | 1 - .../regression/benchmarks/simple_operators.py | 3 +- benchmarks/user/advisor/roofline.py | 12 ++--- benchmarks/user/advisor/run_advisor.py | 7 ++- benchmarks/user/benchmark.py | 26 ++++----- conftest.py | 14 ++--- devito/arch/archinfo.py | 16 +++--- devito/arch/compiler.py | 27 +++++----- devito/builtins/arithmetic.py | 2 +- devito/builtins/initializers.py | 2 +- devito/checkpointing/checkpoint.py | 1 + devito/core/arm.py | 5 +- devito/core/autotuning.py | 5 +- devito/core/cpu.py | 15 +++--- devito/core/gpu.py | 14 ++--- devito/core/intel.py | 8 +-- devito/core/operator.py | 7 +-- devito/core/power.py | 5 +- devito/data/allocators.py | 4 +- devito/data/data.py | 2 +- devito/data/decomposition.py | 2 +- devito/data/utils.py | 2 +- devito/finite_differences/differentiable.py | 13 +++-- devito/finite_differences/elementary.py | 1 - .../finite_differences/finite_difference.py | 9 ++-- devito/finite_differences/rsfd.py | 5 +- devito/finite_differences/tools.py | 4 +- devito/ir/cgen/printer.py | 8 ++- devito/ir/clusters/algorithms.py | 16 +++--- devito/ir/clusters/analysis.py | 5 +- devito/ir/clusters/cluster.py | 13 +++-- devito/ir/clusters/visitors.py | 1 - devito/ir/equations/algorithms.py | 17 +++--- devito/ir/equations/equation.py | 8 +-- devito/ir/iet/algorithms.py | 6 +-- devito/ir/iet/nodes.py | 21 ++++---- devito/ir/iet/visitors.py | 31 ++++++----- devito/ir/stree/algorithms.py | 12 +++-- devito/ir/stree/tree.py | 4 +- devito/ir/support/basic.py | 28 +++++----- devito/ir/support/guards.py | 4 +- devito/ir/support/space.py | 13 ++--- devito/ir/support/symregistry.py | 5 +- devito/ir/support/syncs.py | 1 + devito/ir/support/utils.py | 10 ++-- 
devito/mpatches/as_independent.py | 5 +- devito/mpatches/rationaltools.py | 2 +- devito/mpi/distributed.py | 18 +++---- devito/mpi/halo_scheme.py | 14 ++--- devito/mpi/routines.py | 32 ++++++----- devito/operations/interpolators.py | 13 ++--- devito/operations/solve.py | 4 +- devito/operator/operator.py | 53 ++++++++++--------- devito/operator/profiling.py | 5 +- devito/parameters.py | 2 +- devito/passes/clusters/aliases.py | 28 +++++----- devito/passes/clusters/asynchrony.py | 6 ++- devito/passes/clusters/blocking.py | 13 +++-- devito/passes/clusters/buffering.py | 15 +++--- devito/passes/clusters/cse.py | 7 +-- devito/passes/clusters/derivatives.py | 2 +- devito/passes/clusters/factorization.py | 5 +- devito/passes/clusters/implicit.py | 2 +- devito/passes/clusters/misc.py | 6 ++- devito/passes/iet/asynchrony.py | 20 +++---- devito/passes/iet/definitions.py | 14 ++--- devito/passes/iet/engine.py | 17 +++--- devito/passes/iet/errors.py | 7 +-- devito/passes/iet/instrument.py | 12 +++-- devito/passes/iet/langbase.py | 12 +++-- devito/passes/iet/languages/C.py | 4 +- devito/passes/iet/languages/CXX.py | 6 +-- devito/passes/iet/languages/openacc.py | 19 ++++--- devito/passes/iet/languages/openmp.py | 27 ++++++---- devito/passes/iet/languages/targets.py | 17 +++--- devito/passes/iet/languages/utils.py | 2 +- devito/passes/iet/linearization.py | 9 ++-- devito/passes/iet/misc.py | 15 +++--- devito/passes/iet/mpi.py | 9 ++-- devito/passes/iet/orchestration.py | 12 +++-- devito/passes/iet/parpragma.py | 18 ++++--- devito/symbolics/extended_dtypes.py | 9 ++-- devito/symbolics/extended_sympy.py | 10 ++-- devito/symbolics/inspection.py | 12 ++--- devito/symbolics/manipulation.py | 23 ++++---- devito/symbolics/queries.py | 8 +-- devito/symbolics/search.py | 5 +- devito/tools/abc.py | 1 - devito/tools/data_structures.py | 8 +-- devito/tools/utils.py | 2 +- devito/types/array.py | 8 +-- devito/types/basic.py | 11 ++-- devito/types/caching.py | 1 - devito/types/dense.py | 29 
+++++----- devito/types/dimension.py | 9 ++-- devito/types/equation.py | 6 +-- devito/types/grid.py | 13 ++--- devito/types/misc.py | 5 +- devito/types/object.py | 2 +- devito/types/sparse.py | 22 ++++---- devito/types/tensor.py | 2 + devito/types/utils.py | 1 + examples/cfd/example_diffusion.py | 2 +- examples/cfd/tools.py | 4 +- examples/misc/linalg.py | 2 +- examples/seismic/acoustic/acoustic_example.py | 5 +- examples/seismic/acoustic/operators.py | 4 +- examples/seismic/acoustic/wavesolver.py | 4 +- examples/seismic/elastic/elastic_example.py | 3 +- examples/seismic/elastic/operators.py | 5 +- examples/seismic/elastic/wavesolver.py | 3 +- examples/seismic/inversion/fwi.py | 8 ++- examples/seismic/inversion/inversion_utils.py | 2 +- examples/seismic/model.py | 9 ++-- examples/seismic/plotting.py | 1 + examples/seismic/self_adjoint/example_iso.py | 8 ++- examples/seismic/self_adjoint/operators.py | 2 +- examples/seismic/self_adjoint/test_utils.py | 3 +- .../self_adjoint/test_wavesolver_iso.py | 12 +++-- examples/seismic/self_adjoint/utils.py | 3 +- examples/seismic/self_adjoint/wavesolver.py | 5 +- examples/seismic/source.py | 4 +- examples/seismic/test_seismic_utils.py | 2 +- examples/seismic/tti/operators.py | 3 +- examples/seismic/tti/tti_example.py | 6 +-- examples/seismic/tti/wavesolver.py | 12 +++-- examples/seismic/utils.py | 5 +- examples/seismic/viscoacoustic/operators.py | 7 +-- .../viscoacoustic/viscoacoustic_example.py | 5 +- examples/seismic/viscoacoustic/wavesolver.py | 8 +-- examples/seismic/viscoelastic/operators.py | 5 +- .../viscoelastic/viscoelastic_example.py | 3 +- examples/seismic/viscoelastic/wavesolver.py | 2 +- examples/timestepping/acoustic_superstep.py | 9 +--- tests/test_adjoint.py | 2 +- tests/test_arch.py | 4 +- tests/test_autotuner.py | 6 +-- tests/test_benchmark.py | 7 +-- tests/test_buffering.py | 10 ++-- tests/test_builtins.py | 12 +++-- tests/test_caching.py | 19 ++++--- tests/test_checkpointing.py | 8 +-- 
tests/test_constant.py | 2 +- tests/test_cse.py | 10 ++-- tests/test_data.py | 13 ++--- tests/test_derivatives.py | 16 +++--- tests/test_differentiable.py | 11 ++-- tests/test_dimension.py | 26 ++++----- tests/test_dle.py | 18 ++++--- tests/test_docstrings.py | 2 +- tests/test_dse.py | 37 +++++++------ tests/test_dtypes.py | 6 +-- tests/test_error_checking.py | 2 +- tests/test_fission.py | 5 +- tests/test_gpu_common.py | 27 +++++----- tests/test_gpu_openacc.py | 13 ++--- tests/test_gpu_openmp.py | 9 ++-- tests/test_gradient.py | 4 +- tests/test_iet.py | 25 ++++----- tests/test_interpolation.py | 18 ++++--- tests/test_ir.py | 22 ++++---- tests/test_linearize.py | 10 ++-- tests/test_lower_clusters.py | 2 +- tests/test_lower_exprs.py | 7 +-- tests/test_mpi.py | 28 +++++----- tests/test_operator.py | 30 +++++------ tests/test_pickle.py | 44 ++++++++------- tests/test_rebuild.py | 2 +- tests/test_resample.py | 2 +- tests/test_roundoff.py | 4 +- tests/test_save.py | 2 +- tests/test_skewing.py | 8 +-- tests/test_sparse.py | 12 ++--- tests/test_staggered_utils.py | 10 ++-- tests/test_subdomains.py | 16 +++--- tests/test_symbolic_coefficients.py | 7 +-- tests/test_symbolics.py | 36 +++++++------ tests/test_tensors.py | 9 ++-- tests/test_threading.py | 8 +-- tests/test_timestepping.py | 2 +- tests/test_tools.py | 13 ++--- tests/test_tti.py | 2 +- tests/test_unexpansion.py | 14 ++--- tests/test_visitors.py | 15 +++--- tests/test_warnings.py | 5 +- 187 files changed, 987 insertions(+), 870 deletions(-) delete mode 100755 auto_lint.sh diff --git a/auto_lint.sh b/auto_lint.sh deleted file mode 100755 index 1281fd1ce4..0000000000 --- a/auto_lint.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -pre-commit run --all-files trailing-whitespace -git add --all -git commit --no-verify -m "lint: Remove all the trailing whitespace" - -pre-commit run --all-files end-of-file-fixer -git add --all -git commit --no-verify -m "lint: Fix ends of files" - -isort . 
-git add --all -git commit --no-verify -m "lint: Re-sort all imports with new isort rules" - -ruff check --fix -git add --all -git commit --no-verify -m "lint: First pass with ruff --fix" - -# Don't run these -# ruff check --fix --unsafe-fixes -# git add --all -# git commit --no-verify -m "lint: Second pass with ruff --fix --unsafe-fixes" diff --git a/benchmarks/regression/benchmarks/arguments.py b/benchmarks/regression/benchmarks/arguments.py index 181a117917..662ccec7eb 100644 --- a/benchmarks/regression/benchmarks/arguments.py +++ b/benchmarks/regression/benchmarks/arguments.py @@ -1,5 +1,4 @@ -from devito import Grid, Function, TimeFunction, SparseTimeFunction, Eq, Operator - +from devito import Eq, Function, Grid, Operator, SparseTimeFunction, TimeFunction # ASV config repeat = 10 diff --git a/benchmarks/regression/benchmarks/codegen.py b/benchmarks/regression/benchmarks/codegen.py index d86136f6b5..b573e58e13 100644 --- a/benchmarks/regression/benchmarks/codegen.py +++ b/benchmarks/regression/benchmarks/codegen.py @@ -1,6 +1,5 @@ from examples.seismic.tti.tti_example import tti_setup - repeat = 3 diff --git a/benchmarks/regression/benchmarks/simple_operators.py b/benchmarks/regression/benchmarks/simple_operators.py index 839ecc52cf..b5213ea3e5 100644 --- a/benchmarks/regression/benchmarks/simple_operators.py +++ b/benchmarks/regression/benchmarks/simple_operators.py @@ -1,5 +1,4 @@ -from devito import Grid, Function, TimeFunction, Eq, Operator, gaussian_smooth, norm - +from devito import Eq, Function, Grid, Operator, TimeFunction, gaussian_smooth, norm # ASV config repeat = 3 diff --git a/benchmarks/user/advisor/roofline.py b/benchmarks/user/advisor/roofline.py index 6add2a22e4..12578fda92 100644 --- a/benchmarks/user/advisor/roofline.py +++ b/benchmarks/user/advisor/roofline.py @@ -6,20 +6,18 @@ This module has been partly extracted from the examples directory of Intel Advisor 2018. 
""" -import click import json import math +import os +import sys + +import click import matplotlib -from matplotlib.ticker import ScalarFormatter import matplotlib.pyplot as plt # noqa - import numpy as np import pandas as pd -import sys -import os - from advisor_logging import check, err, log - +from matplotlib.ticker import ScalarFormatter try: import advisor diff --git a/benchmarks/user/advisor/run_advisor.py b/benchmarks/user/advisor/run_advisor.py index 9c0d1554ff..b3cef8387a 100644 --- a/benchmarks/user/advisor/run_advisor.py +++ b/benchmarks/user/advisor/run_advisor.py @@ -1,14 +1,13 @@ -import click import datetime import logging import os import sys - from pathlib import Path -from subprocess import check_output, PIPE, Popen +from subprocess import PIPE, Popen, check_output from tempfile import gettempdir, mkdtemp -from advisor_logging import check, log, progress, log_process +import click +from advisor_logging import check, log, log_process, progress @click.command() diff --git a/benchmarks/user/benchmark.py b/benchmarks/user/benchmark.py index 420f18df02..4439ad6066 100644 --- a/benchmarks/user/benchmark.py +++ b/benchmarks/user/benchmark.py @@ -1,22 +1,24 @@ -import numpy as np -import click import os -from devito import Device, configuration, info, warning, set_log_level, switchconfig, norm +import click +import numpy as np + +from devito import Device, configuration, info, norm, set_log_level, switchconfig, warning from devito.arch.compiler import IntelCompiler from devito.mpi import MPI from devito.operator.profiling import PerformanceSummary from devito.tools import all_equal, as_tuple, sweep from devito.types.dense import DiscreteFunction - -from examples.seismic.acoustic.acoustic_example import run as acoustic_run, acoustic_setup -from examples.seismic.tti.tti_example import run as tti_run, tti_setup -from examples.seismic.elastic.elastic_example import run as elastic_run, elastic_setup -from examples.seismic.self_adjoint.example_iso import run as 
acoustic_sa_run, \ - acoustic_sa_setup -from examples.seismic.viscoelastic.viscoelastic_example import run as viscoelastic_run, \ - viscoelastic_setup - +from examples.seismic.acoustic.acoustic_example import acoustic_setup +from examples.seismic.acoustic.acoustic_example import run as acoustic_run +from examples.seismic.elastic.elastic_example import elastic_setup +from examples.seismic.elastic.elastic_example import run as elastic_run +from examples.seismic.self_adjoint.example_iso import acoustic_sa_setup +from examples.seismic.self_adjoint.example_iso import run as acoustic_sa_run +from examples.seismic.tti.tti_example import run as tti_run +from examples.seismic.tti.tti_example import tti_setup +from examples.seismic.viscoelastic.viscoelastic_example import run as viscoelastic_run +from examples.seismic.viscoelastic.viscoelastic_example import viscoelastic_setup model_type = { 'viscoelastic': { diff --git a/conftest.py b/conftest.py index 16ae762b4e..3dc51e2eb0 100644 --- a/conftest.py +++ b/conftest.py @@ -6,14 +6,16 @@ from sympy import Add from sympy.printing import sstr -from devito import Eq, configuration, Revolver # noqa +from devito import Eq, Revolver, configuration # noqa +from devito.arch import Arm, Cpu64, Device, get_advisor_path, sniff_mpi_distro +from devito.arch.compiler import ( + IntelCompiler, NvidiaCompiler, OneapiCompiler, compiler_registry +) from devito.checkpointing import NoopRevolver from devito.finite_differences.differentiable import EvalDerivative -from devito.arch import Cpu64, Device, sniff_mpi_distro, Arm, get_advisor_path -from devito.arch.compiler import (compiler_registry, IntelCompiler, OneapiCompiler, - NvidiaCompiler) -from devito.ir.iet import (FindNodes, FindSymbols, Iteration, ParallelBlock, - retrieve_iteration_tree) +from devito.ir.iet import ( + FindNodes, FindSymbols, Iteration, ParallelBlock, retrieve_iteration_tree +) from devito.tools import as_tuple try: diff --git a/devito/arch/archinfo.py 
b/devito/arch/archinfo.py index 7f6f769c0f..0273e88a22 100644 --- a/devito/arch/archinfo.py +++ b/devito/arch/archinfo.py @@ -1,22 +1,22 @@ """Collection of utilities to detect properties of the underlying architecture.""" -from contextlib import suppress -from functools import cached_property -from subprocess import PIPE, Popen, DEVNULL, run, CalledProcessError -from pathlib import Path import ctypes -import re +import json import os +import re import sys -import json +from contextlib import suppress +from functools import cached_property +from pathlib import Path +from subprocess import DEVNULL, PIPE, CalledProcessError, Popen, run import cpuinfo import numpy as np -from packaging.version import parse, InvalidVersion import psutil +from packaging.version import InvalidVersion, parse from devito.logger import warning -from devito.tools import as_tuple, all_equal, memoized_func +from devito.tools import all_equal, as_tuple, memoized_func __all__ = [ 'platform_registry', 'get_cpu_info', 'get_gpu_info', 'get_visible_devices', diff --git a/devito/arch/compiler.py b/devito/arch/compiler.py index ad6e7484d1..130f0f5a0c 100644 --- a/devito/arch/compiler.py +++ b/devito/arch/compiler.py @@ -1,29 +1,28 @@ +import platform +import time +import warnings from functools import partial from hashlib import sha1 from itertools import filterfalse -from os import environ, path, makedirs -from packaging.version import Version -from subprocess import (DEVNULL, PIPE, CalledProcessError, check_output, - check_call, run) -import platform -import warnings -import time +from os import environ, makedirs, path +from subprocess import DEVNULL, PIPE, CalledProcessError, check_call, check_output, run import numpy.ctypeslib as npct from codepy.jit import compile_from_string -from codepy.toolchain import (GCCToolchain, - call_capture_output as _call_capture_output) +from codepy.toolchain import GCCToolchain +from codepy.toolchain import call_capture_output as _call_capture_output +from 
packaging.version import Version from devito.arch import ( - AMDGPUX, Cpu64, AppleArm, NvidiaDevice, POWER8, POWER9, Graviton, - Cortex, IntelDevice, get_nvidia_cc, NvidiaArm, check_cuda_runtime, - get_cuda_version, get_m1_llvm_path + AMDGPUX, POWER8, POWER9, AppleArm, Cortex, Cpu64, Graviton, IntelDevice, NvidiaArm, + NvidiaDevice, check_cuda_runtime, get_cuda_version, get_m1_llvm_path, get_nvidia_cc ) from devito.exceptions import CompilationError from devito.logger import debug, warning from devito.parameters import configuration -from devito.tools import (as_list, change_directory, filter_ordered, - memoized_func, make_tempdir) +from devito.tools import ( + as_list, change_directory, filter_ordered, make_tempdir, memoized_func +) __all__ = ['compiler_registry', 'sniff_mpi_distro'] diff --git a/devito/builtins/arithmetic.py b/devito/builtins/arithmetic.py index 2996f595a8..6c19f056be 100644 --- a/devito/builtins/arithmetic.py +++ b/devito/builtins/arithmetic.py @@ -1,7 +1,7 @@ import numpy as np import devito as dv -from devito.builtins.utils import make_retval, check_builtins_args +from devito.builtins.utils import check_builtins_args, make_retval __all__ = ['inner', 'mmax', 'mmin', 'norm', 'sum', 'sumall'] diff --git a/devito/builtins/initializers.py b/devito/builtins/initializers.py index 1b5b66937a..db571c4683 100644 --- a/devito/builtins/initializers.py +++ b/devito/builtins/initializers.py @@ -1,8 +1,8 @@ import numpy as np import devito as dv -from devito.tools import as_tuple, as_list from devito.builtins.utils import check_builtins_args, nbl_to_padsize, pad_outhalo +from devito.tools import as_list, as_tuple __all__ = ['assign', 'gaussian_smooth', 'initialize_function', 'smooth'] diff --git a/devito/checkpointing/checkpoint.py b/devito/checkpointing/checkpoint.py index 3d1522498c..a3c5b8bcb5 100644 --- a/devito/checkpointing/checkpoint.py +++ b/devito/checkpointing/checkpoint.py @@ -1,4 +1,5 @@ from pyrevolve import Checkpoint, Operator + from devito 
import TimeFunction from devito.tools import flatten diff --git a/devito/core/arm.py b/devito/core/arm.py index 9649f5aaa6..fae5d4c513 100644 --- a/devito/core/arm.py +++ b/devito/core/arm.py @@ -1,6 +1,5 @@ -from devito.core.cpu import (Cpu64AdvOperator, Cpu64AdvCXXOperator, - Cpu64AdvCOperator) -from devito.passes.iet import OmpTarget, CXXOmpTarget +from devito.core.cpu import Cpu64AdvCOperator, Cpu64AdvCXXOperator, Cpu64AdvOperator +from devito.passes.iet import CXXOmpTarget, OmpTarget __all__ = [ 'ArmAdvCOperator', diff --git a/devito/core/autotuning.py b/devito/core/autotuning.py index acc546d92f..ccef3a9e72 100644 --- a/devito/core/autotuning.py +++ b/devito/core/autotuning.py @@ -1,10 +1,11 @@ from collections import OrderedDict -from itertools import combinations, product from functools import total_ordering +from itertools import combinations, product from devito.arch import KNL, KNL7210 from devito.ir import Backward, retrieve_iteration_tree -from devito.logger import perf, warning as _warning +from devito.logger import perf +from devito.logger import warning as _warning from devito.mpi.distributed import MPI, MPINeighborhood from devito.mpi.routines import MPIMsgEnriched from devito.parameters import configuration diff --git a/devito/core/cpu.py b/devito/core/cpu.py index 73d120bb78..b090552838 100644 --- a/devito/core/cpu.py +++ b/devito/core/cpu.py @@ -4,14 +4,15 @@ from devito.exceptions import InvalidOperator from devito.operator.operator import rcompile from devito.passes import stream_dimensions +from devito.passes.clusters import ( + Lift, blocking, buffering, cire, cse, factorize, fission, fuse, optimize_hyperplanes, + optimize_pows +) from devito.passes.equations import collect_derivatives -from devito.passes.clusters import (Lift, blocking, buffering, cire, cse, - factorize, fission, fuse, optimize_pows, - optimize_hyperplanes) -from devito.passes.iet import (CTarget, CXXTarget, COmpTarget, CXXOmpTarget, - avoid_denormals, linearize, - mpiize, 
hoist_prodders, relax_incr_dimensions, - check_stability) +from devito.passes.iet import ( + COmpTarget, CTarget, CXXOmpTarget, CXXTarget, avoid_denormals, check_stability, + hoist_prodders, linearize, mpiize, relax_incr_dimensions +) from devito.tools import timed_pass __all__ = [ diff --git a/devito/core/gpu.py b/devito/core/gpu.py index 108b1ab46b..eaf68b0a5b 100644 --- a/devito/core/gpu.py +++ b/devito/core/gpu.py @@ -6,13 +6,15 @@ from devito.exceptions import InvalidOperator from devito.operator.operator import rcompile from devito.passes import is_on_device, stream_dimensions +from devito.passes.clusters import ( + Lift, blocking, buffering, cire, cse, factorize, fission, fuse, memcpy_prefetch, + optimize_pows, tasking +) from devito.passes.equations import collect_derivatives -from devito.passes.clusters import (Lift, tasking, memcpy_prefetch, blocking, - buffering, cire, cse, factorize, fission, fuse, - optimize_pows) -from devito.passes.iet import (DeviceOmpTarget, DeviceAccTarget, DeviceCXXOmpTarget, - mpiize, hoist_prodders, linearize, pthreadify, - relax_incr_dimensions, check_stability) +from devito.passes.iet import ( + DeviceAccTarget, DeviceCXXOmpTarget, DeviceOmpTarget, check_stability, hoist_prodders, + linearize, mpiize, pthreadify, relax_incr_dimensions +) from devito.tools import as_tuple, timed_pass __all__ = [ diff --git a/devito/core/intel.py b/devito/core/intel.py index 8478189f92..6ab2f70b5f 100644 --- a/devito/core/intel.py +++ b/devito/core/intel.py @@ -1,7 +1,7 @@ -from devito.core.cpu import (Cpu64AdvCOperator, Cpu64AdvOmpOperator, - Cpu64FsgCOperator, Cpu64FsgOmpOperator, - Cpu64AdvCXXOperator, Cpu64AdvCXXOmpOperator, - Cpu64FsgCXXOperator, Cpu64FsgCXXOmpOperator) +from devito.core.cpu import ( + Cpu64AdvCOperator, Cpu64AdvCXXOmpOperator, Cpu64AdvCXXOperator, Cpu64AdvOmpOperator, + Cpu64FsgCOperator, Cpu64FsgCXXOmpOperator, Cpu64FsgCXXOperator, Cpu64FsgOmpOperator +) __all__ = [ 'Intel64AdvCOperator', diff --git 
a/devito/core/operator.py b/devito/core/operator.py index 186f5a2c94..10df2eed31 100644 --- a/devito/core/operator.py +++ b/devito/core/operator.py @@ -8,10 +8,11 @@ from devito.ir import FindSymbols from devito.logger import warning from devito.mpi.routines import mpi_registry -from devito.parameters import configuration from devito.operator import Operator -from devito.tools import (as_tuple, is_integer, timed_pass, - UnboundTuple, UnboundedMultiTuple) +from devito.parameters import configuration +from devito.tools import ( + UnboundedMultiTuple, UnboundTuple, as_tuple, is_integer, timed_pass +) from devito.types import NThreads, PThreadArray __all__ = ['CoreOperator', 'CustomOperator', diff --git a/devito/core/power.py b/devito/core/power.py index 2ae711dcc9..0b0fe86533 100644 --- a/devito/core/power.py +++ b/devito/core/power.py @@ -1,5 +1,6 @@ -from devito.core.cpu import (Cpu64AdvCOperator, Cpu64AdvOmpOperator, - Cpu64AdvCXXOperator, Cpu64AdvCXXOmpOperator) +from devito.core.cpu import ( + Cpu64AdvCOperator, Cpu64AdvCXXOmpOperator, Cpu64AdvCXXOperator, Cpu64AdvOmpOperator +) __all__ = [ 'PowerAdvCOperator', diff --git a/devito/data/allocators.py b/devito/data/allocators.py index eb2fc7bc45..dc0decab17 100644 --- a/devito/data/allocators.py +++ b/devito/data/allocators.py @@ -1,15 +1,15 @@ import abc import ctypes -from ctypes.util import find_library import mmap import os import sys +from ctypes.util import find_library import numpy as np from devito.logger import logger from devito.parameters import configuration -from devito.tools import is_integer, infer_datasize +from devito.tools import infer_datasize, is_integer __all__ = [ 'ALLOC_ALIGNED', diff --git a/devito/data/data.py b/devito/data/data.py index 7a49ad77b5..02660ca3f5 100644 --- a/devito/data/data.py +++ b/devito/data/data.py @@ -7,7 +7,7 @@ from devito.data.utils import * from devito.logger import warning from devito.parameters import configuration -from devito.tools import Tag, as_tuple, as_list, 
is_integer +from devito.tools import Tag, as_list, as_tuple, is_integer __all__ = ['Data'] diff --git a/devito/data/decomposition.py b/devito/data/decomposition.py index 9a4ec7a486..7faa0753c0 100644 --- a/devito/data/decomposition.py +++ b/devito/data/decomposition.py @@ -4,7 +4,7 @@ import numpy as np from devito.data.meta import LEFT -from devito.tools import is_integer, as_tuple +from devito.tools import as_tuple, is_integer __all__ = ['Decomposition'] diff --git a/devito/data/utils.py b/devito/data/utils.py index d76db9d13a..c624fcd7d6 100644 --- a/devito/data/utils.py +++ b/devito/data/utils.py @@ -1,6 +1,6 @@ import numpy as np -from devito.tools import Tag, as_tuple, as_list, is_integer +from devito.tools import Tag, as_list, as_tuple, is_integer __all__ = [ 'NONLOCAL', diff --git a/devito/finite_differences/differentiable.py b/devito/finite_differences/differentiable.py index c4440ea432..c20e32e3e8 100644 --- a/devito/finite_differences/differentiable.py +++ b/devito/finite_differences/differentiable.py @@ -1,23 +1,26 @@ from collections import ChainMap +from functools import cached_property, singledispatch from itertools import product -from functools import singledispatch, cached_property import numpy as np import sympy from sympy.core.add import _addsort -from sympy.core.mul import _keep_coeff, _mulsort from sympy.core.decorators import call_highest_priority from sympy.core.evalf import evalf_table +from sympy.core.mul import _keep_coeff, _mulsort + try: from sympy.core.core import ordering_of_classes except ImportError: # Moved in 1.13 from sympy.core.basic import ordering_of_classes -from devito.finite_differences.tools import make_shift_x0, coeff_priority +from devito.finite_differences.tools import coeff_priority, make_shift_x0 from devito.logger import warning -from devito.tools import (as_tuple, filter_ordered, flatten, frozendict, - infer_dtype, extract_dtype, is_integer, split, is_number) +from devito.tools import ( + as_tuple, extract_dtype, 
filter_ordered, flatten, frozendict, infer_dtype, is_integer, + is_number, split +) from devito.types import Array, DimensionTuple, Evaluable, StencilDimension from devito.types.basic import AbstractFunction, Indexed diff --git a/devito/finite_differences/elementary.py b/devito/finite_differences/elementary.py index d45eafca5e..55a0054c03 100644 --- a/devito/finite_differences/elementary.py +++ b/devito/finite_differences/elementary.py @@ -1,5 +1,4 @@ import sympy - from packaging.version import Version from devito.finite_differences.differentiable import DifferentiableFunction, diffify diff --git a/devito/finite_differences/finite_difference.py b/devito/finite_differences/finite_difference.py index 15abd179ce..77545552a1 100644 --- a/devito/finite_differences/finite_difference.py +++ b/devito/finite_differences/finite_difference.py @@ -3,9 +3,12 @@ from sympy import sympify from devito.logger import warning -from .differentiable import EvalDerivative, DiffDerivative, Weights -from .tools import (left, right, generate_indices, centered, direct, transpose, - check_input, fd_weights_registry, process_weights) + +from .differentiable import DiffDerivative, EvalDerivative, Weights +from .tools import ( + centered, check_input, direct, fd_weights_registry, generate_indices, left, + process_weights, right, transpose +) __all__ = [ 'centered', diff --git a/devito/finite_differences/rsfd.py b/devito/finite_differences/rsfd.py index 12e13a2c64..bdf2c08ac5 100644 --- a/devito/finite_differences/rsfd.py +++ b/devito/finite_differences/rsfd.py @@ -1,8 +1,9 @@ from functools import wraps from devito.types.dimension import StencilDimension -from .differentiable import Weights, DiffDerivative -from .tools import generate_indices, fd_weights_registry + +from .differentiable import DiffDerivative, Weights +from .tools import fd_weights_registry, generate_indices __all__ = ['d45', 'drot'] diff --git a/devito/finite_differences/tools.py b/devito/finite_differences/tools.py index 
8c3398f7cd..cbf3f4a752 100644 --- a/devito/finite_differences/tools.py +++ b/devito/finite_differences/tools.py @@ -1,8 +1,8 @@ -from functools import wraps, partial +from functools import partial, wraps from itertools import product import numpy as np -from sympy import S, finite_diff_weights, cacheit, sympify, Rational, Expr +from sympy import Expr, Rational, S, cacheit, finite_diff_weights, sympify from devito.logger import warning from devito.tools import Tag, as_tuple diff --git a/devito/ir/cgen/printer.py b/devito/ir/cgen/printer.py index 2d2ecd4e07..fc5c23b904 100644 --- a/devito/ir/cgen/printer.py +++ b/devito/ir/cgen/printer.py @@ -3,22 +3,20 @@ """ import numpy as np import sympy - from mpmath.libmp import prec_to_dps, to_str from packaging.version import Version - from sympy.core import S -from sympy.core.numbers import equal_valued, Float -from sympy.printing.codeprinter import CodePrinter +from sympy.core.numbers import Float, equal_valued from sympy.logic.boolalg import BooleanFunction +from sympy.printing.codeprinter import CodePrinter from sympy.printing.precedence import PRECEDENCE_VALUES, precedence from devito import configuration from devito.arch.compiler import AOMPCompiler from devito.symbolics.inspection import has_integer_args, sympy_dtype from devito.symbolics.queries import q_leaf +from devito.tools import ctypes_to_cstr, ctypes_vector_mapper, dtype_to_ctype from devito.types.basic import AbstractFunction -from devito.tools import ctypes_to_cstr, dtype_to_ctype, ctypes_vector_mapper __all__ = ['BasePrinter', 'ccode'] diff --git a/devito/ir/clusters/algorithms.py b/devito/ir/clusters/algorithms.py index 979d13ee8f..52c868ed12 100644 --- a/devito/ir/clusters/algorithms.py +++ b/devito/ir/clusters/algorithms.py @@ -7,19 +7,19 @@ from devito.exceptions import CompilationError from devito.finite_differences.elementary import Max, Min -from devito.ir.support import (Any, Backward, Forward, IterationSpace, erange, - pull_dims) -from 
devito.ir.equations import OpMin, OpMax, identity_mapper from devito.ir.clusters.analysis import analyze from devito.ir.clusters.cluster import Cluster, ClusterGroup from devito.ir.clusters.visitors import Queue, cluster_pass -from devito.ir.support import Scope +from devito.ir.equations import OpMax, OpMin, identity_mapper +from devito.ir.support import ( + Any, Backward, Forward, IterationSpace, Scope, erange, pull_dims +) from devito.mpi.halo_scheme import HaloScheme, HaloTouch from devito.mpi.reduction_scheme import DistReduce -from devito.symbolics import (limits_mapper, retrieve_indexed, uxreplace, - xreplace_indices) -from devito.tools import (DefaultOrderedDict, Stamp, as_mapper, flatten, - is_integer, split, timed_pass, toposort) +from devito.symbolics import limits_mapper, retrieve_indexed, uxreplace, xreplace_indices +from devito.tools import ( + DefaultOrderedDict, Stamp, as_mapper, flatten, is_integer, split, timed_pass, toposort +) from devito.types import Array, Eq, Symbol from devito.types.dimension import BOTTOM, ModuloDimension diff --git a/devito/ir/clusters/analysis.py b/devito/ir/clusters/analysis.py index 37b3acddae..5ebae71b0f 100644 --- a/devito/ir/clusters/analysis.py +++ b/devito/ir/clusters/analysis.py @@ -1,7 +1,8 @@ from devito.ir.clusters.cluster import Cluster from devito.ir.clusters.visitors import Queue -from devito.ir.support import (AFFINE, PARALLEL, PARALLEL_INDEP, PARALLEL_IF_ATOMIC, - SEQUENTIAL, Property, Scope) +from devito.ir.support import ( + AFFINE, PARALLEL, PARALLEL_IF_ATOMIC, PARALLEL_INDEP, SEQUENTIAL, Property, Scope +) from devito.ir.support.space import IterationSpace from devito.tools import as_tuple, flatten, timed_pass from devito.types.dimension import Dimension diff --git a/devito/ir/clusters/cluster.py b/devito/ir/clusters/cluster.py index d20d5b21b9..8c5f08835d 100644 --- a/devito/ir/clusters/cluster.py +++ b/devito/ir/clusters/cluster.py @@ -1,22 +1,21 @@ -from itertools import chain from functools import 
cached_property +from itertools import chain import numpy as np from devito.ir.equations import ClusterizedEq from devito.ir.support import ( - PARALLEL, PARALLEL_IF_PVT, BaseGuardBoundNext, Forward, Interval, IntervalGroup, - IterationSpace, DataSpace, Guards, Properties, Scope, WaitLock, WithLock, - PrefetchUpdate, detect_accesses, detect_io, normalize_properties, - tailor_properties, update_properties, normalize_syncs, minimum, maximum, - null_ispace + PARALLEL, PARALLEL_IF_PVT, BaseGuardBoundNext, DataSpace, Forward, Guards, Interval, + IntervalGroup, IterationSpace, PrefetchUpdate, Properties, Scope, WaitLock, WithLock, + detect_accesses, detect_io, maximum, minimum, normalize_properties, normalize_syncs, + null_ispace, tailor_properties, update_properties ) from devito.mpi.halo_scheme import HaloScheme, HaloTouch from devito.mpi.reduction_scheme import DistReduce from devito.symbolics import estimate_cost from devito.tools import as_tuple, filter_ordered, flatten, infer_dtype from devito.types import ( - Fence, WeakFence, CriticalRegion, ThreadPoolSync, ThreadCommit, ThreadWait + CriticalRegion, Fence, ThreadCommit, ThreadPoolSync, ThreadWait, WeakFence ) __all__ = ["Cluster", "ClusterGroup"] diff --git a/devito/ir/clusters/visitors.py b/devito/ir/clusters/visitors.py index 6ec80eba0f..694de41142 100644 --- a/devito/ir/clusters/visitors.py +++ b/devito/ir/clusters/visitors.py @@ -1,5 +1,4 @@ from collections.abc import Iterable - from itertools import groupby from devito.ir.support import IterationSpace, null_ispace diff --git a/devito/ir/equations/algorithms.py b/devito/ir/equations/algorithms.py index 310c17ecf3..813828e27a 100644 --- a/devito/ir/equations/algorithms.py +++ b/devito/ir/equations/algorithms.py @@ -1,17 +1,18 @@ from collections.abc import Iterable from functools import singledispatch -from devito.symbolics import (retrieve_indexed, uxreplace, retrieve_dimensions, - retrieve_functions) -from devito.tools import (Ordering, as_tuple, flatten, 
filter_sorted, filter_ordered, - frozendict) -from devito.types import (Dimension, Eq, IgnoreDimSort, SubDimension, - ConditionalDimension) +from devito.data.allocators import DataReference +from devito.logger import warning +from devito.symbolics import ( + retrieve_dimensions, retrieve_functions, retrieve_indexed, uxreplace +) +from devito.tools import ( + Ordering, as_tuple, filter_ordered, filter_sorted, flatten, frozendict +) +from devito.types import ConditionalDimension, Dimension, Eq, IgnoreDimSort, SubDimension from devito.types.array import Array from devito.types.basic import AbstractFunction from devito.types.dimension import MultiSubDimension, Thickness -from devito.data.allocators import DataReference -from devito.logger import warning __all__ = ['concretize_subdims', 'dimension_sort', 'lower_exprs'] diff --git a/devito/ir/equations/equation.py b/devito/ir/equations/equation.py index be40773f3c..8a8e821d94 100644 --- a/devito/ir/equations/equation.py +++ b/devito/ir/equations/equation.py @@ -3,10 +3,12 @@ import numpy as np import sympy -from devito.ir.equations.algorithms import dimension_sort, lower_exprs from devito.finite_differences.differentiable import diff2sympy -from devito.ir.support import (GuardFactor, Interval, IntervalGroup, IterationSpace, - Stencil, detect_io, detect_accesses) +from devito.ir.equations.algorithms import dimension_sort, lower_exprs +from devito.ir.support import ( + GuardFactor, Interval, IntervalGroup, IterationSpace, Stencil, detect_accesses, + detect_io +) from devito.symbolics import IntDiv, limits_mapper, uxreplace from devito.tools import Pickable, Tag, frozendict from devito.types import Eq, Inc, ReduceMax, ReduceMin, relational_min diff --git a/devito/ir/iet/algorithms.py b/devito/ir/iet/algorithms.py index d8bd0652e5..1715c9c2d1 100644 --- a/devito/ir/iet/algorithms.py +++ b/devito/ir/iet/algorithms.py @@ -1,10 +1,10 @@ from collections import OrderedDict from devito.ir.iet import ( - Expression, Increment, 
Iteration, List, Conditional, SyncSpot, Section, - HaloSpot, ExpressionBundle, Switch + Conditional, Expression, ExpressionBundle, HaloSpot, Increment, Iteration, List, + Section, Switch, SyncSpot ) -from devito.ir.support import GuardSwitch, GuardCaseSwitch +from devito.ir.support import GuardCaseSwitch, GuardSwitch from devito.tools import as_mapper, timed_pass __all__ = ['iet_build'] diff --git a/devito/ir/iet/nodes.py b/devito/ir/iet/nodes.py index 1f1fa8e5c6..1c8ff5fbff 100644 --- a/devito/ir/iet/nodes.py +++ b/devito/ir/iet/nodes.py @@ -3,24 +3,25 @@ import abc import ctypes import inspect -from functools import cached_property from collections import OrderedDict, namedtuple from collections.abc import Iterable +from functools import cached_property import cgen as c from sympy import IndexedBase, sympify from devito.data import FULL from devito.ir.cgen import ccode -from devito.ir.equations import DummyEq, OpInc, OpMin, OpMax -from devito.ir.support import (INBOUND, SEQUENTIAL, PARALLEL, PARALLEL_IF_ATOMIC, - PARALLEL_IF_PVT, VECTORIZED, AFFINE, Property, - Forward, WithLock, PrefetchUpdate, detect_io) -from devito.symbolics import ListInitializer, CallFromPointer -from devito.tools import (Signer, as_tuple, filter_ordered, filter_sorted, flatten, - ctypes_to_cstr) -from devito.types.basic import (AbstractFunction, AbstractSymbol, Basic, Indexed, - Symbol) +from devito.ir.equations import DummyEq, OpInc, OpMax, OpMin +from devito.ir.support import ( + AFFINE, INBOUND, PARALLEL, PARALLEL_IF_ATOMIC, PARALLEL_IF_PVT, SEQUENTIAL, + VECTORIZED, Forward, PrefetchUpdate, Property, WithLock, detect_io +) +from devito.symbolics import CallFromPointer, ListInitializer +from devito.tools import ( + Signer, as_tuple, ctypes_to_cstr, filter_ordered, filter_sorted, flatten +) +from devito.types.basic import AbstractFunction, AbstractSymbol, Basic, Indexed, Symbol from devito.types.object import AbstractObject, LocalObject __all__ = [ diff --git a/devito/ir/iet/visitors.py 
b/devito/ir/iet/visitors.py index 111a31b8eb..31b9356dce 100644 --- a/devito/ir/iet/visitors.py +++ b/devito/ir/iet/visitors.py @@ -4,30 +4,34 @@ The main Visitor class is adapted from https://github.com/coneoproject/COFFEE. """ +import ctypes from collections import OrderedDict from collections.abc import Callable, Generator, Iterable, Iterator, Sequence from itertools import chain, groupby from typing import Any, Generic, TypeVar -import ctypes import cgen as c from sympy import IndexedBase from sympy.core.function import Application from devito.exceptions import CompilationError -from devito.ir.iet.nodes import (Node, Iteration, Expression, ExpressionBundle, - Call, Lambda, BlankLine, Section, ListMajor) +from devito.ir.iet.nodes import ( + BlankLine, Call, Expression, ExpressionBundle, Iteration, Lambda, ListMajor, Node, + Section +) from devito.ir.support.space import Backward -from devito.symbolics import (FieldFromComposite, FieldFromPointer, - ListInitializer, uxreplace) +from devito.symbolics import ( + FieldFromComposite, FieldFromPointer, ListInitializer, uxreplace +) from devito.symbolics.extended_dtypes import NoDeclStruct -from devito.tools import (GenericVisitor, as_tuple, filter_ordered, - filter_sorted, flatten, is_external_ctype, - c_restrict_void_p, sorted_priority) +from devito.tools import ( + GenericVisitor, as_tuple, c_restrict_void_p, filter_ordered, filter_sorted, flatten, + is_external_ctype, sorted_priority +) +from devito.types import ( + ArrayObject, CompositeObject, DeviceMap, Dimension, IndexedData, Pointer +) from devito.types.basic import AbstractFunction, AbstractSymbol, Basic -from devito.types import (ArrayObject, CompositeObject, Dimension, Pointer, - IndexedData, DeviceMap) - __all__ = [ 'CGen', @@ -1604,8 +1608,9 @@ def generate(self): def sorted_efuncs(efuncs): - from devito.ir.iet.efunc import (CommCallable, DeviceFunction, - ThreadCallable, ElementalFunction) + from devito.ir.iet.efunc import ( + CommCallable, 
DeviceFunction, ElementalFunction, ThreadCallable + ) priority = { DeviceFunction: 3, diff --git a/devito/ir/stree/algorithms.py b/devito/ir/stree/algorithms.py index 07e8094700..6fb229fdc3 100644 --- a/devito/ir/stree/algorithms.py +++ b/devito/ir/stree/algorithms.py @@ -4,10 +4,14 @@ from sympy import And from devito.ir.clusters import Cluster -from devito.ir.stree.tree import (ScheduleTree, NodeIteration, NodeConditional, - NodeSync, NodeExprs, NodeSection, NodeHalo) -from devito.ir.support import (SEQUENTIAL, Any, Interval, IterationInterval, - IterationSpace, normalize_properties, normalize_syncs) +from devito.ir.stree.tree import ( + NodeConditional, NodeExprs, NodeHalo, NodeIteration, NodeSection, NodeSync, + ScheduleTree +) +from devito.ir.support import ( + SEQUENTIAL, Any, Interval, IterationInterval, IterationSpace, normalize_properties, + normalize_syncs +) from devito.mpi.halo_scheme import HaloScheme from devito.tools import Bunch, DefaultOrderedDict, as_mapper diff --git a/devito/ir/stree/tree.py b/devito/ir/stree/tree.py index b04fb2942f..961afedc65 100644 --- a/devito/ir/stree/tree.py +++ b/devito/ir/stree/tree.py @@ -1,6 +1,6 @@ -from anytree import NodeMixin, PostOrderIter, RenderTree, ContStyle +from anytree import ContStyle, NodeMixin, PostOrderIter, RenderTree -from devito.ir.support import WithLock, PrefetchUpdate +from devito.ir.support import PrefetchUpdate, WithLock __all__ = [ "NodeConditional", diff --git a/devito/ir/support/basic.py b/devito/ir/support/basic.py index c55b772c4a..a304d50b01 100644 --- a/devito/ir/support/basic.py +++ b/devito/ir/support/basic.py @@ -1,23 +1,25 @@ -from collections.abc import Iterable -from itertools import chain, product +from collections.abc import Callable, Iterable from functools import cached_property -from collections.abc import Callable +from itertools import chain, product -from sympy import S, Expr import sympy +from sympy import Expr, S from devito.ir.support.space import Backward, null_ispace 
from devito.ir.support.utils import AccessMode, extrema from devito.ir.support.vector import LabeledVector, Vector -from devito.symbolics import (compare_ops, retrieve_indexed, retrieve_terminals, - q_constant, q_comp_acc, q_affine, q_routine, search, - uxreplace) -from devito.tools import (Tag, as_mapper, as_tuple, is_integer, filter_sorted, - flatten, memoized_meth, memoized_generator, smart_gt, - smart_lt, CacheInstances) -from devito.types import (ComponentAccess, Dimension, DimensionTuple, Fence, - CriticalRegion, Function, Symbol, Temp, TempArray, - TBArray) +from devito.symbolics import ( + compare_ops, q_affine, q_comp_acc, q_constant, q_routine, retrieve_indexed, + retrieve_terminals, search, uxreplace +) +from devito.tools import ( + CacheInstances, Tag, as_mapper, as_tuple, filter_sorted, flatten, is_integer, + memoized_generator, memoized_meth, smart_gt, smart_lt +) +from devito.types import ( + ComponentAccess, CriticalRegion, Dimension, DimensionTuple, Fence, Function, Symbol, + TBArray, Temp, TempArray +) __all__ = ['ExprGeometry', 'IterationInstance', 'Scope', 'TimedAccess'] diff --git a/devito/ir/support/guards.py b/devito/ir/support/guards.py index 4ccdfa89e9..574fe490ec 100644 --- a/devito/ir/support/guards.py +++ b/devito/ir/support/guards.py @@ -5,12 +5,12 @@ """ from collections import Counter, defaultdict +from functools import singledispatch from operator import ge, gt, le, lt -from functools import singledispatch +import numpy as np from sympy import And, Expr, Ge, Gt, Le, Lt, Mul, true from sympy.logic.boolalg import BooleanFunction -import numpy as np from devito.ir.support.space import Forward, IterationDirection from devito.symbolics import CondEq, CondNe, search diff --git a/devito/ir/support/space.py b/devito/ir/support/space.py index 2e532b7c1c..dda334d3d1 100644 --- a/devito/ir/support/space.py +++ b/devito/ir/support/space.py @@ -1,15 +1,16 @@ import abc from collections import OrderedDict -from functools import reduce, 
cached_property +from functools import cached_property, reduce from operator import mul from sympy import Expr -from devito.ir.support.utils import minimum, maximum -from devito.ir.support.vector import Vector, vmin, vmax -from devito.tools import (Ordering, Stamp, as_list, as_set, as_tuple, - filter_ordered, flatten, frozendict, is_integer, - toposort) +from devito.ir.support.utils import maximum, minimum +from devito.ir.support.vector import Vector, vmax, vmin +from devito.tools import ( + Ordering, Stamp, as_list, as_set, as_tuple, filter_ordered, flatten, frozendict, + is_integer, toposort +) from devito.types import Dimension, ModuloDimension __all__ = [ diff --git a/devito/ir/support/symregistry.py b/devito/ir/support/symregistry.py index 8149ab22d5..aca49757a6 100644 --- a/devito/ir/support/symregistry.py +++ b/devito/ir/support/symregistry.py @@ -1,6 +1,7 @@ from devito.tools import generator -from devito.types import (DeviceID, NThreads, NThreadsNested, NThreadsNonaffine, - NPThreads, ThreadID) +from devito.types import ( + DeviceID, NPThreads, NThreads, NThreadsNested, NThreadsNonaffine, ThreadID +) __init__ = ['SymbolRegistry'] diff --git a/devito/ir/support/syncs.py b/devito/ir/support/syncs.py index 44dc39cf67..753b8f28fb 100644 --- a/devito/ir/support/syncs.py +++ b/devito/ir/support/syncs.py @@ -7,6 +7,7 @@ from devito.data import FULL from devito.tools import Pickable, as_tuple, filter_ordered, frozendict + from .utils import IMask __all__ = [ diff --git a/devito/ir/support/utils.py b/devito/ir/support/utils.py index 32dd3a9b88..41805e0ba8 100644 --- a/devito/ir/support/utils.py +++ b/devito/ir/support/utils.py @@ -2,11 +2,11 @@ from itertools import product from devito.finite_differences import IndexDerivative -from devito.symbolics import (CallFromPointer, retrieve_indexed, retrieve_terminals, - search) -from devito.tools import DefaultOrderedDict, as_tuple, flatten, filter_sorted, split -from devito.types import (Dimension, DimensionTuple, 
Indirection, ModuloDimension, - StencilDimension) +from devito.symbolics import CallFromPointer, retrieve_indexed, retrieve_terminals, search +from devito.tools import DefaultOrderedDict, as_tuple, filter_sorted, flatten, split +from devito.types import ( + Dimension, DimensionTuple, Indirection, ModuloDimension, StencilDimension +) __all__ = [ 'AccessMode', diff --git a/devito/mpatches/as_independent.py b/devito/mpatches/as_independent.py index 975d51bcdb..91d5e329b6 100644 --- a/devito/mpatches/as_independent.py +++ b/devito/mpatches/as_independent.py @@ -1,13 +1,12 @@ """Monkeypatch for as_independent required for Devito Derivative. """ -from packaging.version import Version - import sympy +from packaging.version import Version from sympy.core.add import _unevaluated_Add from sympy.core.expr import Expr from sympy.core.mul import _unevaluated_Mul -from sympy.core.symbol import Symbol from sympy.core.singleton import S +from sympy.core.symbol import Symbol """ Copy of upstream sympy methods, without docstrings, comments or typehints diff --git a/devito/mpatches/rationaltools.py b/devito/mpatches/rationaltools.py index 4e4cea1539..93b0d14ed6 100644 --- a/devito/mpatches/rationaltools.py +++ b/devito/mpatches/rationaltools.py @@ -3,7 +3,7 @@ import importlib import sympy -from sympy.core import Basic, Add, sympify +from sympy.core import Add, Basic, sympify from sympy.core.exprtools import gcd_terms from sympy.utilities import public from sympy.utilities.iterables import iterable diff --git a/devito/mpi/distributed.py b/devito/mpi/distributed.py index 3c251d0a22..480609cd2f 100644 --- a/devito/mpi/distributed.py +++ b/devito/mpi/distributed.py @@ -1,24 +1,22 @@ +import atexit from abc import ABC, abstractmethod from ctypes import c_int, c_void_p, sizeof -from itertools import groupby, product from functools import cached_property - +from itertools import groupby, product from math import ceil, pow -from sympy import factorint, Interval - -import atexit import numpy 
as np from cgen import Struct, Value +from sympy import Interval, factorint -from devito.data import LEFT, CENTER, RIGHT, Decomposition +from devito.data import CENTER, LEFT, RIGHT, Decomposition from devito.parameters import configuration -from devito.tools import (EnrichedTuple, as_tuple, ctypes_to_cstr, filter_ordered, - frozendict) -from devito.types import CompositeObject, Object, Constant +from devito.tools import ( + EnrichedTuple, as_tuple, ctypes_to_cstr, filter_ordered, frozendict +) +from devito.types import CompositeObject, Constant, Object from devito.types.utils import DimensionTuple - # Do not prematurely initialize MPI # This allows launching a Devito program from within another Python program # that has *already* initialized MPI diff --git a/devito/mpi/halo_scheme.py b/devito/mpi/halo_scheme.py index 062b0a35b6..a3b803d4f5 100644 --- a/devito/mpi/halo_scheme.py +++ b/devito/mpi/halo_scheme.py @@ -1,17 +1,19 @@ -from collections import OrderedDict, namedtuple, defaultdict +from collections import OrderedDict, defaultdict, namedtuple +from functools import cached_property from itertools import product from operator import attrgetter -from functools import cached_property -from sympy import Max, Min import sympy +from sympy import Max, Min from devito import configuration -from devito.data import CORE, OWNED, LEFT, CENTER, RIGHT +from devito.data import CENTER, CORE, LEFT, OWNED, RIGHT from devito.ir.support import Forward, Scope from devito.symbolics.manipulation import _uxreplace_registry -from devito.tools import (Reconstructable, Tag, as_tuple, filter_ordered, flatten, - frozendict, is_integer, filter_sorted, EnrichedTuple) +from devito.tools import ( + EnrichedTuple, Reconstructable, Tag, as_tuple, filter_ordered, filter_sorted, flatten, + frozendict, is_integer +) __all__ = ['HaloScheme', 'HaloSchemeEntry', 'HaloSchemeException', 'HaloTouch'] diff --git a/devito/mpi/routines.py b/devito/mpi/routines.py index c646951a12..9fa639a7a1 100644 --- 
a/devito/mpi/routines.py +++ b/devito/mpi/routines.py @@ -1,25 +1,31 @@ import abc from collections import OrderedDict -from ctypes import POINTER, c_void_p, c_int, sizeof +from ctypes import POINTER, c_int, c_void_p, sizeof from functools import reduce from itertools import product from operator import mul from sympy import Integer -from devito.data import OWNED, HALO, NOPAD, LEFT, CENTER, RIGHT -from devito.ir.equations import DummyEq, OpInc, OpMin, OpMax -from devito.ir.iet import (Call, Callable, Conditional, ElementalFunction, - Expression, ExpressionBundle, AugmentedExpression, - Iteration, List, Prodder, Return, make_efunc, FindNodes, - Transformer, ElementalCall, CommCallable) +from devito.data import CENTER, HALO, LEFT, NOPAD, OWNED, RIGHT +from devito.ir.equations import DummyEq, OpInc, OpMax, OpMin +from devito.ir.iet import ( + AugmentedExpression, Call, Callable, CommCallable, Conditional, ElementalCall, + ElementalFunction, Expression, ExpressionBundle, FindNodes, Iteration, List, Prodder, + Return, Transformer, make_efunc +) from devito.mpi import MPI -from devito.symbolics import (Byref, CondNe, FieldFromPointer, FieldFromComposite, - IndexedPointer, Macro, cast, subs_op_args) -from devito.tools import (as_mapper, dtype_to_mpitype, dtype_len, infer_datasize, - flatten, generator, is_integer) -from devito.types import (Array, Bag, BundleView, Dimension, Eq, Symbol, - LocalObject, CompositeObject, CustomDimension) +from devito.symbolics import ( + Byref, CondNe, FieldFromComposite, FieldFromPointer, IndexedPointer, Macro, cast, + subs_op_args +) +from devito.tools import ( + as_mapper, dtype_len, dtype_to_mpitype, flatten, generator, infer_datasize, is_integer +) +from devito.types import ( + Array, Bag, BundleView, CompositeObject, CustomDimension, Dimension, Eq, LocalObject, + Symbol +) __all__ = ['HaloExchangeBuilder', 'ReductionBuilder', 'mpi_registry'] diff --git a/devito/operations/interpolators.py b/devito/operations/interpolators.py index 
22ba35bf9c..9104fb426c 100644 --- a/devito/operations/interpolators.py +++ b/devito/operations/interpolators.py @@ -1,8 +1,8 @@ from abc import ABC, abstractmethod -from functools import wraps, cached_property +from functools import cached_property, wraps -import sympy import numpy as np +import sympy try: from scipy.special import i0 @@ -12,10 +12,11 @@ from devito.finite_differences.differentiable import Mul from devito.finite_differences.elementary import floor from devito.logger import warning -from devito.symbolics import retrieve_function_carriers, retrieve_functions, INT -from devito.tools import as_tuple, flatten, filter_ordered, Pickable, memoized_meth -from devito.types import (ConditionalDimension, Eq, Inc, Evaluable, Symbol, - CustomDimension, SubFunction) +from devito.symbolics import INT, retrieve_function_carriers, retrieve_functions +from devito.tools import Pickable, as_tuple, filter_ordered, flatten, memoized_meth +from devito.types import ( + ConditionalDimension, CustomDimension, Eq, Evaluable, Inc, SubFunction, Symbol +) from devito.types.utils import DimensionTuple __all__ = ['LinearInterpolator', 'PrecomputedInterpolator', 'SincInterpolator'] diff --git a/devito/operations/solve.py b/devito/operations/solve.py index a6d1a55910..6545fc4ead 100644 --- a/devito/operations/solve.py +++ b/devito/operations/solve.py @@ -2,9 +2,9 @@ import sympy -from devito.logger import warning -from devito.finite_differences.differentiable import Add, Mul, EvalDerivative from devito.finite_differences.derivative import Derivative +from devito.finite_differences.differentiable import Add, EvalDerivative, Mul +from devito.logger import warning from devito.tools import as_tuple __all__ = ['linsolve', 'solve'] diff --git a/devito/operator/operator.py b/devito/operator/operator.py index 451f54b313..a0f3d06410 100644 --- a/devito/operator/operator.py +++ b/devito/operator/operator.py @@ -1,47 +1,48 @@ -from collections import OrderedDict, namedtuple -from functools 
import cached_property import ctypes import shutil -from operator import attrgetter +from collections import OrderedDict, namedtuple +from functools import cached_property from math import ceil +from operator import attrgetter from tempfile import gettempdir -from sympy import sympify -import sympy import numpy as np +import sympy +from sympy import sympify -from devito.arch import (ANYCPU, Device, compiler_registry, platform_registry, - get_visible_devices) +from devito.arch import ( + ANYCPU, Device, compiler_registry, get_visible_devices, platform_registry +) from devito.data import default_allocator -from devito.exceptions import (CompilationError, ExecutionError, InvalidArgument, - InvalidOperator) -from devito.logger import (debug, info, perf, warning, is_log_enabled_for, - switch_log_level) -from devito.ir.equations import LoweredEq, lower_exprs, concretize_subdims +from devito.exceptions import ( + CompilationError, ExecutionError, InvalidArgument, InvalidOperator +) from devito.ir.clusters import ClusterGroup, clusterize -from devito.ir.iet import (Callable, CInterface, EntryFunction, DeviceFunction, - FindSymbols, MetaCall, derive_parameters, iet_build) -from devito.ir.support import AccessMode, SymbolRegistry +from devito.ir.equations import LoweredEq, concretize_subdims, lower_exprs +from devito.ir.iet import ( + Callable, CInterface, DeviceFunction, EntryFunction, FindSymbols, MetaCall, + derive_parameters, iet_build +) from devito.ir.stree import stree_build +from devito.ir.support import AccessMode, SymbolRegistry +from devito.logger import debug, info, is_log_enabled_for, perf, switch_log_level, warning +from devito.mpi import MPI from devito.operator.profiling import create_profile from devito.operator.registry import operator_selector -from devito.mpi import MPI from devito.parameters import configuration from devito.passes import ( - Graph, lower_index_derivatives, generate_implicit, generate_macros, - minimize_symbols, optimize_pows, unevaluate, 
error_mapper, is_on_device, - lower_dtypes + Graph, error_mapper, generate_implicit, generate_macros, is_on_device, lower_dtypes, + lower_index_derivatives, minimize_symbols, optimize_pows, unevaluate ) from devito.symbolics import estimate_cost, subs_op_args -from devito.tools import (DAG, OrderedSet, Signer, ReducerMap, as_mapper, as_tuple, - flatten, filter_sorted, frozendict, is_integer, - split, timed_pass, timed_region, contains_val, - CacheInstances, MemoryEstimate) -from devito.types import (Buffer, Evaluable, host_layer, device_layer, - disk_layer) +from devito.tools import ( + DAG, CacheInstances, MemoryEstimate, OrderedSet, ReducerMap, Signer, as_mapper, + as_tuple, contains_val, filter_sorted, flatten, frozendict, is_integer, split, + timed_pass, timed_region +) +from devito.types import Buffer, Evaluable, device_layer, disk_layer, host_layer from devito.types.dimension import Thickness - __all__ = ['Operator'] diff --git a/devito/operator/profiling.py b/devito/operator/profiling.py index f973cfacdd..9355b6efe6 100644 --- a/devito/operator/profiling.py +++ b/devito/operator/profiling.py @@ -9,8 +9,9 @@ from sympy import S from devito.arch import get_advisor_path -from devito.ir.iet import (ExpressionBundle, List, TimedList, Section, - Iteration, FindNodes, Transformer) +from devito.ir.iet import ( + ExpressionBundle, FindNodes, Iteration, List, Section, TimedList, Transformer +) from devito.ir.support import IntervalGroup from devito.logger import warning from devito.mpi import MPI diff --git a/devito/parameters.py b/devito/parameters.py index b793af51d6..300ff3c5f7 100644 --- a/devito/parameters.py +++ b/devito/parameters.py @@ -1,7 +1,7 @@ """The parameters dictionary contains global parameter settings.""" +import os from abc import ABC, abstractmethod from collections import OrderedDict -import os from functools import wraps from devito.logger import info, warning diff --git a/devito/passes/clusters/aliases.py b/devito/passes/clusters/aliases.py 
index 3a332ddfd8..f6ad4c9777 100644 --- a/devito/passes/clusters/aliases.py +++ b/devito/passes/clusters/aliases.py @@ -1,5 +1,5 @@ from collections import Counter, OrderedDict, defaultdict, namedtuple -from functools import singledispatch, cached_property +from functools import cached_property, singledispatch from itertools import groupby import numpy as np @@ -8,19 +8,23 @@ from devito.exceptions import CompilationError from devito.finite_differences import EvalDerivative, IndexDerivative, Weights from devito.ir import ( - SEQUENTIAL, PARALLEL_IF_PVT, SEPARABLE, Forward, IterationSpace, Interval, - Cluster, ClusterGroup, ExprGeometry, Queue, IntervalGroup, LabeledVector, - Vector, normalize_properties, relax_properties, unbounded, minimum, maximum, - extrema, vmax, vmin + PARALLEL_IF_PVT, SEPARABLE, SEQUENTIAL, Cluster, ClusterGroup, ExprGeometry, Forward, + Interval, IntervalGroup, IterationSpace, LabeledVector, Queue, Vector, extrema, + maximum, minimum, normalize_properties, relax_properties, unbounded, vmax, vmin ) from devito.passes.clusters.cse import _cse -from devito.symbolics import (Uxmapper, estimate_cost, search, reuse_if_untouched, - retrieve_functions, uxreplace, sympy_dtype) -from devito.tools import (Stamp, as_mapper, as_tuple, flatten, frozendict, - is_integer, generator, split, timed_pass) -from devito.types import (Eq, Symbol, Temp, TempArray, TempFunction, - ModuloDimension, CustomDimension, IncrDimension, - StencilDimension, Indexed, Hyperplane, Size) +from devito.symbolics import ( + Uxmapper, estimate_cost, retrieve_functions, reuse_if_untouched, search, sympy_dtype, + uxreplace +) +from devito.tools import ( + Stamp, as_mapper, as_tuple, flatten, frozendict, generator, is_integer, split, + timed_pass +) +from devito.types import ( + CustomDimension, Eq, Hyperplane, IncrDimension, Indexed, ModuloDimension, Size, + StencilDimension, Symbol, Temp, TempArray, TempFunction +) from devito.types.grid import MultiSubDimension __all__ = ['cire'] 
diff --git a/devito/passes/clusters/asynchrony.py b/devito/passes/clusters/asynchrony.py index 557ca7c5c2..8ee7792068 100644 --- a/devito/passes/clusters/asynchrony.py +++ b/devito/passes/clusters/asynchrony.py @@ -2,8 +2,10 @@ from sympy import true -from devito.ir import (Forward, Backward, GuardBoundNext, WaitLock, WithLock, SyncArray, - PrefetchUpdate, ReleaseLock, Queue, normalize_syncs) +from devito.ir import ( + Backward, Forward, GuardBoundNext, PrefetchUpdate, Queue, ReleaseLock, SyncArray, + WaitLock, WithLock, normalize_syncs +) from devito.passes.clusters.utils import in_critical_region, is_memcpy from devito.symbolics import IntDiv, uxreplace from devito.tools import OrderedSet, is_integer, timed_pass diff --git a/devito/passes/clusters/blocking.py b/devito/passes/clusters/blocking.py index 0289d5e23e..08ae0dc6fc 100644 --- a/devito/passes/clusters/blocking.py +++ b/devito/passes/clusters/blocking.py @@ -2,13 +2,16 @@ from devito.finite_differences.differentiable import IndexSum from devito.ir.clusters import Queue -from devito.ir.support import (AFFINE, PARALLEL, PARALLEL_IF_ATOMIC, - PARALLEL_IF_PVT, SKEWABLE, TILABLES, Interval, - IntervalGroup, IterationSpace, Scope) +from devito.ir.support import ( + AFFINE, PARALLEL, PARALLEL_IF_ATOMIC, PARALLEL_IF_PVT, SKEWABLE, TILABLES, Interval, + IntervalGroup, IterationSpace, Scope +) from devito.passes import is_on_device from devito.symbolics import search, uxreplace, xreplace_indices -from devito.tools import (UnboundedMultiTuple, UnboundTuple, as_mapper, as_tuple, - filter_ordered, flatten, is_integer) +from devito.tools import ( + UnboundedMultiTuple, UnboundTuple, as_mapper, as_tuple, filter_ordered, flatten, + is_integer +) from devito.types import BlockDimension __all__ = ['blocking'] diff --git a/devito/passes/clusters/buffering.py b/devito/passes/clusters/buffering.py index b967e9f249..6d96de77bc 100644 --- a/devito/passes/clusters/buffering.py +++ b/devito/passes/clusters/buffering.py @@ -2,18 
+2,21 @@ from functools import cached_property from itertools import chain -from sympy import S import numpy as np +from sympy import S -from devito.ir import (Cluster, Backward, Forward, GuardBound, Interval, - IntervalGroup, IterationSpace, Properties, Queue, Vector, - InitArray, lower_exprs, vmax, vmin) from devito.exceptions import CompilationError +from devito.ir import ( + Backward, Cluster, Forward, GuardBound, InitArray, Interval, IntervalGroup, + IterationSpace, Properties, Queue, Vector, lower_exprs, vmax, vmin +) from devito.logger import warning from devito.passes.clusters.utils import is_memcpy from devito.symbolics import IntDiv, retrieve_functions, uxreplace -from devito.tools import (Stamp, as_mapper, as_tuple, filter_ordered, frozendict, - flatten, is_integer, timed_pass) +from devito.tools import ( + Stamp, as_mapper, as_tuple, filter_ordered, flatten, frozendict, is_integer, + timed_pass +) from devito.types import Array, CustomDimension, Eq, ModuloDimension __all__ = ['buffering'] diff --git a/devito/passes/clusters/cse.py b/devito/passes/clusters/cse.py index 509490a2e1..025acdfdee 100644 --- a/devito/passes/clusters/cse.py +++ b/devito/passes/clusters/cse.py @@ -1,9 +1,10 @@ -from collections import defaultdict, Counter +from collections import Counter, defaultdict from functools import cached_property, singledispatch import numpy as np import sympy from sympy import Add, Function, Indexed, Mul, Pow + try: from sympy.core.core import ordering_of_classes except ImportError: @@ -13,9 +14,9 @@ from devito.finite_differences.differentiable import IndexDerivative from devito.ir import Cluster, Scope, cluster_pass from devito.symbolics import estimate_cost, q_leaf, q_terminal -from devito.symbolics.search import search from devito.symbolics.manipulation import _uxreplace -from devito.tools import DAG, as_list, as_tuple, frozendict, extract_dtype +from devito.symbolics.search import search +from devito.tools import DAG, as_list, as_tuple, 
extract_dtype, frozendict from devito.types import Eq, Symbol, Temp __all__ = ['cse'] diff --git a/devito/passes/clusters/derivatives.py b/devito/passes/clusters/derivatives.py index d244704058..47607ae306 100644 --- a/devito/passes/clusters/derivatives.py +++ b/devito/passes/clusters/derivatives.py @@ -1,7 +1,7 @@ from functools import singledispatch -from sympy import S import numpy as np +from sympy import S from devito.finite_differences import IndexDerivative from devito.ir import Backward, Forward, Interval, IterationSpace, Queue diff --git a/devito/passes/clusters/factorization.py b/devito/passes/clusters/factorization.py index 45d140a253..812dc9c180 100644 --- a/devito/passes/clusters/factorization.py +++ b/devito/passes/clusters/factorization.py @@ -3,8 +3,9 @@ from sympy import Add, Mul, S, collect from devito.ir import cluster_pass -from devito.symbolics import (BasicWrapperMixin, estimate_cost, reuse_if_untouched, - retrieve_symbols, q_routine) +from devito.symbolics import ( + BasicWrapperMixin, estimate_cost, q_routine, retrieve_symbols, reuse_if_untouched +) from devito.tools import ReducerMap from devito.types.object import AbstractObject diff --git a/devito/passes/clusters/implicit.py b/devito/passes/clusters/implicit.py index bc1136f795..af19e1d3c5 100644 --- a/devito/passes/clusters/implicit.py +++ b/devito/passes/clusters/implicit.py @@ -5,7 +5,7 @@ from collections import defaultdict from functools import singledispatch -from devito.ir import SEQUENTIAL, Queue, Forward +from devito.ir import SEQUENTIAL, Forward, Queue from devito.symbolics import retrieve_dimensions from devito.tools import Bunch, frozendict, timed_pass from devito.types import Eq diff --git a/devito/passes/clusters/misc.py b/devito/passes/clusters/misc.py index 46bc8707da..b8ec71d3c2 100644 --- a/devito/passes/clusters/misc.py +++ b/devito/passes/clusters/misc.py @@ -3,8 +3,10 @@ from devito.finite_differences import IndexDerivative from devito.ir.clusters import Cluster, 
ClusterGroup, Queue, cluster_pass -from devito.ir.support import (SEQUENTIAL, SEPARABLE, Scope, ReleaseLock, WaitLock, - WithLock, InitArray, SyncArray, PrefetchUpdate) +from devito.ir.support import ( + SEPARABLE, SEQUENTIAL, InitArray, PrefetchUpdate, ReleaseLock, Scope, SyncArray, + WaitLock, WithLock +) from devito.passes.clusters.utils import in_critical_region from devito.symbolics import pow_to_mul, search from devito.tools import DAG, Stamp, as_tuple, flatten, frozendict, timed_pass diff --git a/devito/passes/iet/asynchrony.py b/devito/passes/iet/asynchrony.py index 144a140a6f..2ac8d8581d 100644 --- a/devito/passes/iet/asynchrony.py +++ b/devito/passes/iet/asynchrony.py @@ -1,21 +1,21 @@ from collections import namedtuple -from functools import singledispatch from ctypes import c_int +from functools import singledispatch import cgen as c -from devito.ir import (AsyncCall, AsyncCallable, BlankLine, Call, Callable, - Conditional, DummyEq, DummyExpr, While, Increment, Iteration, - List, PointerCast, Return, FindNodes, FindSymbols, - ThreadCallable, EntryFunction, Transformer, make_callable, - maybe_alias) +from devito.ir import ( + AsyncCall, AsyncCallable, BlankLine, Call, Callable, Conditional, DummyEq, DummyExpr, + EntryFunction, FindNodes, FindSymbols, Increment, Iteration, List, PointerCast, + Return, ThreadCallable, Transformer, While, make_callable, maybe_alias +) from devito.passes.iet.definitions import DataManager from devito.passes.iet.engine import iet_pass -from devito.symbolics import (CondEq, CondNe, FieldFromComposite, FieldFromPointer, - Null) +from devito.symbolics import CondEq, CondNe, FieldFromComposite, FieldFromPointer, Null from devito.tools import split -from devito.types import (Lock, Pointer, PThreadArray, QueueID, SharedData, Temp, - VolatileInt) +from devito.types import ( + Lock, Pointer, PThreadArray, QueueID, SharedData, Temp, VolatileInt +) __all__ = ['pthreadify'] diff --git a/devito/passes/iet/definitions.py 
b/devito/passes/iet/definitions.py index 978c093eed..0a5416d6a2 100644 --- a/devito/passes/iet/definitions.py +++ b/devito/passes/iet/definitions.py @@ -11,20 +11,20 @@ import numpy as np from devito.ir import ( - Block, Call, Definition, DummyExpr, Iteration, List, Return, EntryFunction, - FindNodes, FindSymbols, MapExprStmts, Transformer, make_callable + Block, Call, Definition, DummyExpr, EntryFunction, FindNodes, FindSymbols, Iteration, + List, MapExprStmts, Return, Transformer, make_callable ) from devito.passes import is_gpu_create from devito.passes.iet.engine import iet_pass from devito.passes.iet.langbase import LangBB from devito.symbolics import ( - Byref, DefFunction, FieldFromPointer, IndexedPointer, ListInitializer, - SizeOf, VOID, pow_to_mul, unevaluate, as_long + VOID, Byref, DefFunction, FieldFromPointer, IndexedPointer, ListInitializer, SizeOf, + as_long, pow_to_mul, unevaluate ) -from devito.tools import as_mapper, as_list, as_tuple, filter_sorted, flatten +from devito.tools import as_list, as_mapper, as_tuple, filter_sorted, flatten from devito.types import ( - Array, ComponentAccess, CustomDimension, Dimension, DeviceMap, DeviceRM, - Eq, Symbol, size_t + Array, ComponentAccess, CustomDimension, DeviceMap, DeviceRM, Dimension, Eq, Symbol, + size_t ) __all__ = ['DataManager', 'DeviceAwareDataManager', 'Storage'] diff --git a/devito/passes/iet/engine.py b/devito/passes/iet/engine.py index 724ccf5c84..3fa8f3249f 100644 --- a/devito/passes/iet/engine.py +++ b/devito/passes/iet/engine.py @@ -5,21 +5,20 @@ from sympy import Mul from devito.ir.iet import ( - Call, ExprStmt, Expression, Iteration, SyncSpot, AsyncCallable, FindNodes, - FindSymbols, MapNodes, MetaCall, Transformer, EntryFunction, ThreadCallable, - Uxreplace, derive_parameters + AsyncCallable, Call, EntryFunction, Expression, ExprStmt, FindNodes, FindSymbols, + Iteration, MapNodes, MetaCall, SyncSpot, ThreadCallable, Transformer, Uxreplace, + derive_parameters ) from devito.ir.support 
import SymbolRegistry from devito.mpi.distributed import MPINeighborhood -from devito.mpi.routines import Gather, Scatter, HaloUpdate, HaloWait, MPIMsg +from devito.mpi.routines import Gather, HaloUpdate, HaloWait, MPIMsg, Scatter from devito.passes import needs_transfer -from devito.symbolics import (FieldFromComposite, FieldFromPointer, IndexedPointer, - search) +from devito.symbolics import FieldFromComposite, FieldFromPointer, IndexedPointer, search from devito.tools import DAG, as_tuple, filter_ordered, sorted_priority, timed_pass from devito.types import ( - Array, Bundle, ComponentAccess, CompositeObject, Lock, IncrDimension, - ModuloDimension, Indirection, Pointer, SharedData, ThreadArray, Symbol, Temp, - NPThreads, NThreadsBase, Wildcard + Array, Bundle, ComponentAccess, CompositeObject, IncrDimension, Indirection, Lock, + ModuloDimension, NPThreads, NThreadsBase, Pointer, SharedData, Symbol, Temp, + ThreadArray, Wildcard ) from devito.types.args import ArgProvider from devito.types.dense import DiscreteFunction diff --git a/devito/passes/iet/errors.py b/devito/passes/iet/errors.py index 8082ae53cf..d4ac012d31 100644 --- a/devito/passes/iet/errors.py +++ b/devito/passes/iet/errors.py @@ -2,9 +2,10 @@ import numpy as np from sympy import Expr, Not, S -from devito.ir.iet import (Call, Conditional, DummyExpr, EntryFunction, Iteration, - List, Break, Return, FindNodes, FindSymbols, Transformer, - make_callable) +from devito.ir.iet import ( + Break, Call, Conditional, DummyExpr, EntryFunction, FindNodes, FindSymbols, Iteration, + List, Return, Transformer, make_callable +) from devito.passes.iet.engine import iet_pass from devito.symbolics import CondEq, MathFunction from devito.tools import dtype_to_ctype diff --git a/devito/passes/iet/instrument.py b/devito/passes/iet/instrument.py index 5ad934d13f..7e0e49d2cb 100644 --- a/devito/passes/iet/instrument.py +++ b/devito/passes/iet/instrument.py @@ -1,10 +1,12 @@ from itertools import groupby -from devito.ir.iet 
import (BusyWait, Iteration, Section, TimedList, - FindNodes, FindSymbols, MapNodes, Transformer) -from devito.mpi.routines import (HaloUpdateCall, HaloWaitCall, MPICall, MPIList, - HaloUpdateList, HaloWaitList, RemainderCall, - ComputeCall) +from devito.ir.iet import ( + BusyWait, FindNodes, FindSymbols, Iteration, MapNodes, Section, TimedList, Transformer +) +from devito.mpi.routines import ( + ComputeCall, HaloUpdateCall, HaloUpdateList, HaloWaitCall, HaloWaitList, MPICall, + MPIList, RemainderCall +) from devito.passes.iet.engine import iet_pass from devito.types import TempArray, TempFunction, Timer diff --git a/devito/passes/iet/langbase.py b/devito/passes/iet/langbase.py index a99fcdcb41..808cdba3f0 100644 --- a/devito/passes/iet/langbase.py +++ b/devito/passes/iet/langbase.py @@ -1,19 +1,21 @@ +from abc import ABC from functools import singledispatch from itertools import takewhile -from abc import ABC import cgen as c from devito.data import FULL -from devito.ir import (DummyExpr, Call, Conditional, Expression, List, Prodder, - ParallelIteration, ParallelBlock, PointerCast, EntryFunction, - AsyncCallable, FindNodes, FindSymbols, IsPerfectIteration) +from devito.ir import ( + AsyncCallable, Call, Conditional, DummyExpr, EntryFunction, Expression, FindNodes, + FindSymbols, IsPerfectIteration, List, ParallelBlock, ParallelIteration, PointerCast, + Prodder +) from devito.mpi.distributed import MPICommObject from devito.passes import is_on_device from devito.passes.iet.engine import iet_pass from devito.symbolics import Byref, CondNe, SizeOf from devito.tools import as_list, is_integer, prod -from devito.types import Symbol, QueueID, Wildcard +from devito.types import QueueID, Symbol, Wildcard __all__ = ['LangBB', 'LangTransformer'] diff --git a/devito/passes/iet/languages/C.py b/devito/passes/iet/languages/C.py index 93451c1861..0e04d0feeb 100644 --- a/devito/passes/iet/languages/C.py +++ b/devito/passes/iet/languages/C.py @@ -1,11 +1,11 @@ import numpy as np 
from sympy.printing.c import C99CodePrinter -from devito.ir import Call, BasePrinter +from devito.ir import BasePrinter, Call from devito.passes.iet.definitions import DataManager -from devito.passes.iet.orchestration import Orchestrator from devito.passes.iet.langbase import LangBB from devito.passes.iet.languages.utils import _atomic_add_split +from devito.passes.iet.orchestration import Orchestrator from devito.symbolics import c_complex, c_double_complex from devito.symbolics.extended_sympy import UnaryOp from devito.tools import dtype_to_cstr diff --git a/devito/passes/iet/languages/CXX.py b/devito/passes/iet/languages/CXX.py index 0157cbd30f..5453ea58d6 100644 --- a/devito/passes/iet/languages/CXX.py +++ b/devito/passes/iet/languages/CXX.py @@ -1,12 +1,12 @@ import numpy as np from sympy.printing.cxx import CXX11CodePrinter -from devito.ir import Call, UsingNamespace, BasePrinter +from devito.ir import BasePrinter, Call, UsingNamespace from devito.passes.iet.definitions import DataManager -from devito.passes.iet.orchestration import Orchestrator from devito.passes.iet.langbase import LangBB from devito.passes.iet.languages.utils import _atomic_add_split -from devito.symbolics import c_complex, c_double_complex, IndexedPointer, cast, Byref +from devito.passes.iet.orchestration import Orchestrator +from devito.symbolics import Byref, IndexedPointer, c_complex, c_double_complex, cast from devito.tools import dtype_to_cstr __all__ = ['CXXBB', 'CXXDataManager', 'CXXOrchestrator'] diff --git a/devito/passes/iet/languages/openacc.py b/devito/passes/iet/languages/openacc.py index 706b45bb17..c04a81fccd 100644 --- a/devito/passes/iet/languages/openacc.py +++ b/devito/passes/iet/languages/openacc.py @@ -1,18 +1,21 @@ import numpy as np from devito.arch import AMDGPUX, NVIDIAX -from devito.ir import (Call, DeviceCall, DummyExpr, EntryFunction, List, Block, - ParallelTree, Pragma, Return, FindSymbols, make_callable) -from devito.passes import needs_transfer, is_on_device 
+from devito.ir import ( + Block, Call, DeviceCall, DummyExpr, EntryFunction, FindSymbols, List, ParallelTree, + Pragma, Return, make_callable +) +from devito.passes import is_on_device, needs_transfer from devito.passes.iet.definitions import DeviceAwareDataManager from devito.passes.iet.engine import iet_pass -from devito.passes.iet.orchestration import Orchestrator -from devito.passes.iet.parpragma import (PragmaDeviceAwareTransformer, PragmaLangBB, - PragmaIteration, PragmaTransfer) from devito.passes.iet.languages.CXX import CXXBB, CXXPrinter -from devito.passes.iet.languages.openmp import OmpRegion, OmpIteration +from devito.passes.iet.languages.openmp import OmpIteration, OmpRegion +from devito.passes.iet.orchestration import Orchestrator +from devito.passes.iet.parpragma import ( + PragmaDeviceAwareTransformer, PragmaIteration, PragmaLangBB, PragmaTransfer +) from devito.symbolics import FieldFromPointer, Macro, cast -from devito.tools import filter_ordered, UnboundTuple +from devito.tools import UnboundTuple, filter_ordered from devito.types import Symbol __all__ = ['AccOrchestrator', 'DeviceAccDataManager', 'DeviceAccizer'] diff --git a/devito/passes/iet/languages/openmp.py b/devito/passes/iet/languages/openmp.py index c12de9a617..e7027ac272 100644 --- a/devito/passes/iet/languages/openmp.py +++ b/devito/passes/iet/languages/openmp.py @@ -1,22 +1,27 @@ from functools import cached_property -from packaging.version import Version import cgen as c +from packaging.version import Version from sympy import And, Ne, Not -from devito.arch import AMDGPUX, NVIDIAX, INTELGPUX, PVC -from devito.arch.compiler import GNUCompiler, NvidiaCompiler, CustomCompiler -from devito.ir import (Call, Conditional, DeviceCall, List, Pragma, Prodder, - ParallelBlock, PointerCast, While, FindSymbols) +from devito.arch import AMDGPUX, INTELGPUX, NVIDIAX, PVC +from devito.arch.compiler import CustomCompiler, GNUCompiler, NvidiaCompiler +from devito.ir import ( + Call, Conditional, 
DeviceCall, FindSymbols, List, ParallelBlock, PointerCast, Pragma, + Prodder, While +) from devito.passes.iet.definitions import DataManager, DeviceAwareDataManager from devito.passes.iet.langbase import LangBB -from devito.passes.iet.orchestration import Orchestrator -from devito.passes.iet.parpragma import (PragmaSimdTransformer, PragmaShmTransformer, - PragmaDeviceAwareTransformer, PragmaLangBB, - PragmaIteration, PragmaTransfer) +from devito.passes.iet.languages.C import CBB +from devito.passes.iet.languages.C import atomic_add as c_atomic_add +from devito.passes.iet.languages.CXX import CXXBB +from devito.passes.iet.languages.CXX import atomic_add as cxx_atomic_add from devito.passes.iet.languages.utils import joins -from devito.passes.iet.languages.C import CBB, atomic_add as c_atomic_add -from devito.passes.iet.languages.CXX import CXXBB, atomic_add as cxx_atomic_add +from devito.passes.iet.orchestration import Orchestrator +from devito.passes.iet.parpragma import ( + PragmaDeviceAwareTransformer, PragmaIteration, PragmaLangBB, PragmaShmTransformer, + PragmaSimdTransformer, PragmaTransfer +) from devito.symbolics import CondEq, DefFunction from devito.tools import filter_ordered diff --git a/devito/passes/iet/languages/targets.py b/devito/passes/iet/languages/targets.py index 2993a5b3e6..c998b9315a 100644 --- a/devito/passes/iet/languages/targets.py +++ b/devito/passes/iet/languages/targets.py @@ -1,13 +1,14 @@ +from devito.passes.iet.instrument import instrument from devito.passes.iet.languages.C import CDataManager, COrchestrator, CPrinter from devito.passes.iet.languages.CXX import CXXDataManager, CXXOrchestrator, CXXPrinter -from devito.passes.iet.languages.openmp import (SimdOmpizer, Ompizer, DeviceOmpizer, - OmpDataManager, DeviceOmpDataManager, - OmpOrchestrator, DeviceOmpOrchestrator, - CXXSimdOmpizer, CXXOmpizer, - CXXOmpDataManager, CXXOmpOrchestrator) -from devito.passes.iet.languages.openacc import (DeviceAccizer, DeviceAccDataManager, - 
AccOrchestrator, AccPrinter) -from devito.passes.iet.instrument import instrument +from devito.passes.iet.languages.openacc import ( + AccOrchestrator, AccPrinter, DeviceAccDataManager, DeviceAccizer +) +from devito.passes.iet.languages.openmp import ( + CXXOmpDataManager, CXXOmpizer, CXXOmpOrchestrator, CXXSimdOmpizer, + DeviceOmpDataManager, DeviceOmpizer, DeviceOmpOrchestrator, OmpDataManager, Ompizer, + OmpOrchestrator, SimdOmpizer +) __all__ = [ 'COmpTarget', diff --git a/devito/passes/iet/languages/utils.py b/devito/passes/iet/languages/utils.py index e22e345791..186017c4d6 100644 --- a/devito/passes/iet/languages/utils.py +++ b/devito/passes/iet/languages/utils.py @@ -1,6 +1,6 @@ import numpy as np -from devito import Real, Imag +from devito import Imag, Real from devito.exceptions import InvalidOperator from devito.ir import List diff --git a/devito/passes/iet/linearization.py b/devito/passes/iet/linearization.py index 844959b0b5..c75b2334ce 100644 --- a/devito/passes/iet/linearization.py +++ b/devito/passes/iet/linearization.py @@ -3,13 +3,14 @@ import numpy as np from devito.data import FULL -from devito.ir import (BlankLine, Call, DummyExpr, Dereference, List, PointerCast, - Transfer, FindNodes, FindSymbols, Transformer, Uxreplace, - IMask) +from devito.ir import ( + BlankLine, Call, Dereference, DummyExpr, FindNodes, FindSymbols, IMask, List, + PointerCast, Transfer, Transformer, Uxreplace +) from devito.passes.iet.engine import iet_pass from devito.passes.iet.parpragma import PragmaIteration from devito.tools import filter_ordered, flatten, prod -from devito.types import Array, Bundle, Symbol, FIndexed, Wildcard +from devito.types import Array, Bundle, FIndexed, Symbol, Wildcard from devito.types.dense import DiscreteFunction __all__ = ['linearize'] diff --git a/devito/passes/iet/misc.py b/devito/passes/iet/misc.py index dd1bf2814b..3b397f350e 100644 --- a/devito/passes/iet/misc.py +++ b/devito/passes/iet/misc.py @@ -6,15 +6,16 @@ from 
devito.finite_differences import Max, Min from devito.finite_differences.differentiable import SafeInv -from devito.ir import (Any, Forward, DummyExpr, Iteration, EmptyList, Prodder, - FindApplications, FindNodes, FindSymbols, Transformer, - Uxreplace, filter_iterations, retrieve_iteration_tree, - pull_dims) +from devito.ir import ( + Any, DummyExpr, EmptyList, FindApplications, FindNodes, FindSymbols, Forward, + Iteration, Prodder, Transformer, Uxreplace, filter_iterations, pull_dims, + retrieve_iteration_tree +) +from devito.ir.iet.efunc import DeviceFunction, EntryFunction from devito.passes.iet.engine import iet_pass from devito.passes.iet.languages.C import CPrinter -from devito.ir.iet.efunc import DeviceFunction, EntryFunction -from devito.symbolics import (ValueLimit, evalrel, has_integer_args, limits_mapper, Cast) -from devito.tools import Bunch, as_mapper, filter_ordered, split, as_tuple +from devito.symbolics import Cast, ValueLimit, evalrel, has_integer_args, limits_mapper +from devito.tools import Bunch, as_mapper, as_tuple, filter_ordered, split from devito.types import FIndexed __all__ = [ diff --git a/devito/passes/iet/mpi.py b/devito/passes/iet/mpi.py index 6fe6eaa2c1..6830723314 100644 --- a/devito/passes/iet/mpi.py +++ b/devito/passes/iet/mpi.py @@ -1,11 +1,12 @@ from collections import defaultdict +from itertools import combinations from sympy import S -from itertools import combinations -from devito.ir.iet import (Call, Expression, HaloSpot, Iteration, FindNodes, - FindWithin, MapNodes, MapHaloSpots, Transformer, - retrieve_iteration_tree) +from devito.ir.iet import ( + Call, Expression, FindNodes, FindWithin, HaloSpot, Iteration, MapHaloSpots, MapNodes, + Transformer, retrieve_iteration_tree +) from devito.ir.support import PARALLEL, Scope from devito.mpi.reduction_scheme import DistReduce from devito.mpi.routines import HaloExchangeBuilder, ReductionBuilder diff --git a/devito/passes/iet/orchestration.py b/devito/passes/iet/orchestration.py 
index cd3cbf17b3..b2bc5b0caf 100644 --- a/devito/passes/iet/orchestration.py +++ b/devito/passes/iet/orchestration.py @@ -4,11 +4,13 @@ from sympy import Or from devito.exceptions import CompilationError -from devito.ir.iet import (Call, Callable, List, SyncSpot, FindNodes, Transformer, - BlankLine, BusyWait, DummyExpr, AsyncCall, AsyncCallable, - make_callable, derive_parameters) -from devito.ir.support import (WaitLock, WithLock, ReleaseLock, InitArray, - SyncArray, PrefetchUpdate, SnapOut, SnapIn) +from devito.ir.iet import ( + AsyncCall, AsyncCallable, BlankLine, BusyWait, Call, Callable, DummyExpr, FindNodes, + List, SyncSpot, Transformer, derive_parameters, make_callable +) +from devito.ir.support import ( + InitArray, PrefetchUpdate, ReleaseLock, SnapIn, SnapOut, SyncArray, WaitLock, WithLock +) from devito.passes.iet.engine import iet_pass from devito.passes.iet.langbase import LangBB from devito.symbolics import CondEq, CondNe diff --git a/devito/passes/iet/parpragma.py b/devito/passes/iet/parpragma.py index 880d819e2e..670d85a9a4 100644 --- a/devito/passes/iet/parpragma.py +++ b/devito/passes/iet/parpragma.py @@ -1,19 +1,21 @@ from collections import defaultdict from functools import cached_property -import numpy as np import cgen as c +import numpy as np from sympy import And, Max, true from devito.data import FULL -from devito.ir import (Conditional, DummyEq, Dereference, Expression, - ExpressionBundle, FindSymbols, FindNodes, ParallelIteration, - ParallelTree, Pragma, Prodder, Transfer, List, Transformer, - IsPerfectIteration, OpInc, filter_iterations, ccode, - retrieve_iteration_tree, IMask, VECTORIZED) +from devito.ir import ( + VECTORIZED, Conditional, Dereference, DummyEq, Expression, ExpressionBundle, + FindNodes, FindSymbols, IMask, IsPerfectIteration, List, OpInc, ParallelIteration, + ParallelTree, Pragma, Prodder, Transfer, Transformer, ccode, filter_iterations, + retrieve_iteration_tree +) from devito.passes.iet.engine import iet_pass -from 
devito.passes.iet.langbase import (LangBB, LangTransformer, DeviceAwareMixin, - ShmTransformer, make_sections_from_imask) +from devito.passes.iet.langbase import ( + DeviceAwareMixin, LangBB, LangTransformer, ShmTransformer, make_sections_from_imask +) from devito.symbolics import INT from devito.tools import as_tuple, flatten, is_integer, prod from devito.types import Symbol diff --git a/devito/symbolics/extended_dtypes.py b/devito/symbolics/extended_dtypes.py index eda71a0b74..1bb5fbb91b 100644 --- a/devito/symbolics/extended_dtypes.py +++ b/devito/symbolics/extended_dtypes.py @@ -1,9 +1,12 @@ import ctypes + import numpy as np -from devito.symbolics.extended_sympy import ReservedWord, Cast, ValueLimit -from devito.tools import (Bunch, float2, float3, float4, double2, double3, double4, # noqa - int2, int3, int4, ctypes_vector_mapper) +from devito.symbolics.extended_sympy import Cast, ReservedWord, ValueLimit +from devito.tools import ( # noqa + Bunch, ctypes_vector_mapper, double2, double3, double4, float2, float3, float4, int2, + int3, int4 +) from devito.tools.dtypes_lowering import dtype_mapper __all__ = ['cast', 'CustomType', 'limits_mapper', 'INT', 'FLOAT', 'BaseCast', # noqa diff --git a/devito/symbolics/extended_sympy.py b/devito/symbolics/extended_sympy.py index 3a6e61742c..d47a063bac 100644 --- a/devito/symbolics/extended_sympy.py +++ b/devito/symbolics/extended_sympy.py @@ -9,11 +9,11 @@ from sympy.core.decorators import call_highest_priority from sympy.logic.boolalg import BooleanFunction -from devito.finite_differences.elementary import Min, Max -from devito.tools import (Pickable, Bunch, as_tuple, is_integer, float2, # noqa - float3, float4, double2, double3, double4, int2, int3, - int4, dtype_to_ctype, ctypes_to_cstr, ctypes_vector_mapper, - ctypes_to_cstr) +from devito.finite_differences.elementary import Max, Min +from devito.tools import ( # noqa + Bunch, Pickable, as_tuple, ctypes_to_cstr, ctypes_vector_mapper, double2, double3, + double4, 
dtype_to_ctype, float2, float3, float4, int2, int3, int4, is_integer +) from devito.types import Symbol from devito.types.basic import Basic diff --git a/devito/symbolics/inspection.py b/devito/symbolics/inspection.py index 85b7b8bfa9..1a4a8babca 100644 --- a/devito/symbolics/inspection.py +++ b/devito/symbolics/inspection.py @@ -1,19 +1,19 @@ from functools import singledispatch import numpy as np -from sympy import (Function, Indexed, Integer, Mul, Number, - Pow, S, Symbol, Tuple) -from sympy.core.numbers import ImaginaryUnit +from sympy import Function, Indexed, Integer, Mul, Number, Pow, S, Symbol, Tuple from sympy.core.function import Application +from sympy.core.numbers import ImaginaryUnit from devito.finite_differences import Derivative from devito.finite_differences.differentiable import IndexDerivative from devito.logger import warning from devito.symbolics.extended_dtypes import INT -from devito.symbolics.extended_sympy import (CallFromPointer, Cast, - DefFunction, ReservedWord) +from devito.symbolics.extended_sympy import ( + CallFromPointer, Cast, DefFunction, ReservedWord +) from devito.symbolics.queries import q_routine -from devito.tools import as_tuple, prod, is_integer +from devito.tools import as_tuple, is_integer, prod from devito.tools.dtypes_lowering import infer_dtype __all__ = ['compare_ops', 'estimate_cost', 'has_integer_args', 'sympy_dtype'] diff --git a/devito/symbolics/manipulation.py b/devito/symbolics/manipulation.py index 70a4f49cd0..23dbf2b764 100644 --- a/devito/symbolics/manipulation.py +++ b/devito/symbolics/manipulation.py @@ -3,27 +3,24 @@ from functools import singledispatch import numpy as np -from sympy import Pow, Add, Mul, Min, Max, S, SympifyError, Tuple, sympify +from sympy import Add, Max, Min, Mul, Pow, S, SympifyError, Tuple, sympify from sympy.core.add import _addsort from sympy.core.mul import _mulsort -from devito.finite_differences.differentiable import ( - EvalDerivative, IndexDerivative -) -from 
devito.symbolics.extended_sympy import DefFunction, rfunc +from devito.finite_differences.differentiable import EvalDerivative, IndexDerivative from devito.symbolics.extended_dtypes import LONG +from devito.symbolics.extended_sympy import DefFunction, rfunc from devito.symbolics.queries import q_leaf -from devito.symbolics.search import ( - retrieve_indexed, retrieve_functions, retrieve_symbols -) -from devito.symbolics.unevaluation import ( - Add as UnevalAdd, Mul as UnevalMul, Pow as UnevalPow, UnevaluableMixin -) +from devito.symbolics.search import retrieve_functions, retrieve_indexed, retrieve_symbols +from devito.symbolics.unevaluation import Add as UnevalAdd +from devito.symbolics.unevaluation import Mul as UnevalMul +from devito.symbolics.unevaluation import Pow as UnevalPow +from devito.symbolics.unevaluation import UnevaluableMixin from devito.tools import as_list, as_tuple, flatten, split, transitive_closure -from devito.types.basic import Basic, Indexed from devito.types.array import ComponentAccess +from devito.types.basic import Basic, Indexed from devito.types.equation import Eq -from devito.types.relational import Le, Lt, Gt, Ge +from devito.types.relational import Ge, Gt, Le, Lt __all__ = [ 'Uxmapper', diff --git a/devito/symbolics/queries.py b/devito/symbolics/queries.py index dec4728254..76cb737294 100644 --- a/devito/symbolics/queries.py +++ b/devito/symbolics/queries.py @@ -1,15 +1,15 @@ from sympy import Eq, IndexedBase, Mod, S, diff, nan -from devito.symbolics.extended_sympy import (FieldFromComposite, FieldFromPointer, - IndexedPointer, IntDiv) +from devito.symbolics.extended_sympy import ( + FieldFromComposite, FieldFromPointer, IndexedPointer, IntDiv +) from devito.tools import as_tuple, is_integer +from devito.types.array import ComponentAccess from devito.types.basic import AbstractFunction from devito.types.constant import Constant from devito.types.dimension import Dimension -from devito.types.array import ComponentAccess from 
devito.types.object import AbstractObject - __all__ = [ 'q_affine', 'q_comp_acc', diff --git a/devito/symbolics/search.py b/devito/symbolics/search.py index 94072520ef..500ffd25ed 100644 --- a/devito/symbolics/search.py +++ b/devito/symbolics/search.py @@ -5,8 +5,9 @@ import numpy as np import sympy -from devito.symbolics.queries import (q_indexed, q_function, q_terminal, q_leaf, - q_symbol, q_dimension, q_derivative) +from devito.symbolics.queries import ( + q_derivative, q_dimension, q_function, q_indexed, q_leaf, q_symbol, q_terminal +) from devito.tools import as_tuple __all__ = [ diff --git a/devito/tools/abc.py b/devito/tools/abc.py index 4fe295ee07..f325778239 100644 --- a/devito/tools/abc.py +++ b/devito/tools/abc.py @@ -1,7 +1,6 @@ import abc from hashlib import sha1 - __all__ = ['Pickable', 'Reconstructable', 'Signer', 'Singleton', 'Stamp', 'Tag'] diff --git a/devito/tools/data_structures.py b/devito/tools/data_structures.py index 1e7e46ad54..48a7a342a7 100644 --- a/devito/tools/data_structures.py +++ b/devito/tools/data_structures.py @@ -1,14 +1,14 @@ -from collections import OrderedDict, deque -from collections.abc import Callable, Iterable, MutableSet, Mapping, Set -from functools import reduce, cached_property import json +from collections import OrderedDict, deque +from collections.abc import Callable, Iterable, Mapping, MutableSet, Set +from functools import cached_property, reduce import numpy as np from multidict import MultiDict from devito.tools import Pickable -from devito.tools.utils import as_tuple, filter_ordered, humanbytes from devito.tools.algorithms import toposort +from devito.tools.utils import as_tuple, filter_ordered, humanbytes __all__ = [ 'DAG', diff --git a/devito/tools/utils.py b/devito/tools/utils.py index 51adefd158..8b8cc2a72e 100644 --- a/devito/tools/utils.py +++ b/devito/tools/utils.py @@ -1,9 +1,9 @@ +import types from collections import OrderedDict from collections.abc import Iterable from functools import reduce, wraps 
from itertools import chain, combinations, groupby, product, zip_longest from operator import attrgetter, mul -import types import numpy as np import sympy diff --git a/devito/types/array.py b/devito/types/array.py index 845eef1976..6f2914756b 100644 --- a/devito/types/array.py +++ b/devito/types/array.py @@ -1,11 +1,13 @@ -from ctypes import POINTER, Structure, c_void_p, c_int, c_uint64 +from ctypes import POINTER, Structure, c_int, c_uint64, c_void_p from functools import cached_property import numpy as np from sympy import Expr, cacheit -from devito.tools import (Pickable, as_tuple, c_restrict_void_p, - dtype_to_ctype, dtypes_vector_mapper, is_integer) +from devito.tools import ( + Pickable, as_tuple, c_restrict_void_p, dtype_to_ctype, dtypes_vector_mapper, + is_integer +) from devito.types.basic import AbstractFunction, LocalType from devito.types.utils import CtypesFactory, DimensionTuple diff --git a/devito/types/basic.py b/devito/types/basic.py index b49d030031..a80adcf20e 100644 --- a/devito/types/basic.py +++ b/devito/types/basic.py @@ -1,19 +1,20 @@ import abc import inspect -from ctypes import POINTER, _Pointer, c_char_p, c_char, Structure -from functools import reduce, cached_property +from ctypes import POINTER, Structure, _Pointer, c_char, c_char_p +from functools import cached_property, reduce from operator import mul import numpy as np import sympy - from sympy.core.assumptions import _assume_rules from sympy.core.decorators import call_highest_priority from devito.data import default_allocator from devito.parameters import configuration -from devito.tools import (Pickable, as_tuple, dtype_to_ctype, - frozendict, memoized_meth, sympy_mutex, CustomDtype) +from devito.tools import ( + CustomDtype, Pickable, as_tuple, dtype_to_ctype, frozendict, memoized_meth, + sympy_mutex +) from devito.types.args import ArgProvider from devito.types.caching import Cached, Uncached from devito.types.lazy import Evaluable diff --git a/devito/types/caching.py 
b/devito/types/caching.py index a1e359bd4d..3c7814021e 100644 --- a/devito/types/caching.py +++ b/devito/types/caching.py @@ -6,7 +6,6 @@ from devito.tools import safe_dict_copy - __all__ = ['CacheManager', 'Cached', 'Uncached', '_SymbolCache'] _SymbolCache = {} diff --git a/devito/types/dense.py b/devito/types/dense.py index eef4682c65..ffabf59d4f 100644 --- a/devito/types/dense.py +++ b/devito/types/dense.py @@ -1,33 +1,34 @@ from collections import namedtuple -from ctypes import POINTER, Structure, c_int, c_ulong, c_void_p, cast, byref -from functools import wraps, reduce +from ctypes import POINTER, Structure, byref, c_int, c_ulong, c_void_p, cast +from functools import cached_property, reduce, wraps from operator import mul import numpy as np import sympy -from functools import cached_property from devito.builtins import assign -from devito.data import (DOMAIN, OWNED, HALO, NOPAD, FULL, LEFT, CENTER, RIGHT, - Data, default_allocator) +from devito.data import ( + CENTER, DOMAIN, FULL, HALO, LEFT, NOPAD, OWNED, RIGHT, Data, default_allocator +) from devito.data.allocators import DataReference from devito.deprecations import deprecations from devito.exceptions import InvalidArgument +from devito.finite_differences import Differentiable, generate_fd_shortcuts +from devito.finite_differences.tools import fd_weights_registry from devito.logger import debug, warning from devito.mpi import MPI from devito.parameters import configuration -from devito.symbolics import FieldFromPointer, normalize_args, IndexedPointer -from devito.finite_differences import Differentiable, generate_fd_shortcuts -from devito.finite_differences.tools import fd_weights_registry -from devito.tools import (ReducerMap, as_tuple, c_restrict_void_p, flatten, - is_integer, memoized_meth, dtype_to_ctype, humanbytes, - mpi4py_mapper) -from devito.types.dimension import Dimension +from devito.symbolics import FieldFromPointer, IndexedPointer, normalize_args +from devito.tools import ( + ReducerMap, 
as_tuple, c_restrict_void_p, dtype_to_ctype, flatten, humanbytes, + is_integer, memoized_meth, mpi4py_mapper +) from devito.types.args import ArgProvider -from devito.types.caching import CacheManager from devito.types.basic import AbstractFunction +from devito.types.caching import CacheManager +from devito.types.dimension import Dimension from devito.types.utils import ( - Buffer, DimensionTuple, NODE, CELL, Size, Staggering, host_layer + CELL, NODE, Buffer, DimensionTuple, Size, Staggering, host_layer ) __all__ = ['Function', 'SubFunction', 'TempFunction', 'TimeFunction'] diff --git a/devito/types/dimension.py b/devito/types/dimension.py index d83efece3c..0261b33245 100644 --- a/devito/types/dimension.py +++ b/devito/types/dimension.py @@ -1,10 +1,10 @@ +import math from collections import namedtuple from functools import cached_property -import math +import numpy as np import sympy from sympy.core.decorators import call_highest_priority -import numpy as np from devito.data import LEFT, RIGHT from devito.deprecations import deprecations @@ -12,10 +12,9 @@ from devito.logger import debug from devito.tools import Pickable, is_integer, is_number, memoized_meth from devito.types.args import ArgProvider -from devito.types.basic import Symbol, DataSymbol, Scalar +from devito.types.basic import DataSymbol, Scalar, Symbol from devito.types.constant import Constant -from devito.types.relational import relational_min, relational_max - +from devito.types.relational import relational_max, relational_min __all__ = [ 'BlockDimension', diff --git a/devito/types/equation.py b/devito/types/equation.py index b3353e2cb4..b1c918978d 100644 --- a/devito/types/equation.py +++ b/devito/types/equation.py @@ -1,10 +1,10 @@ """User API to specify equations.""" -import sympy - from functools import cached_property +import sympy + from devito.deprecations import deprecations -from devito.tools import as_tuple, frozendict, Pickable +from devito.tools import Pickable, as_tuple, frozendict 
from devito.types.lazy import Evaluable __all__ = ['Eq', 'Inc', 'ReduceMax', 'ReduceMin'] diff --git a/devito/types/grid.py b/devito/types/grid.py index 005d92dded..f3e6c6b90b 100644 --- a/devito/types/grid.py +++ b/devito/types/grid.py @@ -7,18 +7,19 @@ from sympy import prod from devito import configuration -from devito.data import LEFT, RIGHT, CENTER +from devito.data import CENTER, LEFT, RIGHT +from devito.deprecations import deprecations from devito.logger import warning -from devito.mpi import Distributor, MPI, SubDistributor +from devito.mpi import MPI, Distributor, SubDistributor from devito.tools import ReducerMap, as_tuple, frozendict from devito.types.args import ArgProvider from devito.types.basic import Scalar from devito.types.dense import Function +from devito.types.dimension import ( + DefaultDimension, Dimension, MultiSubDimension, SpaceDimension, Spacing, + SteppingDimension, SubDimension, TimeDimension +) from devito.types.utils import DimensionTuple -from devito.types.dimension import (Dimension, SpaceDimension, TimeDimension, - Spacing, SteppingDimension, SubDimension, - MultiSubDimension, DefaultDimension) -from devito.deprecations import deprecations __all__ = ['Border', 'Grid', 'SubDomain', 'SubDomainSet'] diff --git a/devito/types/misc.py b/devito/types/misc.py index 599d606b60..286e6b4886 100644 --- a/devito/types/misc.py +++ b/devito/types/misc.py @@ -2,15 +2,16 @@ import numpy as np import sympy + try: from sympy.core.core import ordering_of_classes except ImportError: # Moved in 1.13 from sympy.core.basic import ordering_of_classes -from devito.types import Array, CompositeObject, Indexed, Symbol, LocalObject -from devito.types.basic import IndexedData from devito.tools import CustomDtype, Pickable, as_tuple, frozendict +from devito.types import Array, CompositeObject, Indexed, LocalObject, Symbol +from devito.types.basic import IndexedData __all__ = [ 'Auto', diff --git a/devito/types/object.py b/devito/types/object.py index 
05fb6e1069..51790b35bc 100644 --- a/devito/types/object.py +++ b/devito/types/object.py @@ -4,8 +4,8 @@ from devito.tools import Pickable, as_tuple, sympy_mutex from devito.types.args import ArgProvider -from devito.types.caching import Uncached from devito.types.basic import Basic, LocalType +from devito.types.caching import Uncached from devito.types.utils import CtypesFactory __all__ = ['CompositeObject', 'LocalObject', 'Object'] diff --git a/devito/types/sparse.py b/devito/types/sparse.py index eaee37528a..c79ef05654 100644 --- a/devito/types/sparse.py +++ b/devito/types/sparse.py @@ -1,26 +1,28 @@ from collections import OrderedDict +from functools import cached_property from itertools import product -import sympy import numpy as np -from functools import cached_property +import sympy from devito.finite_differences import generate_fd_shortcuts from devito.mpi import MPI, SparseDistributor -from devito.operations import (LinearInterpolator, PrecomputedInterpolator, - SincInterpolator) +from devito.operations import ( + LinearInterpolator, PrecomputedInterpolator, SincInterpolator +) from devito.symbolics import indexify, retrieve_function_carriers -from devito.tools import (ReducerMap, as_tuple, flatten, prod, filter_ordered, - is_integer, dtype_to_mpidtype) +from devito.tools import ( + ReducerMap, as_tuple, dtype_to_mpidtype, filter_ordered, flatten, is_integer, prod +) +from devito.types.basic import Symbol from devito.types.dense import DiscreteFunction, SubFunction -from devito.types.dimension import (Dimension, ConditionalDimension, DefaultDimension, - DynamicDimension) +from devito.types.dimension import ( + ConditionalDimension, DefaultDimension, Dimension, DynamicDimension +) from devito.types.dimension import dimensions as mkdims -from devito.types.basic import Symbol from devito.types.equation import Eq, Inc from devito.types.utils import IgnoreDimSort - __all__ = [ 'MatrixSparseTimeFunction', 'PrecomputedSparseFunction', diff --git 
a/devito/types/tensor.py b/devito/types/tensor.py index f8bc533a3c..7a3f41d3e0 100644 --- a/devito/types/tensor.py +++ b/devito/types/tensor.py @@ -2,11 +2,13 @@ from functools import cached_property import numpy as np + try: from sympy.matrices.matrixbase import MatrixBase except ImportError: # Before 1.13 from sympy.matrices.matrices import MatrixBase + from sympy.core.sympify import converter as sympify_converter from devito.finite_differences import Differentiable diff --git a/devito/types/utils.py b/devito/types/utils.py index d8a7877cf1..25b697f2bd 100644 --- a/devito/types/utils.py +++ b/devito/types/utils.py @@ -3,6 +3,7 @@ from functools import cached_property from devito.tools import EnrichedTuple, Tag + # Additional Function-related APIs __all__ = [ diff --git a/examples/cfd/example_diffusion.py b/examples/cfd/example_diffusion.py index 4acdcdeeca..ccaf5f3faf 100644 --- a/examples/cfd/example_diffusion.py +++ b/examples/cfd/example_diffusion.py @@ -18,7 +18,7 @@ import numpy as np import sympy -from devito import Grid, Eq, Operator, TimeFunction, solve +from devito import Eq, Grid, Operator, TimeFunction, solve from devito.logger import log diff --git a/examples/cfd/tools.py b/examples/cfd/tools.py index 44766a049a..ee23177cac 100644 --- a/examples/cfd/tools.py +++ b/examples/cfd/tools.py @@ -1,7 +1,7 @@ +import numpy as np +from matplotlib import cm, pyplot from mpl_toolkits.mplot3d import Axes3D # noqa -import numpy as np -from matplotlib import pyplot, cm from devito.logger import warning diff --git a/examples/misc/linalg.py b/examples/misc/linalg.py index 45aadfb41e..948d4d8c47 100644 --- a/examples/misc/linalg.py +++ b/examples/misc/linalg.py @@ -1,6 +1,6 @@ import click -from devito import Inc, Operator, Function, dimensions, info +from devito import Function, Inc, Operator, dimensions, info from devito.tools import as_tuple __all__ = [ diff --git a/examples/seismic/acoustic/acoustic_example.py b/examples/seismic/acoustic/acoustic_example.py index 
d45d935b8b..9a994340e9 100644 --- a/examples/seismic/acoustic/acoustic_example.py +++ b/examples/seismic/acoustic/acoustic_example.py @@ -1,13 +1,14 @@ import numpy as np + try: import pytest except ImportError: pass +from devito import Constant, Function, norm, smooth from devito.logger import info -from devito import Constant, Function, smooth, norm +from examples.seismic import demo_model, seismic_args, setup_geometry from examples.seismic.acoustic import AcousticWaveSolver -from examples.seismic import demo_model, setup_geometry, seismic_args def acoustic_setup(shape=(50, 50, 50), spacing=(15.0, 15.0, 15.0), diff --git a/examples/seismic/acoustic/operators.py b/examples/seismic/acoustic/operators.py index c8a2fdec5c..6854d22d29 100644 --- a/examples/seismic/acoustic/operators.py +++ b/examples/seismic/acoustic/operators.py @@ -1,5 +1,5 @@ -from devito import Eq, Operator, Function, TimeFunction, Inc, solve, sign -from devito.symbolics import retrieve_functions, INT, retrieve_derivatives +from devito import Eq, Function, Inc, Operator, TimeFunction, sign, solve +from devito.symbolics import INT, retrieve_derivatives, retrieve_functions def freesurface(model, eq): diff --git a/examples/seismic/acoustic/wavesolver.py b/examples/seismic/acoustic/wavesolver.py index e191b34972..bc12c7b041 100644 --- a/examples/seismic/acoustic/wavesolver.py +++ b/examples/seismic/acoustic/wavesolver.py @@ -1,7 +1,7 @@ -from devito import Function, TimeFunction, DevitoCheckpoint, CheckpointOperator, Revolver +from devito import CheckpointOperator, DevitoCheckpoint, Function, Revolver, TimeFunction from devito.tools import memoized_meth from examples.seismic.acoustic.operators import ( - ForwardOperator, AdjointOperator, GradientOperator, BornOperator + AdjointOperator, BornOperator, ForwardOperator, GradientOperator ) diff --git a/examples/seismic/elastic/elastic_example.py b/examples/seismic/elastic/elastic_example.py index f5f046eb50..5ce82a696b 100644 --- 
a/examples/seismic/elastic/elastic_example.py +++ b/examples/seismic/elastic/elastic_example.py @@ -1,12 +1,13 @@ import numpy as np + try: import pytest except ImportError: pass from devito import norm from devito.logger import info +from examples.seismic import demo_model, seismic_args, setup_geometry from examples.seismic.elastic import ElasticWaveSolver -from examples.seismic import demo_model, setup_geometry, seismic_args def elastic_setup(shape=(50, 50), spacing=(15.0, 15.0), tn=500., space_order=4, diff --git a/examples/seismic/elastic/operators.py b/examples/seismic/elastic/operators.py index 1576ed133a..0153870275 100644 --- a/examples/seismic/elastic/operators.py +++ b/examples/seismic/elastic/operators.py @@ -1,5 +1,6 @@ -from devito import Eq, Operator, VectorTimeFunction, TensorTimeFunction -from devito import div, grad, diag, solve +from devito import ( + Eq, Operator, TensorTimeFunction, VectorTimeFunction, diag, div, grad, solve +) def src_rec(v, tau, model, geometry): diff --git a/examples/seismic/elastic/wavesolver.py b/examples/seismic/elastic/wavesolver.py index 09a43ca62a..ba112dcdaa 100644 --- a/examples/seismic/elastic/wavesolver.py +++ b/examples/seismic/elastic/wavesolver.py @@ -1,6 +1,5 @@ +from devito import TensorTimeFunction, VectorTimeFunction from devito.tools import memoized_meth -from devito import VectorTimeFunction, TensorTimeFunction - from examples.seismic.elastic.operators import ForwardOperator diff --git a/examples/seismic/inversion/fwi.py b/examples/seismic/inversion/fwi.py index 831b981763..b1445e6466 100644 --- a/examples/seismic/inversion/fwi.py +++ b/examples/seismic/inversion/fwi.py @@ -1,12 +1,10 @@ import numpy as np +from inversion_utils import compute_residual, update_with_box -from devito import configuration, Function, norm, mmax, mmin - -from examples.seismic import demo_model, AcquisitionGeometry +from devito import Function, configuration, mmax, mmin, norm +from examples.seismic import AcquisitionGeometry, 
demo_model from examples.seismic.acoustic import AcousticWaveSolver -from inversion_utils import compute_residual, update_with_box - # Turn off logging configuration['log-level'] = "ERROR" # Setup diff --git a/examples/seismic/inversion/inversion_utils.py b/examples/seismic/inversion/inversion_utils.py index 2d51275f92..789d31c02f 100644 --- a/examples/seismic/inversion/inversion_utils.py +++ b/examples/seismic/inversion/inversion_utils.py @@ -1,6 +1,6 @@ import numpy as np -from devito import Operator, Eq, Min, Max +from devito import Eq, Max, Min, Operator def compute_residual(res, dobs, dsyn): diff --git a/examples/seismic/model.py b/examples/seismic/model.py index 16d8f7a3a6..579d7a3827 100644 --- a/examples/seismic/model.py +++ b/examples/seismic/model.py @@ -1,13 +1,16 @@ import numpy as np from sympy import finite_diff_weights as fd_w + try: import pytest except: pass -from devito import (Grid, SubDomain, Function, Constant, warning, - SubDimension, Eq, Inc, Operator, div, sin, Abs) -from devito.builtins import initialize_function, gaussian_smooth, mmax, mmin +from devito import ( + Abs, Constant, Eq, Function, Grid, Inc, Operator, SubDimension, SubDomain, div, sin, + warning +) +from devito.builtins import gaussian_smooth, initialize_function, mmax, mmin from devito.tools import as_tuple __all__ = [ diff --git a/examples/seismic/plotting.py b/examples/seismic/plotting.py index 953290b8de..a5974d212b 100644 --- a/examples/seismic/plotting.py +++ b/examples/seismic/plotting.py @@ -1,4 +1,5 @@ import numpy as np + try: import matplotlib as mpl import matplotlib.pyplot as plt diff --git a/examples/seismic/self_adjoint/example_iso.py b/examples/seismic/self_adjoint/example_iso.py index 7899e43355..961415b13f 100644 --- a/examples/seismic/self_adjoint/example_iso.py +++ b/examples/seismic/self_adjoint/example_iso.py @@ -1,11 +1,9 @@ import numpy as np +from devito import Function, smooth from devito.logger import info -from devito import smooth, Function - -from 
examples.seismic import setup_geometry, Model, seismic_args -from examples.seismic.self_adjoint import (setup_w_over_q, - SaIsoAcousticWaveSolver) +from examples.seismic import Model, seismic_args, setup_geometry +from examples.seismic.self_adjoint import SaIsoAcousticWaveSolver, setup_w_over_q def acoustic_sa_setup(shape=(50, 50, 50), spacing=(10.0, 10.0, 10.0), diff --git a/examples/seismic/self_adjoint/operators.py b/examples/seismic/self_adjoint/operators.py index f4abb81de2..cdb0858e73 100644 --- a/examples/seismic/self_adjoint/operators.py +++ b/examples/seismic/self_adjoint/operators.py @@ -1,4 +1,4 @@ -from devito import Eq, Operator, Function, TimeFunction +from devito import Eq, Function, Operator, TimeFunction def iso_stencil(field, model, **kwargs): diff --git a/examples/seismic/self_adjoint/test_utils.py b/examples/seismic/self_adjoint/test_utils.py index c957b6e3f1..02527c85b5 100644 --- a/examples/seismic/self_adjoint/test_utils.py +++ b/examples/seismic/self_adjoint/test_utils.py @@ -1,9 +1,10 @@ import numpy as np + try: import pytest except: pass -from devito import Grid, Function +from devito import Function, Grid from examples.seismic.self_adjoint import setup_w_over_q diff --git a/examples/seismic/self_adjoint/test_wavesolver_iso.py b/examples/seismic/self_adjoint/test_wavesolver_iso.py index 2e4b9671b2..368a9f4b8d 100644 --- a/examples/seismic/self_adjoint/test_wavesolver_iso.py +++ b/examples/seismic/self_adjoint/test_wavesolver_iso.py @@ -1,13 +1,15 @@ -from scipy.special import hankel2 import numpy as np +from scipy.special import hankel2 + try: import pytest except: pass -from devito import Grid, Function, Eq, Operator, info -from examples.seismic import RickerSource, TimeAxis, Model, AcquisitionGeometry -from examples.seismic.self_adjoint import (acoustic_sa_setup, setup_w_over_q, - SaIsoAcousticWaveSolver) +from devito import Eq, Function, Grid, Operator, info +from examples.seismic import AcquisitionGeometry, Model, RickerSource, 
TimeAxis +from examples.seismic.self_adjoint import ( + SaIsoAcousticWaveSolver, acoustic_sa_setup, setup_w_over_q +) # Defaults in global scope shapes = [(71, 61), (71, 61, 51)] diff --git a/examples/seismic/self_adjoint/utils.py b/examples/seismic/self_adjoint/utils.py index 13c14ee2ac..6cdcf06c9d 100644 --- a/examples/seismic/self_adjoint/utils.py +++ b/examples/seismic/self_adjoint/utils.py @@ -1,5 +1,6 @@ import numpy as np -from devito import Eq, Operator, SubDimension, exp, Min, Abs + +from devito import Abs, Eq, Min, Operator, SubDimension, exp __all__ = ['setup_w_over_q'] diff --git a/examples/seismic/self_adjoint/wavesolver.py b/examples/seismic/self_adjoint/wavesolver.py index 219d37633e..fcb292f207 100644 --- a/examples/seismic/self_adjoint/wavesolver.py +++ b/examples/seismic/self_adjoint/wavesolver.py @@ -1,7 +1,8 @@ from devito import Function, TimeFunction from devito.tools import memoized_meth -from examples.seismic.self_adjoint.operators import IsoFwdOperator, IsoAdjOperator, \ - IsoJacobianFwdOperator, IsoJacobianAdjOperator +from examples.seismic.self_adjoint.operators import ( + IsoAdjOperator, IsoFwdOperator, IsoJacobianAdjOperator, IsoJacobianFwdOperator +) class SaIsoAcousticWaveSolver: diff --git a/examples/seismic/source.py b/examples/seismic/source.py index d0a44d612d..12e420565d 100644 --- a/examples/seismic/source.py +++ b/examples/seismic/source.py @@ -1,6 +1,8 @@ from functools import cached_property -from scipy import interpolate + import numpy as np +from scipy import interpolate + try: import matplotlib.pyplot as plt except: diff --git a/examples/seismic/test_seismic_utils.py b/examples/seismic/test_seismic_utils.py index efe12e2a49..76f5ff4560 100644 --- a/examples/seismic/test_seismic_utils.py +++ b/examples/seismic/test_seismic_utils.py @@ -5,7 +5,7 @@ import numpy as np from devito import norm -from examples.seismic import Model, setup_geometry, AcquisitionGeometry +from examples.seismic import AcquisitionGeometry, Model, 
setup_geometry def not_bcs(bc): diff --git a/examples/seismic/tti/operators.py b/examples/seismic/tti/operators.py index 6f7637c4a6..3e79d974bd 100644 --- a/examples/seismic/tti/operators.py +++ b/examples/seismic/tti/operators.py @@ -1,5 +1,4 @@ -from devito import (Eq, Operator, Function, TimeFunction, NODE, Inc, solve, - cos, sin, sqrt) +from devito import NODE, Eq, Function, Inc, Operator, TimeFunction, cos, sin, solve, sqrt from examples.seismic.acoustic.operators import freesurface diff --git a/examples/seismic/tti/tti_example.py b/examples/seismic/tti/tti_example.py index d65a8ce88b..aebbac62c7 100644 --- a/examples/seismic/tti/tti_example.py +++ b/examples/seismic/tti/tti_example.py @@ -1,12 +1,12 @@ import numpy as np + try: import pytest except ImportError: pass -from devito import Function, smooth, norm, info, Constant - -from examples.seismic import demo_model, setup_geometry, seismic_args +from devito import Constant, Function, info, norm, smooth +from examples.seismic import demo_model, seismic_args, setup_geometry from examples.seismic.tti import AnisotropicWaveSolver diff --git a/examples/seismic/tti/wavesolver.py b/examples/seismic/tti/wavesolver.py index 2534bef750..3ca1c740d4 100644 --- a/examples/seismic/tti/wavesolver.py +++ b/examples/seismic/tti/wavesolver.py @@ -1,9 +1,11 @@ -from devito import (Function, TimeFunction, warning, NODE, - DevitoCheckpoint, CheckpointOperator, Revolver) +from devito import ( + NODE, CheckpointOperator, DevitoCheckpoint, Function, Revolver, TimeFunction, warning +) from devito.tools import memoized_meth -from examples.seismic.tti.operators import ForwardOperator, AdjointOperator -from examples.seismic.tti.operators import JacobianOperator, JacobianAdjOperator -from examples.seismic.tti.operators import particle_velocity_fields +from examples.seismic.tti.operators import ( + AdjointOperator, ForwardOperator, JacobianAdjOperator, JacobianOperator, + particle_velocity_fields +) class AnisotropicWaveSolver: diff 
--git a/examples/seismic/utils.py b/examples/seismic/utils.py index d691c101c8..cacce8a3ee 100644 --- a/examples/seismic/utils.py +++ b/examples/seismic/utils.py @@ -1,7 +1,8 @@ -import numpy as np from argparse import Action, ArgumentError, ArgumentParser -from devito import error, configuration, warning +import numpy as np + +from devito import configuration, error, warning from devito.tools import Pickable from devito.types.sparse import _default_radius diff --git a/examples/seismic/viscoacoustic/operators.py b/examples/seismic/viscoacoustic/operators.py index d237d43ea6..de84aad76f 100755 --- a/examples/seismic/viscoacoustic/operators.py +++ b/examples/seismic/viscoacoustic/operators.py @@ -1,8 +1,9 @@ -import sympy as sp import numpy as np +import sympy as sp -from devito import (Eq, Operator, VectorTimeFunction, TimeFunction, Function, NODE, - div, grad, solve) +from devito import ( + NODE, Eq, Function, Operator, TimeFunction, VectorTimeFunction, div, grad, solve +) def src_rec(p, model, geometry, **kwargs): diff --git a/examples/seismic/viscoacoustic/viscoacoustic_example.py b/examples/seismic/viscoacoustic/viscoacoustic_example.py index 3b065d50f9..fdf6992f89 100755 --- a/examples/seismic/viscoacoustic/viscoacoustic_example.py +++ b/examples/seismic/viscoacoustic/viscoacoustic_example.py @@ -1,13 +1,14 @@ import numpy as np + try: import pytest except ImportError: pass -from devito.logger import info from devito import norm +from devito.logger import info +from examples.seismic import demo_model, seismic_args, setup_geometry from examples.seismic.viscoacoustic import ViscoacousticWaveSolver -from examples.seismic import demo_model, setup_geometry, seismic_args def viscoacoustic_setup(shape=(50, 50), spacing=(15.0, 15.0), tn=500., space_order=4, diff --git a/examples/seismic/viscoacoustic/wavesolver.py b/examples/seismic/viscoacoustic/wavesolver.py index 61f03b7c8c..9b7a6c5b8a 100755 --- a/examples/seismic/viscoacoustic/wavesolver.py +++ 
b/examples/seismic/viscoacoustic/wavesolver.py @@ -1,8 +1,10 @@ -from devito import (VectorTimeFunction, TimeFunction, Function, NODE, - DevitoCheckpoint, CheckpointOperator, Revolver) +from devito import ( + NODE, CheckpointOperator, DevitoCheckpoint, Function, Revolver, TimeFunction, + VectorTimeFunction +) from devito.tools import memoized_meth from examples.seismic.viscoacoustic.operators import ( - ForwardOperator, AdjointOperator, GradientOperator, BornOperator + AdjointOperator, BornOperator, ForwardOperator, GradientOperator ) diff --git a/examples/seismic/viscoelastic/operators.py b/examples/seismic/viscoelastic/operators.py index 9e0269665a..199f7a926e 100644 --- a/examples/seismic/viscoelastic/operators.py +++ b/examples/seismic/viscoelastic/operators.py @@ -1,7 +1,8 @@ import sympy as sp -from devito import (Eq, Operator, VectorTimeFunction, TensorTimeFunction, - div, grad, diag, solve) +from devito import ( + Eq, Operator, TensorTimeFunction, VectorTimeFunction, diag, div, grad, solve +) from examples.seismic.elastic import src_rec diff --git a/examples/seismic/viscoelastic/viscoelastic_example.py b/examples/seismic/viscoelastic/viscoelastic_example.py index 381c542414..4813131325 100644 --- a/examples/seismic/viscoelastic/viscoelastic_example.py +++ b/examples/seismic/viscoelastic/viscoelastic_example.py @@ -1,4 +1,5 @@ import numpy as np + try: import pytest except ImportError: @@ -6,8 +7,8 @@ from devito import norm from devito.logger import info +from examples.seismic import demo_model, seismic_args, setup_geometry from examples.seismic.viscoelastic import ViscoelasticWaveSolver -from examples.seismic import demo_model, setup_geometry, seismic_args def viscoelastic_setup(shape=(50, 50), spacing=(15.0, 15.0), tn=500., space_order=4, diff --git a/examples/seismic/viscoelastic/wavesolver.py b/examples/seismic/viscoelastic/wavesolver.py index 6f3945661c..edbed1222a 100644 --- a/examples/seismic/viscoelastic/wavesolver.py +++ 
b/examples/seismic/viscoelastic/wavesolver.py @@ -1,4 +1,4 @@ -from devito import VectorTimeFunction, TensorTimeFunction +from devito import TensorTimeFunction, VectorTimeFunction from devito.tools import memoized_meth from examples.seismic.viscoelastic.operators import ForwardOperator diff --git a/examples/timestepping/acoustic_superstep.py b/examples/timestepping/acoustic_superstep.py index 4779adfb0d..c482de3563 100644 --- a/examples/timestepping/acoustic_superstep.py +++ b/examples/timestepping/acoustic_superstep.py @@ -8,15 +8,10 @@ import numpy as np from devito import ( - ConditionalDimension, - Eq, - Operator, - SparseTimeFunction, - TimeFunction, - solve, + ConditionalDimension, Eq, Operator, SparseTimeFunction, TimeFunction, solve ) from devito.timestepping.superstep import superstep_generator -from examples.seismic import demo_model, SeismicModel +from examples.seismic import SeismicModel, demo_model def ricker(t, f=10, A=1): diff --git a/tests/test_adjoint.py b/tests/test_adjoint.py index f5f89de22e..2a70be5d87 100644 --- a/tests/test_adjoint.py +++ b/tests/test_adjoint.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from devito import Operator, norm, Function, Grid, SparseFunction, inner +from devito import Function, Grid, Operator, SparseFunction, inner, norm from devito.logger import info from examples.seismic import demo_model from examples.seismic.acoustic import acoustic_setup diff --git a/tests/test_arch.py b/tests/test_arch.py index e9af3fa9e5..9565c6f08d 100644 --- a/tests/test_arch.py +++ b/tests/test_arch.py @@ -1,7 +1,7 @@ import pytest -from devito import switchconfig, configuration -from devito.arch.compiler import sniff_compiler_version, compiler_registry, GNUCompiler +from devito import configuration, switchconfig +from devito.arch.compiler import GNUCompiler, compiler_registry, sniff_compiler_version @pytest.mark.parametrize("cc", [ diff --git a/tests/test_autotuner.py b/tests/test_autotuner.py index ca1644316c..86a215e41f 100644 
--- a/tests/test_autotuner.py +++ b/tests/test_autotuner.py @@ -1,10 +1,10 @@ -import pytest import numpy as np +import pytest from conftest import assert_blocking, skipif -from devito import Grid, Function, TimeFunction, Eq, Operator, configuration, switchconfig -from devito.data import LEFT +from devito import Eq, Function, Grid, Operator, TimeFunction, configuration, switchconfig from devito.core.autotuning import options # noqa +from devito.data import LEFT @switchconfig(log_level='DEBUG', develop_mode=True) diff --git a/tests/test_benchmark.py b/tests/test_benchmark.py index 513776489c..0de45e4799 100644 --- a/tests/test_benchmark.py +++ b/tests/test_benchmark.py @@ -1,11 +1,12 @@ -import pytest import os import sys +from subprocess import check_call + +import pytest from benchmarks.user.benchmark import run -from devito import configuration, switchconfig, Grid, TimeFunction, Eq, Operator from conftest import skipif -from subprocess import check_call +from devito import Eq, Grid, Operator, TimeFunction, configuration, switchconfig @skipif('cpu64-icx') diff --git a/tests/test_buffering.py b/tests/test_buffering.py index cc0972f4bf..2959796851 100644 --- a/tests/test_buffering.py +++ b/tests/test_buffering.py @@ -1,12 +1,14 @@ -import pytest import numpy as np +import pytest from conftest import skipif -from devito import (Constant, Grid, TimeFunction, Operator, Eq, SubDimension, - SubDomain, ConditionalDimension, configuration, switchconfig) +from devito import ( + ConditionalDimension, Constant, Eq, Grid, Operator, SubDimension, SubDomain, + TimeFunction, configuration, switchconfig +) from devito.arch.archinfo import AppleArm -from devito.ir import FindSymbols, retrieve_iteration_tree from devito.exceptions import CompilationError +from devito.ir import FindSymbols, retrieve_iteration_tree def test_read_write(): diff --git a/tests/test_builtins.py b/tests/test_builtins.py index 980b6e6512..1a5c98dcb0 100644 --- a/tests/test_builtins.py +++ 
b/tests/test_builtins.py @@ -1,17 +1,19 @@ -import pytest import numpy as np +import pytest from scipy.ndimage import gaussian_filter + try: from scipy.datasets import ascent except ImportError: from scipy.misc import ascent -from devito import ConditionalDimension, Grid, Function, TimeFunction, switchconfig -from devito.builtins import (assign, norm, gaussian_smooth, initialize_function, - inner, mmin, mmax, sum, sumall) +from devito import ConditionalDimension, Function, Grid, TimeFunction, switchconfig +from devito.builtins import ( + assign, gaussian_smooth, initialize_function, inner, mmax, mmin, norm, sum, sumall +) from devito.data import LEFT, RIGHT from devito.tools import as_tuple -from devito.types import SubDomain, SparseTimeFunction +from devito.types import SparseTimeFunction, SubDomain class TestAssign: diff --git a/tests/test_caching.py b/tests/test_caching.py index 26ce3b6721..86f0d45097 100644 --- a/tests/test_caching.py +++ b/tests/test_caching.py @@ -1,16 +1,19 @@ -from ctypes import byref, c_void_p import weakref +from ctypes import byref, c_void_p import numpy as np -from sympy import Expr import pytest +from sympy import Expr -from devito import (Grid, Function, TimeFunction, SparseFunction, SparseTimeFunction, - ConditionalDimension, SubDimension, Constant, Operator, Eq, Dimension, - DefaultDimension, _SymbolCache, clear_cache, solve, VectorFunction, - TensorFunction, TensorTimeFunction, VectorTimeFunction) -from devito.types import (DeviceID, NThreadsBase, NPThreads, Object, LocalObject, - Scalar, Symbol, ThreadID) +from devito import ( + ConditionalDimension, Constant, DefaultDimension, Dimension, Eq, Function, Grid, + Operator, SparseFunction, SparseTimeFunction, SubDimension, TensorFunction, + TensorTimeFunction, TimeFunction, VectorFunction, VectorTimeFunction, _SymbolCache, + clear_cache, solve +) +from devito.types import ( + DeviceID, LocalObject, NPThreads, NThreadsBase, Object, Scalar, Symbol, ThreadID +) from devito.types.basic 
import AbstractSymbol diff --git a/tests/test_checkpointing.py b/tests/test_checkpointing.py index 75cca861cc..98f6f1603c 100644 --- a/tests/test_checkpointing.py +++ b/tests/test_checkpointing.py @@ -1,11 +1,13 @@ from functools import reduce -import pytest import numpy as np +import pytest from conftest import skipif -from devito import (Grid, TimeFunction, Operator, Function, Eq, switchconfig, Constant, - Revolver, CheckpointOperator, DevitoCheckpoint) +from devito import ( + CheckpointOperator, Constant, DevitoCheckpoint, Eq, Function, Grid, Operator, + Revolver, TimeFunction, switchconfig +) from examples.seismic.acoustic.acoustic_example import acoustic_setup diff --git a/tests/test_constant.py b/tests/test_constant.py index e39cf81502..5437c2478e 100644 --- a/tests/test_constant.py +++ b/tests/test_constant.py @@ -1,6 +1,6 @@ import numpy as np -from devito import Grid, Constant, Function, TimeFunction, Eq, solve, Operator +from devito import Constant, Eq, Function, Grid, Operator, TimeFunction, solve class TestConst: diff --git a/tests/test_cse.py b/tests/test_cse.py index 4359470c92..fe41ca0564 100644 --- a/tests/test_cse.py +++ b/tests/test_cse.py @@ -1,14 +1,14 @@ -import pytest - import numpy as np +import pytest from sympy import Ge, Lt from sympy.core.mul import _mulsort from conftest import assert_structure -from devito import (Grid, Function, TimeFunction, ConditionalDimension, Eq, # noqa - Operator, cos, sin) +from devito import ( # noqa + ConditionalDimension, Eq, Function, Grid, Operator, TimeFunction, cos, sin +) from devito.finite_differences.differentiable import diffify -from devito.ir import DummyEq, FindNodes, FindSymbols, Conditional +from devito.ir import Conditional, DummyEq, FindNodes, FindSymbols from devito.ir.support import generator from devito.passes.clusters.cse import CTemp, _cse from devito.symbolics import indexify diff --git a/tests/test_data.py b/tests/test_data.py index eac3aa1c43..f7a30b2bd9 100644 --- a/tests/test_data.py 
+++ b/tests/test_data.py @@ -1,11 +1,12 @@ -import pytest import numpy as np +import pytest -from devito import (Grid, Function, TimeFunction, SparseTimeFunction, Dimension, # noqa - Eq, Operator, ALLOC_GUARD, ALLOC_ALIGNED, configuration, - switchconfig, SparseFunction, PrecomputedSparseFunction, - PrecomputedSparseTimeFunction) -from devito.data import LEFT, RIGHT, Decomposition, loc_data_idx, convert_index +from devito import ( # noqa + ALLOC_ALIGNED, ALLOC_GUARD, Dimension, Eq, Function, Grid, Operator, + PrecomputedSparseFunction, PrecomputedSparseTimeFunction, SparseFunction, + SparseTimeFunction, TimeFunction, configuration, switchconfig +) +from devito.data import LEFT, RIGHT, Decomposition, convert_index, loc_data_idx from devito.data.allocators import DataReference from devito.tools import as_tuple from devito.types import Scalar diff --git a/tests/test_derivatives.py b/tests/test_derivatives.py index b2140ccdbd..4600305fb9 100644 --- a/tests/test_derivatives.py +++ b/tests/test_derivatives.py @@ -1,14 +1,16 @@ import numpy as np import pytest -from sympy import sympify, simplify, diff, Float, Symbol +from sympy import Float, Symbol, diff, simplify, sympify -from devito import (Grid, Function, TimeFunction, Eq, Operator, NODE, cos, sin, - ConditionalDimension, left, right, centered, div, grad, - curl, laplace, VectorFunction, TensorFunction) +from devito import ( + NODE, ConditionalDimension, Eq, Function, Grid, Operator, TensorFunction, + TimeFunction, VectorFunction, centered, cos, curl, div, grad, laplace, left, right, + sin +) from devito.finite_differences import Derivative, Differentiable, diffify -from devito.finite_differences.differentiable import (Add, EvalDerivative, IndexSum, - IndexDerivative, Weights, - DiffDerivative) +from devito.finite_differences.differentiable import ( + Add, DiffDerivative, EvalDerivative, IndexDerivative, IndexSum, Weights +) from devito.symbolics import indexify, retrieve_indexed from devito.types.dimension import 
StencilDimension from devito.warnings import DevitoWarning diff --git a/tests/test_differentiable.py b/tests/test_differentiable.py index c8f256aa2b..0f97a2ab2a 100644 --- a/tests/test_differentiable.py +++ b/tests/test_differentiable.py @@ -1,12 +1,13 @@ from itertools import product -import sympy -import pytest import numpy as np +import pytest +import sympy -from devito import Function, Grid, Differentiable, NODE -from devito.finite_differences.differentiable import (Add, Mul, Pow, diffify, - interp_for_fd, SafeInv) +from devito import NODE, Differentiable, Function, Grid +from devito.finite_differences.differentiable import ( + Add, Mul, Pow, SafeInv, diffify, interp_for_fd +) def test_differentiable(): diff --git a/tests/test_dimension.py b/tests/test_dimension.py index f43ae795eb..f803185db1 100644 --- a/tests/test_dimension.py +++ b/tests/test_dimension.py @@ -1,21 +1,23 @@ -from itertools import product from copy import deepcopy +from itertools import product import numpy as np -from sympy import And, Or import pytest +from sympy import And, Or -from conftest import assert_blocking, assert_structure, skipif, opts_tiling -from devito import (ConditionalDimension, Grid, Function, TimeFunction, floor, # noqa - SparseFunction, SparseTimeFunction, Eq, Operator, Constant, - Dimension, DefaultDimension, SubDimension, switchconfig, - SubDomain, Lt, Le, Gt, Ge, Ne, Buffer, sin, SpaceDimension, - CustomDimension, dimensions, configuration, norm, Inc, sum) -from devito.ir.iet import (Conditional, Expression, Iteration, FindNodes, - FindSymbols, retrieve_iteration_tree) -from devito.ir.equations.algorithms import concretize_subdims +from conftest import assert_blocking, assert_structure, opts_tiling, skipif +from devito import ( # noqa + Buffer, ConditionalDimension, Constant, CustomDimension, DefaultDimension, Dimension, + Eq, Function, Ge, Grid, Gt, Inc, Le, Lt, Ne, Operator, SpaceDimension, SparseFunction, + SparseTimeFunction, SubDimension, SubDomain, TimeFunction, 
configuration, dimensions, + floor, norm, sin, sum, switchconfig +) from devito.ir import SymbolRegistry -from devito.symbolics import indexify, retrieve_functions, IntDiv, INT +from devito.ir.equations.algorithms import concretize_subdims +from devito.ir.iet import ( + Conditional, Expression, FindNodes, FindSymbols, Iteration, retrieve_iteration_tree +) +from devito.symbolics import INT, IntDiv, indexify, retrieve_functions from devito.types import Array, StencilDimension, Symbol from devito.types.basic import Scalar from devito.types.dimension import AffineIndexAccessFunction, Thickness diff --git a/tests/test_dle.py b/tests/test_dle.py index c49f3fa67b..3be184e46b 100644 --- a/tests/test_dle.py +++ b/tests/test_dle.py @@ -1,18 +1,20 @@ from functools import reduce from operator import mul -import sympy import numpy as np import pytest +import sympy -from conftest import assert_structure, assert_blocking, _R, skipif -from devito import (Grid, Function, TimeFunction, SparseTimeFunction, SpaceDimension, - CustomDimension, Dimension, DefaultDimension, SubDimension, - PrecomputedSparseTimeFunction, Eq, Inc, ReduceMin, ReduceMax, - Operator, configuration, dimensions, info, cos) +from conftest import _R, assert_blocking, assert_structure, skipif +from devito import ( + CustomDimension, DefaultDimension, Dimension, Eq, Function, Grid, Inc, Operator, + PrecomputedSparseTimeFunction, ReduceMax, ReduceMin, SpaceDimension, + SparseTimeFunction, SubDimension, TimeFunction, configuration, cos, dimensions, info +) from devito.exceptions import InvalidArgument -from devito.ir.iet import (Iteration, FindNodes, IsPerfectIteration, - retrieve_iteration_tree, Expression) +from devito.ir.iet import ( + Expression, FindNodes, IsPerfectIteration, Iteration, retrieve_iteration_tree +) from devito.passes.iet.languages.openmp import Ompizer, OmpRegion from devito.tools import as_tuple from devito.types import Barrier, Scalar, Symbol diff --git a/tests/test_docstrings.py 
b/tests/test_docstrings.py index e95e786ace..dae7abb149 100644 --- a/tests/test_docstrings.py +++ b/tests/test_docstrings.py @@ -10,10 +10,10 @@ # * skipping tests when using a devito backend (where they would fail, for # the most disparate reasons) +import doctest from importlib import import_module import pytest -import doctest @pytest.mark.parametrize('modname', [ diff --git a/tests/test_dse.py b/tests/test_dse.py index b968964f5e..e93f4215d2 100644 --- a/tests/test_dse.py +++ b/tests/test_dse.py @@ -2,31 +2,34 @@ import numpy as np import pytest - from sympy import Mul # noqa -from conftest import (skipif, EVAL, _R, assert_structure, assert_blocking, # noqa - get_params, get_arrays, check_array) -from devito import (NODE, Eq, Inc, Constant, Function, TimeFunction, # noqa - SparseTimeFunction, Dimension, SubDimension, - ConditionalDimension, DefaultDimension, Grid, Operator, - norm, grad, div, dimensions, switchconfig, configuration, - first_derivative, solve, transpose, Abs, cos, exp, - sin, sqrt, floor, Ge, Lt, Derivative) +from conftest import ( # noqa + _R, EVAL, assert_blocking, assert_structure, check_array, get_arrays, get_params, + skipif +) +from devito import ( # noqa + NODE, Abs, ConditionalDimension, Constant, DefaultDimension, Derivative, Dimension, + Eq, Function, Ge, Grid, Inc, Lt, Operator, SparseTimeFunction, SubDimension, + TimeFunction, configuration, cos, dimensions, div, exp, first_derivative, floor, grad, + norm, sin, solve, sqrt, switchconfig, transpose +) from devito.exceptions import InvalidArgument, InvalidOperator -from devito.ir import (Conditional, DummyEq, Expression, Iteration, FindNodes, - FindSymbols, ParallelIteration, retrieve_iteration_tree) +from devito.ir import ( + Conditional, DummyEq, Expression, FindNodes, FindSymbols, Iteration, + ParallelIteration, retrieve_iteration_tree +) from devito.passes.clusters.aliases import collect from devito.passes.clusters.factorization import collect_nested from 
devito.passes.iet.parpragma import VExpanded -from devito.symbolics import (INT, FLOAT, DefFunction, FieldFromPointer, # noqa - IndexedPointer, Keyword, SizeOf, estimate_cost, - pow_to_mul, indexify) +from devito.symbolics import ( # noqa + FLOAT, INT, DefFunction, FieldFromPointer, IndexedPointer, Keyword, SizeOf, + estimate_cost, indexify, pow_to_mul +) from devito.tools import as_tuple -from devito.types import Scalar, Symbol, PrecomputedSparseTimeFunction - +from devito.types import PrecomputedSparseTimeFunction, Scalar, Symbol +from examples.seismic import AcquisitionGeometry, demo_model from examples.seismic.acoustic import AcousticWaveSolver -from examples.seismic import demo_model, AcquisitionGeometry from examples.seismic.tti import AnisotropicWaveSolver diff --git a/tests/test_dtypes.py b/tests/test_dtypes.py index 370c169b07..a2a04e858f 100644 --- a/tests/test_dtypes.py +++ b/tests/test_dtypes.py @@ -7,10 +7,8 @@ except ImportError: from conftest import skipif -from devito import ( - Constant, Eq, Function, Grid, Operator, exp, log, sin, configuration -) -from devito.arch.compiler import GNUCompiler, CustomCompiler +from devito import Constant, Eq, Function, Grid, Operator, configuration, exp, log, sin +from devito.arch.compiler import CustomCompiler, GNUCompiler from devito.exceptions import InvalidOperator from devito.ir.cgen.printer import BasePrinter from devito.passes.iet.langbase import LangBB diff --git a/tests/test_error_checking.py b/tests/test_error_checking.py index f03cf708e4..5de3105fe4 100644 --- a/tests/test_error_checking.py +++ b/tests/test_error_checking.py @@ -1,6 +1,6 @@ import pytest -from devito import Grid, Function, TimeFunction, Eq, Operator, switchconfig +from devito import Eq, Function, Grid, Operator, TimeFunction, switchconfig from devito.exceptions import ExecutionError diff --git a/tests/test_fission.py b/tests/test_fission.py index 9f7c74b6a6..f62ac63bab 100644 --- a/tests/test_fission.py +++ b/tests/test_fission.py @@ 
-1,8 +1,9 @@ import numpy as np from conftest import assert_structure -from devito import (Eq, Inc, Grid, Function, TimeFunction, SubDimension, SubDomain, - Operator, solve) +from devito import ( + Eq, Function, Grid, Inc, Operator, SubDimension, SubDomain, TimeFunction, solve +) def test_issue_1725(): diff --git a/tests/test_gpu_common.py b/tests/test_gpu_common.py index 1359025a3a..a31560f988 100644 --- a/tests/test_gpu_common.py +++ b/tests/test_gpu_common.py @@ -1,25 +1,24 @@ import cloudpickle as pickle - -import pytest import numpy as np -import sympy +import pytest import scipy.sparse +import sympy -from conftest import assert_structure -from devito import (Constant, Eq, Inc, Grid, Function, ConditionalDimension, - Dimension, MatrixSparseTimeFunction, SparseTimeFunction, - SubDimension, SubDomain, SubDomainSet, TimeFunction, exp, - Operator, configuration, switchconfig, TensorTimeFunction, - Buffer, assign, switchenv) -from devito.arch import get_gpu_info, get_cpu_info, Device, Cpu64 +from conftest import assert_structure, skipif +from devito import ( + Buffer, ConditionalDimension, Constant, Dimension, Eq, Function, Grid, Inc, + MatrixSparseTimeFunction, Operator, SparseTimeFunction, SubDimension, SubDomain, + SubDomainSet, TensorTimeFunction, TimeFunction, assign, configuration, exp, + switchconfig, switchenv +) +from devito.arch import Cpu64, Device, get_cpu_info, get_gpu_info from devito.exceptions import InvalidArgument -from devito.ir import (Conditional, Expression, Section, FindNodes, FindSymbols, - retrieve_iteration_tree) +from devito.ir import ( + Conditional, Expression, FindNodes, FindSymbols, Section, retrieve_iteration_tree +) from devito.passes.iet.languages.openmp import OmpIteration from devito.types import DeviceID, DeviceRM, Lock, NPThreads, PThreadArray, Symbol -from conftest import skipif - pytestmark = skipif(['nodevice'], whole_module=True) diff --git a/tests/test_gpu_openacc.py b/tests/test_gpu_openacc.py index 8c4813db0b..ccaaa0bd6a 
100644 --- a/tests/test_gpu_openacc.py +++ b/tests/test_gpu_openacc.py @@ -1,13 +1,14 @@ -import pytest import numpy as np +import pytest -from devito import (Grid, Function, TimeFunction, SparseTimeFunction, Eq, Operator, - norm, solve, Max) -from conftest import skipif, assert_blocking, opts_device_tiling +from conftest import assert_blocking, opts_device_tiling, skipif +from devito import ( + Eq, Function, Grid, Max, Operator, SparseTimeFunction, TimeFunction, norm, solve +) from devito.data import LEFT from devito.exceptions import InvalidOperator -from devito.ir.iet import retrieve_iteration_tree, FindNodes, Iteration -from examples.seismic import TimeAxis, RickerSource, Receiver +from devito.ir.iet import FindNodes, Iteration, retrieve_iteration_tree +from examples.seismic import Receiver, RickerSource, TimeAxis pytestmark = skipif(['nodevice'], whole_module=True) diff --git a/tests/test_gpu_openmp.py b/tests/test_gpu_openmp.py index 7150d66eb2..3d3b0d4b09 100644 --- a/tests/test_gpu_openmp.py +++ b/tests/test_gpu_openmp.py @@ -1,12 +1,13 @@ import numpy as np import pytest -from conftest import skipif, opts_device_tiling -from devito import (Grid, Dimension, Function, TimeFunction, Eq, Inc, solve, - Operator, norm, cos) +from conftest import opts_device_tiling, skipif +from devito import ( + Dimension, Eq, Function, Grid, Inc, Operator, TimeFunction, cos, norm, solve +) from devito.exceptions import InvalidOperator from devito.ir.iet import retrieve_iteration_tree -from examples.seismic import TimeAxis, RickerSource, Receiver +from examples.seismic import Receiver, RickerSource, TimeAxis pytestmark = skipif(['nodevice'], whole_module=True) diff --git a/tests/test_gradient.py b/tests/test_gradient.py index 67a691ab1d..fd070a91f5 100644 --- a/tests/test_gradient.py +++ b/tests/test_gradient.py @@ -3,11 +3,11 @@ from numpy import linalg from conftest import skipif -from devito import Function, info, TimeFunction, Operator, Eq, smooth +from devito import Eq, 
Function, Operator, TimeFunction, info, smooth from devito.parameters import switchconfig +from examples.seismic import Receiver, demo_model, setup_geometry from examples.seismic.acoustic import acoustic_setup as iso_setup from examples.seismic.acoustic.operators import iso_stencil -from examples.seismic import Receiver, demo_model, setup_geometry from examples.seismic.tti import tti_setup from examples.seismic.viscoacoustic import viscoacoustic_setup as vsc_setup diff --git a/tests/test_iet.py b/tests/test_iet.py index b843d12f9f..7f0675eaf0 100644 --- a/tests/test_iet.py +++ b/tests/test_iet.py @@ -1,25 +1,26 @@ -import pytest - from ctypes import c_void_p + import cgen import numpy as np +import pytest import sympy -from devito import (Eq, Grid, Function, TimeFunction, Operator, Dimension, # noqa - switchconfig) -from devito.ir.iet import ( - Call, Callable, Conditional, Definition, DeviceCall, DummyExpr, Iteration, - List, KernelLaunch, Dereference, Lambda, Switch, ElementalFunction, CGen, - FindSymbols, filter_iterations, make_efunc, retrieve_iteration_tree, - Transformer +from devito import ( # noqa + Dimension, Eq, Function, Grid, Operator, TimeFunction, switchconfig ) from devito.ir import SymbolRegistry +from devito.ir.iet import ( + Call, Callable, CGen, Conditional, Definition, Dereference, DeviceCall, DummyExpr, + ElementalFunction, FindSymbols, Iteration, KernelLaunch, Lambda, List, Switch, + Transformer, filter_iterations, make_efunc, retrieve_iteration_tree +) from devito.passes.iet.engine import Graph from devito.passes.iet.languages.C import CDataManager -from devito.symbolics import (Byref, FieldFromComposite, InlineIf, Macro, Class, - String, FLOAT) +from devito.symbolics import ( + FLOAT, Byref, Class, FieldFromComposite, InlineIf, Macro, String +) from devito.tools import CustomDtype, as_tuple, dtype_to_ctype -from devito.types import CustomDimension, Array, LocalObject, Symbol, Pointer +from devito.types import Array, CustomDimension, 
LocalObject, Pointer, Symbol @pytest.fixture diff --git a/tests/test_interpolation.py b/tests/test_interpolation.py index eae0ddb60e..0012c4ed40 100644 --- a/tests/test_interpolation.py +++ b/tests/test_interpolation.py @@ -1,18 +1,20 @@ import numpy as np -from numpy import sin, floor import pytest +import scipy.sparse +from numpy import floor, sin from sympy import Float from conftest import assert_structure -from devito import (Grid, Operator, Dimension, SparseFunction, SparseTimeFunction, - Function, TimeFunction, DefaultDimension, Eq, switchconfig, - PrecomputedSparseFunction, PrecomputedSparseTimeFunction, - MatrixSparseTimeFunction, SubDomain) +from devito import ( + DefaultDimension, Dimension, Eq, Function, Grid, MatrixSparseTimeFunction, Operator, + PrecomputedSparseFunction, PrecomputedSparseTimeFunction, SparseFunction, + SparseTimeFunction, SubDomain, TimeFunction, switchconfig +) from devito.operations.interpolators import LinearInterpolator, SincInterpolator -from examples.seismic import (demo_model, TimeAxis, RickerSource, Receiver, - AcquisitionGeometry) +from examples.seismic import ( + AcquisitionGeometry, Receiver, RickerSource, TimeAxis, demo_model +) from examples.seismic.acoustic import AcousticWaveSolver, acoustic_setup -import scipy.sparse def unit_box(name='a', shape=(11, 11), grid=None, space_order=1): diff --git a/tests/test_ir.py b/tests/test_ir.py index c9f9440297..be2bcedffc 100644 --- a/tests/test_ir.py +++ b/tests/test_ir.py @@ -1,20 +1,24 @@ -import pytest import numpy as np +import pytest from sympy import S from conftest import EVAL, skipif # noqa -from devito import (Eq, Inc, Grid, Constant, Function, TimeFunction, # noqa - Operator, Dimension, SubDimension, switchconfig) +from devito import ( # noqa + Constant, Dimension, Eq, Function, Grid, Inc, Operator, SubDimension, TimeFunction, + switchconfig +) from devito.ir.cgen import ccode from devito.ir.equations import LoweredEq from devito.ir.equations.algorithms import 
dimension_sort -from devito.ir.iet import Iteration, FindNodes -from devito.ir.support.basic import (IterationInstance, TimedAccess, Scope, - Vector, AFFINE, REGULAR, IRREGULAR, mocksym0, - mocksym1) -from devito.ir.support.space import (NullInterval, Interval, Forward, Backward, - IntervalGroup, IterationSpace) +from devito.ir.iet import FindNodes, Iteration +from devito.ir.support.basic import ( + AFFINE, IRREGULAR, REGULAR, IterationInstance, Scope, TimedAccess, Vector, mocksym0, + mocksym1 +) from devito.ir.support.guards import GuardOverflow +from devito.ir.support.space import ( + Backward, Forward, Interval, IntervalGroup, IterationSpace, NullInterval +) from devito.symbolics import DefFunction, FieldFromPointer from devito.tools import prod from devito.tools.data_structures import frozendict diff --git a/tests/test_linearize.py b/tests/test_linearize.py index 4201d84ff8..52cc220070 100644 --- a/tests/test_linearize.py +++ b/tests/test_linearize.py @@ -1,11 +1,13 @@ -import pytest import numpy as np +import pytest import scipy.sparse -from devito import (Grid, Function, TimeFunction, SparseTimeFunction, Operator, Eq, - Inc, MatrixSparseTimeFunction, sin, switchconfig, configuration) +from devito import ( + Eq, Function, Grid, Inc, MatrixSparseTimeFunction, Operator, SparseTimeFunction, + TimeFunction, configuration, sin, switchconfig +) from devito.ir import Call, Callable, DummyExpr, Expression, FindNodes, SymbolRegistry -from devito.passes import Graph, linearize, generate_macros +from devito.passes import Graph, generate_macros, linearize from devito.types import Array, Bundle, DefaultDimension diff --git a/tests/test_lower_clusters.py b/tests/test_lower_clusters.py index 4791571cd4..466466a734 100644 --- a/tests/test_lower_clusters.py +++ b/tests/test_lower_clusters.py @@ -1,4 +1,4 @@ -from devito import Grid, SparseTimeFunction, TimeFunction, Operator +from devito import Grid, Operator, SparseTimeFunction, TimeFunction from devito.ir.iet import 
FindSymbols diff --git a/tests/test_lower_exprs.py b/tests/test_lower_exprs.py index 5bafb89954..5a37237c6b 100644 --- a/tests/test_lower_exprs.py +++ b/tests/test_lower_exprs.py @@ -1,14 +1,11 @@ import numpy as np import pytest -from devito import (Grid, TimeFunction, Function, Operator, Eq, solve, - DefaultDimension) +from devito import DefaultDimension, Eq, Function, Grid, Operator, TimeFunction, solve from devito.finite_differences import Derivative from devito.finite_differences.differentiable import diff2sympy from devito.ir.equations import LoweredEq -from devito.passes.equations.linearity import ( - _collect_derivatives as collect_derivatives -) +from devito.passes.equations.linearity import _collect_derivatives as collect_derivatives from devito.tools import timed_region diff --git a/tests/test_mpi.py b/tests/test_mpi.py index c13e465a99..c0d0b4a564 100644 --- a/tests/test_mpi.py +++ b/tests/test_mpi.py @@ -1,27 +1,27 @@ +from functools import cached_property + import numpy as np import pytest -from functools import cached_property +from test_dse import TestTTI from conftest import _R, assert_blocking, assert_structure -from devito import (Grid, Constant, Function, TimeFunction, SparseFunction, - SparseTimeFunction, VectorTimeFunction, TensorTimeFunction, - Dimension, ConditionalDimension, div, solve, diag, grad, - SubDimension, SubDomain, Eq, Ne, Inc, NODE, Operator, norm, - inner, configuration, switchconfig, generic_derivative, - PrecomputedSparseFunction, DefaultDimension, Buffer, - CustomDimension) +from devito import ( + NODE, Buffer, ConditionalDimension, Constant, CustomDimension, DefaultDimension, + Dimension, Eq, Function, Grid, Inc, Ne, Operator, PrecomputedSparseFunction, + SparseFunction, SparseTimeFunction, SubDimension, SubDomain, TensorTimeFunction, + TimeFunction, VectorTimeFunction, configuration, diag, div, generic_derivative, grad, + inner, norm, solve, switchconfig +) from devito.arch.compiler import OneapiCompiler from devito.data 
import LEFT, RIGHT -from devito.ir.iet import (Call, Conditional, Iteration, FindNodes, FindSymbols, - retrieve_iteration_tree) +from devito.ir.iet import ( + Call, Conditional, FindNodes, FindSymbols, Iteration, retrieve_iteration_tree +) from devito.mpi import MPI -from devito.mpi.routines import (HaloUpdateCall, HaloUpdateList, MPICall, - ComputeCall) from devito.mpi.distributed import CustomTopology +from devito.mpi.routines import ComputeCall, HaloUpdateCall, HaloUpdateList, MPICall from devito.tools import Bunch - from examples.seismic.acoustic import acoustic_setup -from test_dse import TestTTI # Main body in Operator IET, depending on ISA diff --git a/tests/test_operator.py b/tests/test_operator.py index c73a25664a..1a9b82942a 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -1,14 +1,13 @@ +import json +import logging import os -from itertools import permutations from functools import reduce +from itertools import permutations from operator import mul -import logging -import json import numpy as np -import sympy - import pytest +import sympy # Try-except required to allow for import of classes from this file # for testing in PRO @@ -17,26 +16,27 @@ except ImportError: from conftest import assert_structure, skipif -from devito import (Grid, Eq, Operator, Constant, Function, TimeFunction, - SparseFunction, SparseTimeFunction, Dimension, error, SpaceDimension, - NODE, CELL, dimensions, configuration, TensorFunction, - TensorTimeFunction, VectorFunction, VectorTimeFunction, - div, grad, switchconfig, exp, Buffer) -from devito import Inc, Le, Lt, Ge, Gt, sin # noqa +from devito import ( # noqa + CELL, NODE, Buffer, Constant, Dimension, Eq, Function, Ge, Grid, Gt, Inc, Le, Lt, + Operator, SpaceDimension, SparseFunction, SparseTimeFunction, TensorFunction, + TensorTimeFunction, TimeFunction, VectorFunction, VectorTimeFunction, configuration, + dimensions, div, error, exp, grad, sin, switchconfig +) from devito.arch.archinfo import Device from 
devito.exceptions import InvalidOperator from devito.finite_differences.differentiable import diff2sympy from devito.ir.equations import ClusterizedEq from devito.ir.equations.algorithms import lower_exprs -from devito.ir.iet import (Callable, Conditional, Expression, Iteration, TimedList, - FindNodes, IsPerfectIteration, retrieve_iteration_tree, - FindSymbols) +from devito.ir.iet import ( + Callable, Conditional, Expression, FindNodes, FindSymbols, IsPerfectIteration, + Iteration, TimedList, retrieve_iteration_tree +) from devito.ir.support import Any, Backward, Forward from devito.passes.iet.languages.C import CDataManager from devito.symbolics import ListInitializer, indexify, retrieve_indexed from devito.tools import flatten, powerset, timed_region from devito.types import ( - Array, Barrier, CustomDimension, Indirection, Scalar, Symbol, ConditionalDimension + Array, Barrier, ConditionalDimension, CustomDimension, Indirection, Scalar, Symbol ) diff --git a/tests/test_pickle.py b/tests/test_pickle.py index 40e4c14ffe..0a4b848e55 100644 --- a/tests/test_pickle.py +++ b/tests/test_pickle.py @@ -2,31 +2,37 @@ import pickle as pickle0 import cloudpickle as pickle1 -import pytest import numpy as np +import pytest from sympy import Symbol -from devito import (Constant, Eq, Function, TimeFunction, SparseFunction, Grid, - Dimension, SubDimension, ConditionalDimension, IncrDimension, - TimeDimension, SteppingDimension, Operator, MPI, Min, solve, - PrecomputedSparseTimeFunction, SubDomain) -from devito.ir import Backward, Forward, GuardFactor, GuardBound, GuardBoundNext +from devito import ( + MPI, ConditionalDimension, Constant, Dimension, Eq, Function, Grid, IncrDimension, + Min, Operator, PrecomputedSparseTimeFunction, SparseFunction, SteppingDimension, + SubDimension, SubDomain, TimeDimension, TimeFunction, solve +) from devito.data import LEFT, OWNED -from devito.finite_differences.tools import direct, transpose, left, right, centered +from 
devito.finite_differences.tools import centered, direct, left, right, transpose +from devito.ir import Backward, Forward, GuardBound, GuardBoundNext, GuardFactor from devito.mpi.halo_scheme import Halo -from devito.mpi.routines import (MPIStatusObject, MPIMsgEnriched, MPIRequestObject, - MPIRegion) -from devito.types import (Array, CustomDimension, Symbol as dSymbol, Scalar, - PointerArray, Lock, PThreadArray, SharedData, Timer, - DeviceID, NPThreads, ThreadID, TempFunction, Indirection, - FIndexed, ComponentAccess, DefaultDimension) -from devito.types.basic import BoundSymbol, AbstractSymbol +from devito.mpi.routines import ( + MPIMsgEnriched, MPIRegion, MPIRequestObject, MPIStatusObject +) +from devito.symbolics import ( + CallFromPointer, Cast, DefFunction, FieldFromPointer, IntDiv, ListInitializer, SizeOf, + pow_to_mul +) from devito.tools import EnrichedTuple -from devito.symbolics import (IntDiv, ListInitializer, FieldFromPointer, - CallFromPointer, DefFunction, Cast, SizeOf, - pow_to_mul) -from examples.seismic import (demo_model, AcquisitionGeometry, - TimeAxis, RickerSource, Receiver) +from devito.types import ( + Array, ComponentAccess, CustomDimension, DefaultDimension, DeviceID, FIndexed, + Indirection, Lock, NPThreads, PointerArray, PThreadArray, Scalar, SharedData +) +from devito.types import Symbol as dSymbol +from devito.types import TempFunction, ThreadID, Timer +from devito.types.basic import AbstractSymbol, BoundSymbol +from examples.seismic import ( + AcquisitionGeometry, Receiver, RickerSource, TimeAxis, demo_model +) class SparseFirst(SparseFunction): diff --git a/tests/test_rebuild.py b/tests/test_rebuild.py index b910e609bd..56f21d271f 100644 --- a/tests/test_rebuild.py +++ b/tests/test_rebuild.py @@ -2,8 +2,8 @@ import pytest from devito import Dimension, Function, Grid -from devito.types import StencilDimension, SparseFunction, PrecomputedSparseFunction from devito.data.allocators import DataReference +from devito.types import 
PrecomputedSparseFunction, SparseFunction, StencilDimension class TestFunction: diff --git a/tests/test_resample.py b/tests/test_resample.py index 56771c4dcf..c01bf5ecf3 100644 --- a/tests/test_resample.py +++ b/tests/test_resample.py @@ -1,6 +1,6 @@ import numpy as np -from examples.seismic import TimeAxis, RickerSource, demo_model +from examples.seismic import RickerSource, TimeAxis, demo_model def test_resample(): diff --git a/tests/test_roundoff.py b/tests/test_roundoff.py index 5b3901f791..578a453e56 100644 --- a/tests/test_roundoff.py +++ b/tests/test_roundoff.py @@ -1,8 +1,8 @@ -import pytest import numpy as np +import pytest from conftest import skipif -from devito import Grid, Constant, TimeFunction, Eq, Operator, switchconfig +from devito import Constant, Eq, Grid, Operator, TimeFunction, switchconfig class TestRoundoff: diff --git a/tests/test_save.py b/tests/test_save.py index b3c8fd487b..1a29ecf09b 100644 --- a/tests/test_save.py +++ b/tests/test_save.py @@ -1,6 +1,6 @@ import numpy as np -from devito import Buffer, Grid, Eq, Operator, TimeFunction, solve +from devito import Buffer, Eq, Grid, Operator, TimeFunction, solve def initial(nt, nx, ny): diff --git a/tests/test_skewing.py b/tests/test_skewing.py index 0706377dee..458185c06a 100644 --- a/tests/test_skewing.py +++ b/tests/test_skewing.py @@ -1,9 +1,11 @@ -import pytest import numpy as np +import pytest from conftest import assert_blocking -from devito import Grid, Dimension, Eq, Function, TimeFunction, Operator, norm, Min # noqa -from devito.ir import Expression, Iteration, FindNodes +from devito import ( # noqa + Dimension, Eq, Function, Grid, Min, Operator, TimeFunction, norm +) +from devito.ir import Expression, FindNodes, Iteration class TestCodeGenSkewing: diff --git a/tests/test_sparse.py b/tests/test_sparse.py index 14906d3c74..b8fab00c2d 100644 --- a/tests/test_sparse.py +++ b/tests/test_sparse.py @@ -1,14 +1,14 @@ from math import floor -import pytest import numpy as np +import pytest 
import scipy.sparse -from devito import (Grid, TimeFunction, Eq, Operator, Dimension, Function, - SparseFunction, SparseTimeFunction, PrecomputedSparseFunction, - PrecomputedSparseTimeFunction, MatrixSparseTimeFunction, - switchconfig) - +from devito import ( + Dimension, Eq, Function, Grid, MatrixSparseTimeFunction, Operator, + PrecomputedSparseFunction, PrecomputedSparseTimeFunction, SparseFunction, + SparseTimeFunction, TimeFunction, switchconfig +) _sptypes = [SparseFunction, SparseTimeFunction, PrecomputedSparseFunction, PrecomputedSparseTimeFunction] diff --git a/tests/test_staggered_utils.py b/tests/test_staggered_utils.py index 4bf7d1e80a..86c6e99fa7 100644 --- a/tests/test_staggered_utils.py +++ b/tests/test_staggered_utils.py @@ -1,10 +1,12 @@ -import pytest import numpy as np +import pytest from sympy import simplify -from devito import (Function, Grid, NODE, CELL, VectorTimeFunction, - TimeFunction, Eq, Operator, div, Dimension) -from devito.tools import powerset, as_tuple +from devito import ( + CELL, NODE, Dimension, Eq, Function, Grid, Operator, TimeFunction, VectorTimeFunction, + div +) +from devito.tools import as_tuple, powerset @pytest.mark.parametrize('ndim', [1, 2, 3]) diff --git a/tests/test_subdomains.py b/tests/test_subdomains.py index 03d6d9ef9e..a053f4a296 100644 --- a/tests/test_subdomains.py +++ b/tests/test_subdomains.py @@ -1,14 +1,16 @@ -import pytest -import numpy as np from math import floor +import numpy as np +import pytest from sympy import sin, tan -from conftest import opts_tiling, assert_structure -from devito import (ConditionalDimension, Constant, Grid, Function, TimeFunction, - Eq, solve, Operator, SubDomain, SubDomainSet, Lt, SparseFunction, - SparseTimeFunction, VectorFunction, TensorFunction, Border) -from devito.ir import FindNodes, FindSymbols, Expression, Iteration, SymbolRegistry +from conftest import assert_structure, opts_tiling +from devito import ( + Border, ConditionalDimension, Constant, Eq, Function, Grid, Lt, 
Operator, + SparseFunction, SparseTimeFunction, SubDomain, SubDomainSet, TensorFunction, + TimeFunction, VectorFunction, solve +) +from devito.ir import Expression, FindNodes, FindSymbols, Iteration, SymbolRegistry from devito.tools import timed_region diff --git a/tests/test_symbolic_coefficients.py b/tests/test_symbolic_coefficients.py index 621a23ca88..cc540691a8 100644 --- a/tests/test_symbolic_coefficients.py +++ b/tests/test_symbolic_coefficients.py @@ -1,9 +1,10 @@ import numpy as np -import sympy as sp import pytest +import sympy as sp -from devito import (Grid, Function, TimeFunction, Eq, - Dimension, solve, Operator, div, grad, laplace) +from devito import ( + Dimension, Eq, Function, Grid, Operator, TimeFunction, div, grad, laplace, solve +) from devito.finite_differences import Differentiable from devito.finite_differences.coefficients import Coefficient, Substitutions from devito.finite_differences.finite_difference import _PRECISION diff --git a/tests/test_symbolics.py b/tests/test_symbolics.py index e77adb0c26..548ce3fc3c 100644 --- a/tests/test_symbolics.py +++ b/tests/test_symbolics.py @@ -1,26 +1,30 @@ from ctypes import c_void_p -import sympy -import pytest import numpy as np - +import pytest +import sympy from sympy import And, Expr, Number, Symbol, true -from devito import (Constant, Dimension, Grid, Function, solve, TimeFunction, Eq, # noqa - Operator, SubDimension, norm, Le, Ge, Gt, Lt, Abs, sin, cos, - Min, Max, Real, Imag, Conj, SubDomain, configuration) -from devito.finite_differences.differentiable import SafeInv, Weights, Mul + +from devito import ( # noqa + Abs, Conj, Constant, Dimension, Eq, Function, Ge, Grid, Gt, Imag, Le, Lt, Max, Min, + Operator, Real, SubDimension, SubDomain, TimeFunction, configuration, cos, norm, sin, + solve +) +from devito.finite_differences.differentiable import Mul, SafeInv, Weights from devito.ir import Expression, FindNodes, ccode -from devito.ir.support.guards import GuardExpr, simplify_and, pairwise_or 
+from devito.ir.support.guards import GuardExpr, pairwise_or, simplify_and from devito.mpi.halo_scheme import HaloTouch -from devito.symbolics import ( - retrieve_functions, retrieve_indexed, evalrel, CallFromPointer, Cast, # noqa - DefFunction, FieldFromPointer, INT, FieldFromComposite, IntDiv, Namespace, - Rvalue, ReservedWord, ListInitializer, uxreplace, pow_to_mul, - retrieve_derivatives, BaseCast, SizeOf, VectorAccess +from devito.symbolics import ( # noqa + INT, BaseCast, CallFromPointer, Cast, DefFunction, FieldFromComposite, + FieldFromPointer, IntDiv, ListInitializer, Namespace, ReservedWord, Rvalue, SizeOf, + VectorAccess, evalrel, pow_to_mul, retrieve_derivatives, retrieve_functions, + retrieve_indexed, uxreplace +) +from devito.tools import CustomDtype, as_tuple +from devito.types import ( + Array, Bundle, ComponentAccess, FIndexed, LocalObject, Object, StencilDimension ) -from devito.tools import as_tuple, CustomDtype -from devito.types import (Array, Bundle, FIndexed, LocalObject, Object, - ComponentAccess, StencilDimension, Symbol as dSymbol) +from devito.types import Symbol as dSymbol from devito.types.basic import AbstractSymbol diff --git a/tests/test_tensors.py b/tests/test_tensors.py index 2f3090fee4..790cab9020 100644 --- a/tests/test_tensors.py +++ b/tests/test_tensors.py @@ -1,12 +1,11 @@ import numpy as np -import sympy -from sympy import Rational, Matrix - import pytest +import sympy +from sympy import Matrix, Rational -from devito import VectorFunction, TensorFunction, VectorTimeFunction, TensorTimeFunction from devito import ( - Grid, Function, TimeFunction, Dimension, Eq, div, grad, curl, laplace, diag + Dimension, Eq, Function, Grid, TensorFunction, TensorTimeFunction, TimeFunction, + VectorFunction, VectorTimeFunction, curl, diag, div, grad, laplace ) from devito.symbolics import retrieve_derivatives from devito.types import NODE diff --git a/tests/test_threading.py b/tests/test_threading.py index 9183f15f58..1c753a16a9 100644 --- 
a/tests/test_threading.py +++ b/tests/test_threading.py @@ -1,9 +1,11 @@ from concurrent.futures import ThreadPoolExecutor -from devito import Operator, TimeFunction, Grid, Eq -from devito.logger import info -import numpy as np from threading import current_thread +import numpy as np + +from devito import Eq, Grid, Operator, TimeFunction +from devito.logger import info + def test_concurrent_executing_operators(): rng = np.random.default_rng() diff --git a/tests/test_timestepping.py b/tests/test_timestepping.py index 9a84594728..b29400043f 100644 --- a/tests/test_timestepping.py +++ b/tests/test_timestepping.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from devito import Grid, Eq, Operator, TimeFunction +from devito import Eq, Grid, Operator, TimeFunction @pytest.fixture diff --git a/tests/test_tools.py b/tests/test_tools.py index d8b17e40fa..0b06883e78 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -1,14 +1,15 @@ import os +import time + import numpy as np import pytest from sympy.abc import a, b, c, d, e -import time - -from devito import Operator, Eq, switchenv -from devito.tools import (UnboundedMultiTuple, ctypes_to_cstr, toposort, - filter_ordered, transitive_closure, UnboundTuple, - CacheInstances) +from devito import Eq, Operator, switchenv +from devito.tools import ( + CacheInstances, UnboundedMultiTuple, UnboundTuple, ctypes_to_cstr, filter_ordered, + toposort, transitive_closure +) from devito.types.basic import Symbol diff --git a/tests/test_tti.py b/tests/test_tti.py index 6f34f531b0..698df07f79 100644 --- a/tests/test_tti.py +++ b/tests/test_tti.py @@ -4,8 +4,8 @@ from devito import TimeFunction from devito.logger import log -from examples.seismic.model import SeismicModel from examples.seismic.acoustic import acoustic_setup +from examples.seismic.model import SeismicModel from examples.seismic.tti import tti_setup diff --git a/tests/test_unexpansion.py b/tests/test_unexpansion.py index 94278f3499..d5ef86c7c4 100644 --- 
a/tests/test_unexpansion.py +++ b/tests/test_unexpansion.py @@ -1,14 +1,16 @@ import numpy as np import pytest -from conftest import assert_structure, get_params, get_arrays, check_array -from devito import (Buffer, Eq, Function, TimeFunction, Grid, Operator, - Coefficient, Substitutions, cos, sin) -from devito.finite_differences import Weights +from conftest import assert_structure, check_array, get_arrays, get_params +from devito import ( + Buffer, Coefficient, Eq, Function, Grid, Operator, Substitutions, TimeFunction, cos, + sin +) from devito.arch.compiler import OneapiCompiler +from devito.finite_differences import Weights from devito.ir import Expression, FindNodes, FindSymbols -from devito.parameters import switchconfig, configuration -from devito.types import Symbol, Dimension +from devito.parameters import configuration, switchconfig +from devito.types import Dimension, Symbol class TestLoopScheduling: diff --git a/tests/test_visitors.py b/tests/test_visitors.py index 0291d75cf2..a7db2c3f13 100644 --- a/tests/test_visitors.py +++ b/tests/test_visitors.py @@ -1,14 +1,15 @@ import cgen as c -from sympy import Mod import pytest +from sympy import Mod -from devito import Grid, Eq, Function, TimeFunction, Operator, Min, sin +from devito import Eq, Function, Grid, Min, Operator, TimeFunction, sin from devito.ir.equations import DummyEq -from devito.ir.iet import (Block, Expression, Callable, FindNodes, FindSections, - FindSymbols, IsPerfectIteration, Transformer, - Conditional, printAST, Iteration, MapNodes, Call, - FindApplications) -from devito.types import SpaceDimension, Array, Symbol +from devito.ir.iet import ( + Block, Call, Callable, Conditional, Expression, FindApplications, FindNodes, + FindSections, FindSymbols, IsPerfectIteration, Iteration, MapNodes, Transformer, + printAST +) +from devito.types import Array, SpaceDimension, Symbol @pytest.fixture(scope="module") diff --git a/tests/test_warnings.py b/tests/test_warnings.py index 
579420d449..b3f9e22741 100644 --- a/tests/test_warnings.py +++ b/tests/test_warnings.py @@ -1,7 +1,8 @@ -import pytest import warnings -from devito.warnings import warn, DevitoWarning +import pytest + +from devito.warnings import DevitoWarning, warn class NewWarning(UserWarning): From 8e0ccb9d0b697ec819f0ae21009f70d0c62e8de4 Mon Sep 17 00:00:00 2001 From: JDBetteridge Date: Wed, 24 Dec 2025 18:25:29 +0000 Subject: [PATCH 21/42] misc: Update Hadolint rules and clean dockerfiles --- .github/workflows/lint.yml | 8 ++++---- .pre-commit-config.yaml | 1 + docker/Dockerfile.amd | 2 +- docker/Dockerfile.cpu | 2 +- docker/Dockerfile.devito | 12 ++++++------ docker/Dockerfile.intel | 4 +++- docker/Dockerfile.nvidia | 20 ++++++++++++-------- 7 files changed, 28 insertions(+), 21 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index f9d87b5738..ac81a989d4 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -47,7 +47,7 @@ jobs: name: "Spellcheck everything" runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python 3.10 uses: actions/setup-python@v6 with: @@ -69,7 +69,7 @@ jobs: # #example-error-annotation-on-github-actions runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v6 - name: Check workflow files uses: docker://rhysd/actionlint:latest with: @@ -81,9 +81,9 @@ jobs: container: image: hadolint/hadolint:latest-alpine env: - HADOLINT_IGNORE: "DL3005,DL3007,DL3008,DL3015,DL3059" + HADOLINT_IGNORE: "DL3003,DL3004,DL3005,DL3007,DL3008,DL3009,DL3013,DL3015,DL3042,DL3059,SC2103,SC2046,SC2086" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v6 - name: Lint dockerfiles inside hadolint container run: | for DOCKERFILE in docker/Dockerfile.*; \ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 58af58f97b..284d1fcaa4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,3 +33,4 @@ 
repos: rev: v2.12.0 hooks: - id: hadolint-docker + entry: -e HADOLINT_IGNORE=DL3003,DL3004,DL3005,DL3007,DL3008,DL3009,DL3013,DL3015,DL3042,DL3059,SC2103,SC2046,SC2086 ghcr.io/hadolint/hadolint hadolint diff --git a/docker/Dockerfile.amd b/docker/Dockerfile.amd index 67b65151f2..983fbecced 100644 --- a/docker/Dockerfile.amd +++ b/docker/Dockerfile.amd @@ -28,7 +28,7 @@ ENV PATH=$ROCM_HOME/bin:$PATH \ LD_LIBRARY_PATH=$ROCM_HOME/lib:$ROCM_HOME/lib/llvm/lib:$LD_LIBRARY_PATH # Until rocm base has it fixed -RUN ln -s /opt/rocm/llvm/bin/offload-arch /opt/rocm/bin/offload-arch | echo "offload-arch already exis" +RUN ln -s /opt/rocm/llvm/bin/offload-arch /opt/rocm/bin/offload-arch || echo "offload-arch already exists" # Install UCX RUN cd /tmp/ \ diff --git a/docker/Dockerfile.cpu b/docker/Dockerfile.cpu index 7e27266a09..8a5d5ad0a1 100644 --- a/docker/Dockerfile.cpu +++ b/docker/Dockerfile.cpu @@ -54,7 +54,7 @@ ENV PATH=${PATH}:/opt/openmpi/bin ENV LD_LIBRARY_PATH=/opt/openmpi/lib # Cleanup -RUN apt-get clean && apt-get autoclean && apt-get autoremove -y && rm -rf /var/lib/apt/lists/* +RUN apt-get clean && apt-get autoclean && apt-get autoremove -y && rm -rf /var/lib/apt/lists/* EXPOSE 8888 CMD ["/bin/bash"] diff --git a/docker/Dockerfile.devito b/docker/Dockerfile.devito index ac580ef95a..672daf0a9b 100644 --- a/docker/Dockerfile.devito +++ b/docker/Dockerfile.devito @@ -21,7 +21,7 @@ RUN python3 -m venv /venv && \ ln -fs /app/nvtop/build/src/nvtop /venv/bin/nvtop # Copy Devito -ADD . /app/devito +COPY . 
/app/devito # Remove git files RUN rm -rf /app/devito/.git @@ -30,7 +30,7 @@ RUN rm -rf /app/devito/.git RUN eval "$MPI4PY_FLAGS /venv/bin/pip install --no-cache-dir --verbose -r /app/devito/requirements-mpi.txt" # Devito -RUN /venv/bin/pip install --no-cache-dir -e /app/devito[extras,tests] && rm -rf ~/.cache/pip +RUN /venv/bin/pip install --no-cache-dir -e "/app/devito[extras,tests]" && rm -rf ~/.cache/pip FROM $base AS utilities @@ -80,10 +80,10 @@ RUN groupadd -g ${GROUP_ID} app && \ COPY --from=builder --chown=app:app /app /app COPY --from=utilities --chown=app:app /app/nvtop /app/nvtop -ADD --chown=app:app docker/run-jupyter.sh /jupyter -ADD --chown=app:app docker/run-tests.sh /tests -ADD --chown=app:app docker/run-print-defaults.sh /print-defaults -ADD --chown=app:app docker/entrypoint.sh /docker-entrypoint.sh +COPY --chown=app:app docker/run-jupyter.sh /jupyter +COPY --chown=app:app docker/run-tests.sh /tests +COPY --chown=app:app docker/run-print-defaults.sh /print-defaults +COPY --chown=app:app docker/entrypoint.sh /docker-entrypoint.sh RUN chmod +x /print-defaults /jupyter /tests /docker-entrypoint.sh # Venv diff --git a/docker/Dockerfile.intel b/docker/Dockerfile.intel index e013fccc03..a84f20fed7 100644 --- a/docker/Dockerfile.intel +++ b/docker/Dockerfile.intel @@ -26,7 +26,8 @@ FROM base AS oneapi # Download the key to system keyring # https://www.intel.com/content/www/us/en/develop/documentation/installation-guide-for-intel-oneapi-toolkits-linux/top/installation/install-using-package-managers/apt.html#apt -RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor > /usr/share/keyrings/oneapi-archive-keyring.gpg +SHELL ["/bin/bash", "-o", "pipefail"] +RUN wget --progress=dot:giga -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor > /usr/share/keyrings/oneapi-archive-keyring.gpg RUN echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg]
https://apt.repos.intel.com/oneapi all main" > /etc/apt/sources.list.d/oneAPI.list # Intel advisor and drivers @@ -36,6 +37,7 @@ RUN apt-get update -y && \ # Drivers mandatory for intel gpu # https://dgpu-docs.intel.com/driver/installation.html#ubuntu-install-steps +SHELL ["/bin/bash", "-o", "pipefail"] RUN wget -qO - https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor > /usr/share/keyrings/intel-graphics.gpg RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy unified" > /etc/apt/sources.list.d/intel-gpu-jammy.list diff --git a/docker/Dockerfile.nvidia b/docker/Dockerfile.nvidia index 66e4cba5e7..00a86ca96a 100644 --- a/docker/Dockerfile.nvidia +++ b/docker/Dockerfile.nvidia @@ -15,10 +15,11 @@ ENV DEBIAN_FRONTEND=noninteractive # Install python RUN apt-get update && \ - apt-get install -y -q gpg apt-utils curl wget libnuma-dev cmake git \ + apt-get install -y -q gpg apt-utils curl libnuma-dev cmake git \ dh-autoreconf python3-venv python3-dev python3-pip # nodesource: nvdashboard requires nodejs>=10 +SHELL ["/bin/bash", "-o", "pipefail"] RUN curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | gpg --yes --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg RUN arch="$(uname -m)" && \ case "$arch" in \ @@ -27,9 +28,9 @@ RUN arch="$(uname -m)" && \ *) echo "Unsupported architecture: $arch" >&2; exit 1 ;; \ esac && \ echo "deb [trusted=yes, signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/${nvplat} /" | tee /etc/apt/sources.list.d/nvhpc.list -RUN apt-key update *&& apt-get update -y +RUN apt-key update && apt-get update -y -# Install nvhpc. `nvhpc` is the alias for the latest avaialble version +# Install nvhpc. `nvhpc` is the alias for the latest available version ARG ver=nvhpc # We use the standard apt-get for the default latest nvhpc. 
For earlier version, apt has a bug that it will always # install the latest nvhpc-x-y no matter which version nvhpc-x-z is requested which would double (extra 10Gb) the size of the image. @@ -43,10 +44,12 @@ RUN arch="$(uname -m)" && \ if [ "$ver" = "nvhpc" ]; then \ apt-get install -y -q --allow-unauthenticated ${ver}; \ else \ - export year=$(echo $ver | cut -d "-" -f 2) && \ - export minor=$(echo $ver | cut -d "-" -f 3) && \ - wget -O nvhpc.deb "https://developer.download.nvidia.com/hpc-sdk/ubuntu/${nvplat}/nvhpc_${year}.${minor}_${nvplat}.deb" \ - || wget -O nvhpc.deb "https://developer.download.nvidia.com/hpc-sdk/ubuntu/${nvplat}/nvhpc_${year}.${minor}-0_${nvplat}.deb" && \ + year=$(echo $ver | cut -d "-" -f 2) && \ + export year && \ + minor=$(echo $ver | cut -d "-" -f 3) && \ + export minor && \ + curl -o nvhpc.deb -L "https://developer.download.nvidia.com/hpc-sdk/ubuntu/${nvplat}/nvhpc_${year}.${minor}_${nvplat}.deb" \ + || curl -o nvhpc.deb -L "https://developer.download.nvidia.com/hpc-sdk/ubuntu/${nvplat}/nvhpc_${year}.${minor}-0_${nvplat}.deb" && \ apt-get install --allow-unauthenticated -y -q ./nvhpc.deb; \ fi; @@ -89,6 +92,7 @@ ENV UCX_TLS=cuda,cuda_copy,cuda_ipc,sm,shm,self #ENV UCX_TLS=cuda,cuda_copy,cuda_ipc,sm,shm,self,rc_x,gdr_copy # Make simlink for path setup since ENV doesn't accept shell commands. 
+SHELL ["/bin/bash", "-o", "pipefail"] RUN arch="$(uname -m)" && \ case "$arch" in \ x86_64) linux=Linux_x86_64 ;; \ @@ -157,7 +161,7 @@ CMD ["/bin/bash"] FROM sdk-base AS nvc # Make devito env vars file and extras -ADD docker/nvdashboard.json /app/nvdashboard.json +COPY docker/nvdashboard.json /app/nvdashboard.json # mpi4py ENV MPI4PY_FLAGS='source $HPCSDK_HOME/comm_libs/hpcx/latest/hpcx-init.sh && hpcx_load && CC=nvc CFLAGS="-noswitcherror -tp=px"' From e2740eb75069f3df19f0a45ab40bf9330c62f43e Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 2 Jan 2026 13:04:53 +0000 Subject: [PATCH 22/42] misc: Lint action files --- .github/actionlint.yaml | 5 +++++ .github/workflows/asv.yml | 6 +++--- .github/workflows/docker-bases.yml | 16 ++++++++-------- .github/workflows/docker-devito.yml | 8 ++++---- .github/workflows/examples-mpi.yml | 2 +- .github/workflows/examples.yml | 2 +- .github/workflows/pytest-core-nompi.yml | 10 +++++----- .github/workflows/pytest-gpu.yml | 12 ++++++------ .github/workflows/tutorials.yml | 6 +++--- 9 files changed, 36 insertions(+), 31 deletions(-) create mode 100644 .github/actionlint.yaml diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml new file mode 100644 index 0000000000..0c71349fbe --- /dev/null +++ b/.github/actionlint.yaml @@ -0,0 +1,5 @@ +self-hosted-runner: + labels: + - asv + - amdgpu + - nvidiagpu diff --git a/.github/workflows/asv.yml b/.github/workflows/asv.yml index 3ed67c1675..dde1ae91d4 100644 --- a/.github/workflows/asv.yml +++ b/.github/workflows/asv.yml @@ -36,12 +36,12 @@ jobs: - name: Set VIRTUAL_ENV run: | - echo "VIRTUAL_ENV=$ENVHOME/asv" >> $GITHUB_ENV - echo "PATH=$VIRTUAL_ENV/bin:$PATH" >> $GITHUB_ENV + echo "VIRTUAL_ENV=$ENVHOME/asv" >> "$GITHUB_ENV" + echo "PATH=$VIRTUAL_ENV/bin:$PATH" >> "$GITHUB_ENV" - name: Set PATH run: | - echo "PATH=$VIRTUAL_ENV/bin:$PATH" >> $GITHUB_ENV + echo "PATH=$VIRTUAL_ENV/bin:$PATH" >> "$GITHUB_ENV" - name: Install dependencies run: | diff --git 
a/.github/workflows/docker-bases.yml b/.github/workflows/docker-bases.yml index 30fdc4d8c2..48ada2ba8f 100644 --- a/.github/workflows/docker-bases.yml +++ b/.github/workflows/docker-bases.yml @@ -114,10 +114,10 @@ jobs: TAG_BASE: ${{ format('cpu-gcc{0}', matrix.gcc) }} run: | docker buildx imagetools create \ - --tag devitocodes/bases:${TAG_BASE} \ - devitocodes/bases:${TAG_BASE}-amd64 \ - devitocodes/bases:${TAG_BASE}-arm64 - docker buildx imagetools inspect devitocodes/bases:${TAG_BASE} + --tag "devitocodes/bases:${TAG_BASE}" \ + "devitocodes/bases:${TAG_BASE}-amd64" \ + "devitocodes/bases:${TAG_BASE}-arm64" + docker buildx imagetools inspect "devitocodes/bases:${TAG_BASE}" ####################################################### ############## Intel OneApi CPU ####################### @@ -296,10 +296,10 @@ jobs: FINAL_TAG: ${{ matrix.final_tag }} run: | docker buildx imagetools create \ - --tag devitocodes/bases:${FINAL_TAG} \ - devitocodes/bases:${FINAL_TAG}-amd64 \ - devitocodes/bases:${FINAL_TAG}-arm64 - docker buildx imagetools inspect devitocodes/bases:${FINAL_TAG} + --tag "devitocodes/bases:${FINAL_TAG}" \ + "devitocodes/bases:${FINAL_TAG}-amd64" \ + "devitocodes/bases:${FINAL_TAG}-arm64" + docker buildx imagetools inspect "devitocodes/bases:${FINAL_TAG}" ####################################################### ##################### AMD ############################# diff --git a/.github/workflows/docker-devito.yml b/.github/workflows/docker-devito.yml index 87db1bc77a..2d3211953d 100644 --- a/.github/workflows/docker-devito.yml +++ b/.github/workflows/docker-devito.yml @@ -113,7 +113,7 @@ jobs: - name: Set per‑runner variables run: | - echo "CONTAINER_NAME=testrun-${{ matrix.tag }}-${RUNNER_NAME// /_}" >> $GITHUB_ENV + echo "CONTAINER_NAME=testrun-${{ matrix.tag }}-${RUNNER_NAME// /_}" >> "$GITHUB_ENV" - name: Check event name run: echo ${{ github.event_name }} @@ -246,8 +246,8 @@ jobs: refs="$refs devitocodes/devito:${tag}-${arch}" done echo "Creating 
manifest for devitocodes/devito:${tag} using:${refs}" - docker buildx imagetools create --tag devitocodes/devito:${tag} $refs - docker buildx imagetools inspect devitocodes/devito:${tag} + docker buildx imagetools create --tag "devitocodes/devito:${tag}" $refs + docker buildx imagetools inspect "devitocodes/devito:${tag}" done test-devito: @@ -286,7 +286,7 @@ jobs: - name: Set per‑runner variables run: | - echo "CONTAINER_NAME=testrun-${{ matrix.tag }}-${RUNNER_NAME// /_}" >> $GITHUB_ENV + echo "CONTAINER_NAME=testrun-${{ matrix.tag }}-${RUNNER_NAME// /_}" >> "$GITHUB_ENV" - name: Run tests against multi-arch image run: | diff --git a/.github/workflows/examples-mpi.yml b/.github/workflows/examples-mpi.yml index 12f69caec7..034a3f3663 100644 --- a/.github/workflows/examples-mpi.yml +++ b/.github/workflows/examples-mpi.yml @@ -63,7 +63,7 @@ jobs: - name: Install dependencies run: | pip install --upgrade pip - pip install -e .[extras,mpi,tests] + pip install -e ".[extras,mpi,tests]" python3 scripts/clear_devito_cache.py - name: Test mpi notebooks diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 9859896247..07e4c54475 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -101,4 +101,4 @@ jobs: uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} - name: ${{ matrix.name }} + name: Examples diff --git a/.github/workflows/pytest-core-nompi.yml b/.github/workflows/pytest-core-nompi.yml index 085e0e0471..19c2379b9f 100644 --- a/.github/workflows/pytest-core-nompi.yml +++ b/.github/workflows/pytest-core-nompi.yml @@ -153,9 +153,9 @@ jobs: - name: Set run prefix run: | if [[ "${{ matrix.name }}" =~ "docker" ]]; then - echo "RUN_CMD=docker run --init -t --rm -e CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }} --name testrun devito_img" >> $GITHUB_ENV + echo "RUN_CMD=docker run --init -t --rm -e CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }} --name testrun devito_img" >> "$GITHUB_ENV" else - echo 
"RUN_CMD=" >> $GITHUB_ENV + echo "RUN_CMD=" >> "$GITHUB_ENV" fi id: set-run @@ -168,7 +168,7 @@ jobs: run : | if [ "${{ runner.os }}" == 'macOS' ]; then brew install llvm libomp - echo "/opt/homebrew/opt/llvm/bin" >> $GITHUB_PATH + echo "/opt/homebrew/opt/llvm/bin" >> "$GITHUB_PATH" fi id: set-tests @@ -178,14 +178,14 @@ jobs: major=${ver%%.*} minor=${ver#*.}; minor=${minor%%.*} if [ "$major" -eq 3 ] && [ "$minor" -ge 12 ]; then - echo "PIPFLAGS='--break-system-packages'" >> $GITHUB_ENV + echo "PIPFLAGS='--break-system-packages'" >> "$GITHUB_ENV" fi - name: Install dependencies if: "!contains(matrix.name, 'docker')" run: | python3 -m pip install ${{ env.PIPFLAGS }} --upgrade pip - python3 -m pip install ${{ env.PIPFLAGS }} -e .[tests,extras] + python3 -m pip install ${{ env.PIPFLAGS }} -e ".[tests,extras]" python3 -m pip install ${{ env.PIPFLAGS }} sympy==${{matrix.sympy}} - name: Check Docker image Python version diff --git a/.github/workflows/pytest-gpu.yml b/.github/workflows/pytest-gpu.yml index 4b0dc72f9f..5725bca980 100644 --- a/.github/workflows/pytest-gpu.yml +++ b/.github/workflows/pytest-gpu.yml @@ -79,8 +79,8 @@ jobs: - name: Set per-runner tags run: | - echo "DOCKER_IMAGE=${{ matrix.name }}-${RUNNER_NAME// /_}" >> $GITHUB_ENV - echo "CONTAINER_BASENAME=testrun-${{ matrix.name }}-${RUNNER_NAME// /_}-${{ github.sha }}" >> $GITHUB_ENV + echo "DOCKER_IMAGE=${{ matrix.name }}-${RUNNER_NAME// /_}" >> "$GITHUB_ENV" + echo "CONTAINER_BASENAME=testrun-${{ matrix.name }}-${RUNNER_NAME// /_}-${{ github.sha }}" >> "$GITHUB_ENV" - name: Ensure buildx builder run: | @@ -93,7 +93,7 @@ jobs: docker buildx build . \ --builder "${RUNNER_NAME// /_}" \ --load \ - --label ci-run=$GITHUB_RUN_ID \ + --label ci-run="$GITHUB_RUN_ID" \ --rm --pull \ --file docker/Dockerfile.devito \ --tag "${DOCKER_IMAGE}" \ @@ -107,7 +107,7 @@ jobs: # Make sure CUDA_VISIBLE_DEVICES is at least *something* on NVIDIA # runners; fall back to "all" so the driver probe does not fail. 
if [[ "${{ matrix.runner_label }}" == "nvidiagpu" && -z "${CUDA_VISIBLE_DEVICES:-}" ]]; then - echo "CUDA_VISIBLE_DEVICES=all" >> $GITHUB_ENV + echo "CUDA_VISIBLE_DEVICES=all" >> "$GITHUB_ENV" fi # Run a simple driver-probe command (nvidia-smi / rocm-smi) @@ -124,7 +124,7 @@ jobs: # Run the test suite using the matrix-defined flags docker run ${{ matrix.flags }} \ - ${ci_env} \ + "${ci_env}" \ -e CI=true \ -e PYTHONFAULTHANDLER=1 \ -e DEVITO_LOGGING=DEBUG \ @@ -152,7 +152,7 @@ jobs: docker rmi -f "${DOCKER_IMAGE}" || true # Classic image layers created in this job - docker image prune -f --filter label=ci-run=$GITHUB_RUN_ID + docker image prune -f --filter label=ci-run="$GITHUB_RUN_ID" # BuildKit cache: target the per-runner builder explicitly docker builder prune --builder "${RUNNER_NAME// /_}" \ diff --git a/.github/workflows/tutorials.yml b/.github/workflows/tutorials.yml index 2e80a456f1..6dbc6010c8 100644 --- a/.github/workflows/tutorials.yml +++ b/.github/workflows/tutorials.yml @@ -78,9 +78,9 @@ jobs: - name: Set run prefix run: | if [ "${{ matrix.name }}" == 'tutos-docker-gcc-py310' ]; then - echo "RUN_CMD=docker run --init -t --rm --name testrun devito_img" >> $GITHUB_ENV + echo "RUN_CMD=docker run --init -t --rm --name testrun devito_img" >> "$GITHUB_ENV" else - echo "RUN_CMD=" >> $GITHUB_ENV + echo "RUN_CMD=" >> "$GITHUB_ENV" fi id: set-run @@ -88,7 +88,7 @@ jobs: if: matrix.name != 'tutos-docker-gcc-py310' run: | python -m pip install --upgrade pip - pip install -e .[tests,extras] + pip install -e ".[tests,extras]" pip install blosc - name: Check Docker image Python version From 143c4936928a485acde9d0eaf7ff53b9be7832da Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 2 Jan 2026 13:57:59 +0000 Subject: [PATCH 23/42] misc: Fix typos in Devito --- FAQ.md | 2 +- benchmarks/user/advisor/README.md | 2 +- conftest.py | 8 +++---- devito/arch/compiler.py | 8 +++---- devito/builtins/initializers.py | 1 + devito/builtins/utils.py | 2 +- 
devito/checkpointing/checkpoint.py | 4 ++-- devito/core/autotuning.py | 2 +- devito/core/operator.py | 6 ++--- devito/data/data.py | 2 +- devito/data/utils.py | 2 +- devito/finite_differences/derivative.py | 2 +- devito/finite_differences/differentiable.py | 2 +- devito/finite_differences/tools.py | 20 ++++++++-------- devito/ir/clusters/algorithms.py | 4 ++-- devito/ir/clusters/cluster.py | 2 +- devito/ir/equations/algorithms.py | 2 +- devito/ir/stree/tree.py | 6 ++--- devito/ir/support/basic.py | 8 +++---- devito/mpi/halo_scheme.py | 6 ++--- devito/operator/profiling.py | 2 +- devito/parameters.py | 2 +- devito/passes/clusters/aliases.py | 6 ++--- devito/passes/clusters/blocking.py | 4 ++-- devito/passes/clusters/misc.py | 2 +- devito/passes/iet/asynchrony.py | 2 +- devito/passes/iet/dtypes.py | 2 +- devito/passes/iet/engine.py | 4 ++-- devito/passes/iet/langbase.py | 26 ++++++++++----------- devito/passes/iet/languages/C.py | 2 +- devito/passes/iet/languages/openacc.py | 6 ++--- devito/passes/iet/linearization.py | 2 +- devito/passes/iet/misc.py | 2 +- devito/passes/iet/mpi.py | 4 ++-- devito/passes/iet/parpragma.py | 16 ++++++------- devito/symbolics/inspection.py | 2 +- devito/symbolics/manipulation.py | 2 +- devito/tools/utils.py | 2 +- devito/types/basic.py | 6 ++--- devito/types/caching.py | 14 +++++------ devito/types/dense.py | 4 ++-- devito/types/dimension.py | 4 ++-- devito/types/misc.py | 2 +- devito/types/sparse.py | 18 +++++++------- docker/entrypoint.sh | 2 +- pyproject.toml | 18 ++++++++++++++ 46 files changed, 133 insertions(+), 114 deletions(-) diff --git a/FAQ.md b/FAQ.md index 1cc79c3932..2d020d87a1 100644 --- a/FAQ.md +++ b/FAQ.md @@ -64,7 +64,7 @@ Focus on modeling and math — Devito handles the performance. ## What Devito is not -As is hopefully implied by the preceeding section, Devito is *not* a seismic modelling or imaging framework, nor is it a set of pre-baked solvers. 
Whilst Devito is often associated with seismic imaging, this is just one application domain. Our RTM and FWI examples use Devito under the hood, but the engine itself is physics-agnostic. There’s no built-in assumption about waves, seismics, or domains — you provide the physics, and Devito delivers efficient, architecture-aware implementations. +As is hopefully implied by the preceding section, Devito is *not* a seismic modelling or imaging framework, nor is it a set of pre-baked solvers. Whilst Devito is often associated with seismic imaging, this is just one application domain. Our RTM and FWI examples use Devito under the hood, but the engine itself is physics-agnostic. There’s no built-in assumption about waves, seismics, or domains — you provide the physics, and Devito delivers efficient, architecture-aware implementations. Furthermore, the examples provided with Devito are often conflated with the core DSL and compiler. However, the purpose of this collection of tutorials, code samples, and helper functions is fourfold: diff --git a/benchmarks/user/advisor/README.md b/benchmarks/user/advisor/README.md index 35f713c25a..bf25baed31 100644 --- a/benchmarks/user/advisor/README.md +++ b/benchmarks/user/advisor/README.md @@ -7,7 +7,7 @@ We recommend going through tutorial [02_advisor_roofline.ipynb](https://github.c * Support is guaranteed only for Intel oneAPI 2025; earlier versions may not work. You may download Intel oneAPI [here](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html?packages=oneapi-toolkit&oneapi-toolkit-os=linux&oneapi-lin=apt). -* Add Advisor (advixe-cl) and compilers (icx) in the path. The right env variables should be sourced along the lines of (depending on your isntallation folder): +* Add Advisor (advixe-cl) and compilers (icx) in the path. 
The right env variables should be sourced along the lines of (depending on your installation folder): ```sh source /opt/intel/oneapi/advisor/latest/env/vars.sh source /opt/intel/oneapi/compiler/latest/env/vars.sh diff --git a/conftest.py b/conftest.py index 3dc51e2eb0..195f3b96ba 100644 --- a/conftest.py +++ b/conftest.py @@ -370,15 +370,15 @@ def assert_structure(operator, exp_trees=None, exp_iters=None): if exp_trees is not None: trees = retrieve_iteration_tree(operator) exp_trees = [i.replace(',', '') for i in exp_trees] # 't,x,y' -> 'txy' - tree_struc = (["".join(mapper.get(i.dim.name, i.dim.name) for i in j) + tree_struct = (["".join(mapper.get(i.dim.name, i.dim.name) for i in j) for j in trees]) # Flatten every tree's dims as a string - assert tree_struc == exp_trees + assert tree_struct == exp_trees if exp_iters is not None: iters = FindNodes(Iteration).visit(operator) exp_iters = exp_iters.replace(',', '') # 't,x,y' -> 'txy' - iter_struc = "".join(mapper.get(i.dim.name, i.dim.name) for i in iters) - assert iter_struc == exp_iters + iter_struct = "".join(mapper.get(i.dim.name, i.dim.name) for i in iters) + assert iter_struct == exp_iters def assert_blocking(operator, exp_nests): diff --git a/devito/arch/compiler.py b/devito/arch/compiler.py index 130f0f5a0c..edfeec17db 100644 --- a/devito/arch/compiler.py +++ b/devito/arch/compiler.py @@ -1027,14 +1027,14 @@ def __init_finalize__(self, **kwargs): self._base.__init_finalize__(self, **kwargs) # Update cflags try: - extrac = environ.get('CFLAGS').split(' ') - self.cflags = self.cflags + extrac + extra_c = environ.get('CFLAGS').split(' ') + self.cflags = self.cflags + extra_c except AttributeError: pass # Update ldflags try: - extrald = environ.get('LDFLAGS').split(' ') - self.ldflags = self.ldflags + extrald + extra_ld = environ.get('LDFLAGS').split(' ') + self.ldflags = self.ldflags + extra_ld except AttributeError: pass diff --git a/devito/builtins/initializers.py b/devito/builtins/initializers.py index 
db571c4683..eb44c0360e 100644 --- a/devito/builtins/initializers.py +++ b/devito/builtins/initializers.py @@ -348,6 +348,7 @@ def initialize_function(function, data, nbl, mapper=None, mode='constant', [2, 3, 3, 3, 3, 2], [2, 2, 2, 2, 2, 2]], dtype=int32) """ + # TODO: fix the horrendous use of pluralisation in this function !!! if isinstance(function, (list, tuple)): if not isinstance(data, (list, tuple)): raise TypeError("Expected a list of `data`") diff --git a/devito/builtins/utils.py b/devito/builtins/utils.py index ce6b61d957..e271b86a64 100644 --- a/devito/builtins/utils.py +++ b/devito/builtins/utils.py @@ -109,7 +109,7 @@ def pad_outhalo(function): function._data_with_outhalo._local[tuple(slices)] \ = function._data_with_outhalo._local[tuple(slices_d)] if h.left == 0 and h.right == 0: - # Need to access it so that that worker is not blocking exectution since + # Need to access it so that that worker is not blocking execution since # _data_with_outhalo requires communication function._data_with_outhalo._local[0] = function._data_with_outhalo._local[0] diff --git a/devito/checkpointing/checkpoint.py b/devito/checkpointing/checkpoint.py index a3c5b8bcb5..8223ede722 100644 --- a/devito/checkpointing/checkpoint.py +++ b/devito/checkpointing/checkpoint.py @@ -51,7 +51,7 @@ class DevitoCheckpoint(Checkpoint): pyRevolve. Holds a list of symbol objects that hold data. """ def __init__(self, objects): - """Intialise a checkpoint object. Upon initialisation, a checkpoint + """Initialise a checkpoint object. Upon initialisation, a checkpoint stores only a reference to the objects that are passed into it.""" assert(all(isinstance(o, TimeFunction) for o in objects)) dtypes = set([o.dtype for o in objects]) @@ -92,7 +92,7 @@ def get_symbol_data(symbol, timestep): # Use `._data`, instead of `.data`, as `.data` is a view of the DOMAIN # data region which is non-contiguous in memory. 
The performance hit from # dealing with non-contiguous memory is so big (introduces >1 copy), it's - # better to checkpoint unneccesarry stuff to get a contiguous chunk of memory. + # better to checkpoint unnecessary stuff to get a contiguous chunk of memory. ptr = symbol._data[timestep - i, :, :] ptrs.append(ptr) return ptrs diff --git a/devito/core/autotuning.py b/devito/core/autotuning.py index ccef3a9e72..76812edf2e 100644 --- a/devito/core/autotuning.py +++ b/devito/core/autotuning.py @@ -141,7 +141,7 @@ def autotune(operator, args, level, mode): update_time_bounds(stepper, at_args, timesteps, mode) timer.reset() - # The best variant is the one that for a given number of threads had the minium + # The best variant is the one that for a given number of threads had the minimum # turnaround time try: runs = 0 diff --git a/devito/core/operator.py b/devito/core/operator.py index 10df2eed31..1bf7ed41bc 100644 --- a/devito/core/operator.py +++ b/devito/core/operator.py @@ -73,7 +73,7 @@ class BasicOperator(Operator): CIRE_MINMEM = True """ Minimize memory consumption when allocating temporaries for CIRE-optimized - expressions. This may come at the cost of slighly worse performance due to + expressions. This may come at the cost of slightly worse performance due to the potential need for extra registers to hold a greater number of support variables (e.g., strides). """ @@ -91,7 +91,7 @@ class BasicOperator(Operator): PAR_COLLAPSE_WORK = 100 """ - Use a collapse clause if the trip count of the collapsable loops is statically + Use a collapse clause if the trip count of the collapsible loops is statically known to exceed this threshold. 
""" @@ -172,7 +172,7 @@ class BasicOperator(Operator): @classmethod def _normalize_kwargs(cls, **kwargs): - # Will be populated with dummy values; this method is actually overriden + # Will be populated with dummy values; this method is actually overridden # by the subclasses o = {} oo = kwargs['options'] diff --git a/devito/data/data.py b/devito/data/data.py index 02660ca3f5..f9280305f6 100644 --- a/devito/data/data.py +++ b/devito/data/data.py @@ -337,7 +337,7 @@ def __setitem__(self, glb_idx, val, comm_type): elif np.isscalar(val): if index_is_basic(loc_idx): # Won't go through `__getitem__` as it's basic indexing mode, - # so we should just propage `loc_idx` + # so we should just propagate `loc_idx` super().__setitem__(loc_idx, val) else: super().__setitem__(glb_idx, val) diff --git a/devito/data/utils.py b/devito/data/utils.py index c624fcd7d6..ab8db341e6 100644 --- a/devito/data/utils.py +++ b/devito/data/utils.py @@ -172,7 +172,7 @@ def mpi_index_maps(loc_idx, shape, topology, coords, comm): that data is stored. send: An array of shape ``shape`` where each index signifies the rank to which - data beloning to that index should be sent. + data belonging to that index should be sent. global_si: An array of ``shape`` shape where each index contains the global index to which that index should be sent. diff --git a/devito/finite_differences/derivative.py b/devito/finite_differences/derivative.py index e3a03e3718..8d572e2a69 100644 --- a/devito/finite_differences/derivative.py +++ b/devito/finite_differences/derivative.py @@ -530,7 +530,7 @@ def _eval_at(self, func): # derivative at x0. return self._rebuild(self.expr._gather_for_diff, **rkw) else: - # For every other cases, that has more functions or more complexe arithmetic, + # For every other cases, that has more functions or more complex arithmetic, # there is not actual way to decide what to do so it’s as safe to use # the expression as is. 
return self._rebuild(self.expr, **rkw) diff --git a/devito/finite_differences/differentiable.py b/devito/finite_differences/differentiable.py index c20e32e3e8..f0d2b77d1f 100644 --- a/devito/finite_differences/differentiable.py +++ b/devito/finite_differences/differentiable.py @@ -39,7 +39,7 @@ class Differentiable(sympy.Expr, Evaluable): """ - A Differentiable is an algebric expression involving Functions, which can + A Differentiable is an algebraic expression involving Functions, which can be derived w.r.t. one or more Dimensions. """ diff --git a/devito/finite_differences/tools.py b/devito/finite_differences/tools.py index cbf3f4a752..91c8b43c85 100644 --- a/devito/finite_differences/tools.py +++ b/devito/finite_differences/tools.py @@ -108,8 +108,8 @@ def diff_f(expr, deriv_order, dims, fd_order, side=None, **kwargs): deriv = partial(diff_f, deriv_order=d_orders, dims=fd_dims, fd_order=fd_orders) name_fd = deriv_name(fd_dims, d_orders) dname = (d.root.name for d in fd_dims) - desciption = 'derivative of order %s w.r.t dimension %s' % (d_orders, dname) - derivatives[name_fd] = (deriv, desciption) + description = 'derivative of order %s w.r.t dimension %s' % (d_orders, dname) + derivatives[name_fd] = (deriv, description) # Add non-conventional, non-centered first-order FDs for d, o in zip(dims, orders): @@ -117,18 +117,18 @@ def diff_f(expr, deriv_order, dims, fd_order, side=None, **kwargs): # Add centered first derivatives deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, side=centered) name_fd = 'd%sc' % name - desciption = 'centered derivative staggered w.r.t dimension %s' % d.name - derivatives[name_fd] = (deriv, desciption) + description = 'centered derivative staggered w.r.t dimension %s' % d.name + derivatives[name_fd] = (deriv, description) # Left deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, side=left) name_fd = 'd%sl' % name - desciption = 'left first order derivative w.r.t dimension %s' % d.name - derivatives[name_fd] = (deriv, 
desciption) + description = 'left first order derivative w.r.t dimension %s' % d.name + derivatives[name_fd] = (deriv, description) # Right deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, side=right) name_fd = 'd%sr' % name - desciption = 'right first order derivative w.r.t dimension %s' % d.name - derivatives[name_fd] = (deriv, desciption) + description = 'right first order derivative w.r.t dimension %s' % d.name + derivatives[name_fd] = (deriv, description) # Add RSFD for first order derivatives for d, o in zip(dims, orders): @@ -136,8 +136,8 @@ def diff_f(expr, deriv_order, dims, fd_order, side=None, **kwargs): name = d.root.name deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, method='RSFD') name_fd = 'd%s45' % name - desciption = 'Derivative w.r.t %s with rotated 45 degree FD' % d.name - derivatives[name_fd] = (deriv, desciption) + description = 'Derivative w.r.t %s with rotated 45 degree FD' % d.name + derivatives[name_fd] = (deriv, description) return derivatives diff --git a/devito/ir/clusters/algorithms.py b/devito/ir/clusters/algorithms.py index 52c868ed12..c6c76567aa 100644 --- a/devito/ir/clusters/algorithms.py +++ b/devito/ir/clusters/algorithms.py @@ -323,7 +323,7 @@ def callback(self, clusters, prefix): for size, v in mapper.items(): for si, iafs in list(v.items()): # Offsets are sorted so that the semantic order (t0, t1, t2) follows - # SymPy's index ordering (t, t-1, t+1) afer modulo replacement so + # SymPy's index ordering (t, t-1, t+1) after modulo replacement so # that associativity errors are consistent. 
This corresponds to # sorting offsets {-1, 0, 1} as {0, -1, 1} assigning -inf to 0 key = lambda i: -np.inf if i - si == 0 else (i - si) @@ -666,7 +666,7 @@ def _normalize_reductions_dense(cluster, mapper, sregistry, platform): # Populate the Array (the "map" part) processed.append(e.func(a.indexify(), rhs, operation=None)) - # Set all untouched entried to the identity value if necessary + # Set all untouched entries to the identity value if necessary if e.conditionals: nc = {d: sympy.Not(v) for d, v in e.conditionals.items()} v = identity_mapper[e.lhs.dtype][e.operation] diff --git a/devito/ir/clusters/cluster.py b/devito/ir/clusters/cluster.py index 8c5f08835d..a0283865a4 100644 --- a/devito/ir/clusters/cluster.py +++ b/devito/ir/clusters/cluster.py @@ -429,7 +429,7 @@ def ops(self): @cached_property def traffic(self): """ - The Cluster compulsary traffic (number of reads/writes), as a mapper + The Cluster compulsory traffic (number of reads/writes), as a mapper from Functions to IntervalGroups. 
Notes diff --git a/devito/ir/equations/algorithms.py b/devito/ir/equations/algorithms.py index 813828e27a..00228c7cbb 100644 --- a/devito/ir/equations/algorithms.py +++ b/devito/ir/equations/algorithms.py @@ -71,7 +71,7 @@ def handle_indexed(indexed): # ----------------------------------------------- # 1) Note that (d.parent, d) is what we want, while (d, d.parent) would be # wrong; for example, in `((t, time), (t, x, y), (x, y))`, `x` could now - # preceed `time`, while `t`, and therefore `time`, *must* appear before `x`, + # precede `time`, while `t`, and therefore `time`, *must* appear before `x`, # as indicated by the second relation implicit_relations = {(d.parent, d) for d in extra if d.is_Derived and not d.indirect} diff --git a/devito/ir/stree/tree.py b/devito/ir/stree/tree.py index 961afedc65..6e4b48ec3e 100644 --- a/devito/ir/stree/tree.py +++ b/devito/ir/stree/tree.py @@ -126,10 +126,10 @@ def __init__(self, exprs, ispace, dspace, ops, traffic, parent=None): @property def __repr_render__(self): - ths = 2 + threshold = 2 n = len(self.exprs) - ret = ",".join("Eq" for i in range(min(n, ths))) - ret = ("%s,..." % ret) if n > ths else ret + ret = ",".join("Eq" for i in range(min(n, threshold))) + ret = ("%s,..." % ret) if n > threshold else ret return "[%s]" % ret diff --git a/devito/ir/support/basic.py b/devito/ir/support/basic.py index a304d50b01..d3f7d00247 100644 --- a/devito/ir/support/basic.py +++ b/devito/ir/support/basic.py @@ -594,7 +594,7 @@ def is_irregular(self): @cached_property def is_lex_positive(self): """ - True if the source preceeds the sink, False otherwise. + True if the source precedes the sink, False otherwise. """ return self.source.timestamp < self.sink.timestamp @@ -613,7 +613,7 @@ def is_lex_ne(self): @cached_property def is_lex_negative(self): """ - True if the sink preceeds the source, False otherwise. + True if the sink precedes the source, False otherwise. 
""" return self.source.timestamp > self.sink.timestamp @@ -948,7 +948,7 @@ def reads_implicit_gen(self): @memoized_generator def reads_synchro_gen(self): """ - Generate all reads due to syncronization operations. These may be explicit + Generate all reads due to synchronization operations. These may be explicit or implicit. """ # Objects altering the control flow (e.g., synchronization barriers, @@ -977,7 +977,7 @@ def reads_gen(self): """ Generate all read accesses. """ - # NOTE: The reason to keep the explicit and implict reads separated + # NOTE: The reason to keep the explicit and implicit reads separated # is efficiency. Sometimes we wish to extract all reads to a given # AbstractFunction, and we know that by construction these can't # appear among the implicit reads diff --git a/devito/mpi/halo_scheme.py b/devito/mpi/halo_scheme.py index a3b803d4f5..4bc3c44b51 100644 --- a/devito/mpi/halo_scheme.py +++ b/devito/mpi/halo_scheme.py @@ -549,11 +549,11 @@ def classify(exprs, ispace): v[(d, LEFT)] = IDENTITY v[(d, RIGHT)] = IDENTITY elif i.affine(d): - thl, thr = i.touched_halo(d) + th_left, th_right = i.touched_halo(d) # Note: if the left-HALO is touched (i.e., `thl = True`), then # the *right-HALO* is to be sent over in a halo exchange - v[(d, LEFT)] = (thr and STENCIL) or IDENTITY - v[(d, RIGHT)] = (thl and STENCIL) or IDENTITY + v[(d, LEFT)] = (th_right and STENCIL) or IDENTITY + v[(d, RIGHT)] = (th_left and STENCIL) or IDENTITY else: v[(d, LEFT)] = STENCIL v[(d, RIGHT)] = STENCIL diff --git a/devito/operator/profiling.py b/devito/operator/profiling.py index 9355b6efe6..b00c5cf04b 100644 --- a/devito/operator/profiling.py +++ b/devito/operator/profiling.py @@ -249,7 +249,7 @@ def _evaluate_section(self, name, data, args, dtype): traffic = np.nan - # Nmber of FLOPs performed at each iteration + # Number of FLOPs performed at each iteration sops = data.sops # Runtime itermaps/itershapes diff --git a/devito/parameters.py b/devito/parameters.py index 
300ff3c5f7..f545139649 100644 --- a/devito/parameters.py +++ b/devito/parameters.py @@ -64,7 +64,7 @@ def wrapper(self, key, value=None): def _preprocess(self, key, value): """ - Execute the preprocesser associated to ``key``, if any. This will + Execute the preprocessor associated to ``key``, if any. This will return a new value. """ if key in self._preprocess_functions: diff --git a/devito/passes/clusters/aliases.py b/devito/passes/clusters/aliases.py index f6ad4c9777..8c33913326 100644 --- a/devito/passes/clusters/aliases.py +++ b/devito/passes/clusters/aliases.py @@ -178,7 +178,7 @@ def _generate(self, cgroup, exclude): Generate one or more extractions from a ClusterGroup. An extraction is a set of CIRE candidates which may be turned into aliases. Two different extractions may contain overlapping sub-expressions and, therefore, - should be processed and evaluated indipendently. An extraction won't + should be processed and evaluated independently. An extraction won't contain any of the symbols appearing in ``exclude``. """ raise NotImplementedError @@ -676,7 +676,7 @@ def lower_aliases(aliases, meta, maxpar): except KeyError: if i.dim in a.free_symbols: # Special case: the Dimension appears within the alias but - # not as an Indexed index. Then, it needs to be addeed to + # not as an Indexed index. 
Then, it needs to be added to # the `writeto` region too interval = i else: @@ -863,7 +863,7 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, name = sregistry.make_name() # Infer the dtype for the pivot # This prevents cases such as `floor(a*b)` with `a` and `b` floats - # that would creat a temporary `int r = b` leading to erronous + # that would creat a temporary `int r = b` leading to erroneous # numerical results if writeto: diff --git a/devito/passes/clusters/blocking.py b/devito/passes/clusters/blocking.py index 08ae0dc6fc..b279f5779c 100644 --- a/devito/passes/clusters/blocking.py +++ b/devito/passes/clusters/blocking.py @@ -100,9 +100,9 @@ def _process_fatd(self, clusters, level, prefix=None): return super()._process_fatd(clusters, level, prefix) def _has_data_reuse(self, cluster): - # A sufficient condition for the existance of data reuse in `cluster` + # A sufficient condition for the existence of data reuse in `cluster` # is that the same Function is accessed twice at the same memory location, - # which translates into the existance of any Relation accross Indexeds + # which translates into the existence of any Relation across Indexeds if any(r.function.is_AbstractFunction for r in cluster.scope.r_gen()): return True if search(cluster.exprs, IndexSum): diff --git a/devito/passes/clusters/misc.py b/devito/passes/clusters/misc.py index b8ec71d3c2..d2b4a2c9f2 100644 --- a/devito/passes/clusters/misc.py +++ b/devito/passes/clusters/misc.py @@ -451,7 +451,7 @@ def callback(self, clusters, prefix): if test0 or guards: # Heuristic: no gain from fissioning if unable to ultimately - # increase the number of collapsable iteration spaces, hence give up + # increase the number of collapsible iteration spaces, hence give up processed.extend(group) else: stamp = Stamp() diff --git a/devito/passes/iet/asynchrony.py b/devito/passes/iet/asynchrony.py index 2ac8d8581d..9d0c3387c8 100644 --- a/devito/passes/iet/asynchrony.py +++ 
b/devito/passes/iet/asynchrony.py @@ -57,7 +57,7 @@ def lower_async_objs(iet, **kwargs): @singledispatch def _lower_async_objs(iet, tracker=None, sregistry=None, **kwargs): - # All Callables, except for AsyncCallables, may containg one or more + # All Callables, except for AsyncCallables, may containing one or more # AsyncCalls, which we have to lower into thread-activation code efuncs = [] subs = {} diff --git a/devito/passes/iet/dtypes.py b/devito/passes/iet/dtypes.py index 28d30df13b..e545e7f1a8 100644 --- a/devito/passes/iet/dtypes.py +++ b/devito/passes/iet/dtypes.py @@ -26,7 +26,7 @@ def _complex_includes(iet: Callable, langbb: type[LangBB], compiler: Compiler, if langbb.get('complex-namespace') is not None: metadata['namespaces'] = langbb['complex-namespace'] - # Some languges such as c++11 need some extra arithmetic definitions + # Some languages such as c++11 need some extra arithmetic definitions if langbb.get('def-complex'): dest = compiler.get_jit_dir() hfile = dest.joinpath('complex_arith.h') diff --git a/devito/passes/iet/engine.py b/devito/passes/iet/engine.py index 3fa8f3249f..d119fbb664 100644 --- a/devito/passes/iet/engine.py +++ b/devito/passes/iet/engine.py @@ -461,7 +461,7 @@ def reuse_efuncs(root, efuncs, sregistry=None): key = afunc._signature() try: - # If we manage to succesfully map `efunc` to a previously abstracted + # If we manage to successfully map `efunc` to a previously abstracted # `afunc`, we need to update the call sites to use the new Call name afunc, mapped = mapper[key] mapped.append(efunc) @@ -744,7 +744,7 @@ def _filter(v, efunc=None): for a in new_params: if a in processed: - # A child efunc trying to add a symbol alredy added by a + # A child efunc trying to add a symbol already added by a # sibling efunc continue diff --git a/devito/passes/iet/langbase.py b/devito/passes/iet/langbase.py index 808cdba3f0..da28115d91 100644 --- a/devito/passes/iet/langbase.py +++ b/devito/passes/iet/langbase.py @@ -245,7 +245,7 @@ def 
__init__(self, key, sregistry, options, platform, compiler): * 'par-collapse-ncores': use a collapse clause if the number of available physical cores is greater than this threshold. * 'par-collapse-work': use a collapse clause if the trip count of the - collapsable Iterations is statically known to exceed this threshold. + collapsible Iterations is statically known to exceed this threshold. * 'par-chunk-nonaffine': coefficient to adjust the chunk size in non-affine parallel Iterations. * 'par-dynamic-work': use dynamic scheduling if the operation count per @@ -289,20 +289,20 @@ def nthreads_nonaffine(self): def threadid(self): return self.sregistry.threadid - def _score_candidate(self, n0, root, collapsable=()): + def _score_candidate(self, n0, root, collapsible=()): """ - The score of a collapsable nest depends on the number of fully-parallel + The score of a collapsible nest depends on the number of fully-parallel Iterations and their position in the nest (the outer, the better). """ - nest = [root] + list(collapsable) + nest = [root] + list(collapsible) n = len(nest) - # Number of fully-parallel collapsable Iterations + # Number of fully-parallel collapsible Iterations key = lambda i: i.is_ParallelNoAtomic fp_iters = list(takewhile(key, nest)) n_fp_iters = len(fp_iters) - # Number of parallel-if-atomic collapsable Iterations + # Number of parallel-if-atomic collapsible Iterations key = lambda i: i.is_ParallelAtomic pia_iters = list(takewhile(key, nest)) n_pia_iters = len(pia_iters) @@ -341,13 +341,13 @@ def _select_candidates(self, candidates): # Score `root` in isolation mapper[(root, ())] = self._score_candidate(n0, root) - collapsable = [] + collapsible = [] for n, i in enumerate(candidates[n0+1:], n0+1): # The Iteration nest [root, ..., i] must be perfect if not IsPerfectIteration(depth=i).visit(root): break - # Loops are collapsable only if none of the iteration variables + # Loops are collapsible only if none of the iteration variables # appear in 
initializer expressions. For example, the following # two loops cannot be collapsed # @@ -373,16 +373,16 @@ def _select_candidates(self, candidates): except TypeError: pass - collapsable.append(i) + collapsible.append(i) - # Score `root + collapsable` - v = tuple(collapsable) + # Score `root + collapsible` + v = tuple(collapsible) mapper[(root, v)] = self._score_candidate(n0, root, v) # Retrieve the candidates with highest score - root, collapsable = max(mapper, key=mapper.get) + root, collapsible = max(mapper, key=mapper.get) - return root, list(collapsable) + return root, list(collapsible) class DeviceAwareMixin: diff --git a/devito/passes/iet/languages/C.py b/devito/passes/iet/languages/C.py index 0e04d0feeb..ddd61b325f 100644 --- a/devito/passes/iet/languages/C.py +++ b/devito/passes/iet/languages/C.py @@ -101,5 +101,5 @@ def _print_ComplexPart(self, expr): f'({self._print(expr.args[0])})') def _print_Conj(self, expr): - # In C, conj is not preceeded by the func_prefix + # In C, conj is not preceded by the func_prefix return (f'conj{self.func_literal(expr)}({self._print(expr.args[0])})') diff --git a/devito/passes/iet/languages/openacc.py b/devito/passes/iet/languages/openacc.py index c04a81fccd..cb924845de 100644 --- a/devito/passes/iet/languages/openacc.py +++ b/devito/passes/iet/languages/openacc.py @@ -170,11 +170,11 @@ class DeviceAccizer(PragmaDeviceAwareTransformer): def _make_partree(self, candidates, nthreads=None): assert candidates - root, collapsable = self._select_candidates(candidates) - ncollapsable = len(collapsable) + 1 + root, collapsible = self._select_candidates(candidates) + ncollapsable = len(collapsible) + 1 if self._is_offloadable(root) and \ - all(i.is_Affine for i in [root] + collapsable) and \ + all(i.is_Affine for i in [root] + collapsible) and \ self.par_tile: tile = self.par_tile.nextitem() assert isinstance(tile, UnboundTuple) diff --git a/devito/passes/iet/linearization.py b/devito/passes/iet/linearization.py index 
c75b2334ce..0d371ccb8e 100644 --- a/devito/passes/iet/linearization.py +++ b/devito/passes/iet/linearization.py @@ -396,7 +396,7 @@ def linearize_transfers(iet, sregistry=None, **kwargs): except TypeError: start, size = imask[0], 1 - if start != 0: # Spare the ugly generated code if unneccesary (occurs often) + if start != 0: # Spare the ugly generated code if unnecessary (occurs often) name = sregistry.make_name(prefix='%s_ofs' % n.function.name) wildcard = Wildcard(name=name, dtype=np.int32, is_const=True) diff --git a/devito/passes/iet/misc.py b/devito/passes/iet/misc.py index 3b397f350e..dbfcbd8394 100644 --- a/devito/passes/iet/misc.py +++ b/devito/passes/iet/misc.py @@ -258,7 +258,7 @@ def _(expr, langbb, printer): @iet_pass def minimize_symbols(iet): """ - Remove unneccesary symbols. Currently applied sub-passes: + Remove unnecessary symbols. Currently applied sub-passes: * Remove redundant ModuloDimensions (e.g., due to using the `save=Buffer(2)` API) diff --git a/devito/passes/iet/mpi.py b/devito/passes/iet/mpi.py index 6830723314..9dcad26008 100644 --- a/devito/passes/iet/mpi.py +++ b/devito/passes/iet/mpi.py @@ -286,13 +286,13 @@ def _mark_overlappable(iet): scope = Scope(i.expr for i in expressions) # Comp/comm overlaps is legal only if the OWNED regions can grow - # arbitrarly, which means all of the dependences must be carried + # arbitrarily, which means all of the dependencies must be carried # along a non-halo Dimension for dep in scope.d_all_gen(): if dep.function in hs.functions: cause = dep.cause & hs.dimensions if any(dep.distance_mapper[d] is S.Infinity for d in cause): - # E.g., dependences across PARALLEL iterations + # E.g., dependencies across PARALLEL iterations # for x # for y # ... = ... f[x, y-1] ... 
diff --git a/devito/passes/iet/parpragma.py b/devito/passes/iet/parpragma.py index 670d85a9a4..f03b8a3305 100644 --- a/devito/passes/iet/parpragma.py +++ b/devito/passes/iet/parpragma.py @@ -267,9 +267,9 @@ def _make_threaded_prodders(self, partree): def _make_partree(self, candidates, nthreads=None): assert candidates - # Get the collapsable Iterations - root, collapsable = self._select_candidates(candidates) - ncollapsed = 1 + len(collapsable) + # Get the collapsible Iterations + root, collapsible = self._select_candidates(candidates) + ncollapsed = 1 + len(collapsible) # Prepare to build a ParallelTree if all(i.is_Affine for i in candidates): @@ -303,7 +303,7 @@ def _make_partree(self, candidates, nthreads=None): body = self.HostIteration(ncollapsed=ncollapsed, chunk_size=chunk_size, **root.args) - niters = prod([root.symbolic_size] + [j.symbolic_size for j in collapsable]) + niters = prod([root.symbolic_size] + [j.symbolic_size for j in collapsible]) value = INT(Max(INT(niters / (nthreads*self.chunk_nonaffine)), 1)) prefix = [Expression(DummyEq(chunk_size, value, dtype=np.int32))] @@ -403,7 +403,7 @@ def _make_parallel(self, iet, sync_mapper=None): continue # Ignore if already part of an asynchronous region of code - # (e.g., an Iteartion embedded within a SyncSpot defining an + # (e.g., an Iteration embedded within a SyncSpot defining an # asynchronous operation) if any(n in sync_mapper for n in candidates): continue @@ -508,7 +508,7 @@ def _score_candidate(self, n0, root, collapsable=()): # ensure the outermost loop is offloaded ndptrs = len(self._device_pointers(root)) - return (ndptrs,) + super()._score_candidate(n0, root, collapsable) + return (ndptrs,) + super()._score_candidate(n0, root, collapsible) def _make_threaded_prodders(self, partree): if isinstance(partree.root, self.DeviceIteration): @@ -531,11 +531,11 @@ def _make_partree(self, candidates, nthreads=None, index=None): """ assert candidates - root, collapsable = 
self._select_candidates(candidates) + root, collapsible = self._select_candidates(candidates) if self._is_offloadable(root): body = self.DeviceIteration(gpu_fit=self.gpu_fit, - ncollapsed=len(collapsable)+1, + ncollapsed=len(collapsible)+1, tile=self.par_tile.nextitem(), **root.args) partree = ParallelTree([], body, nthreads=nthreads) diff --git a/devito/symbolics/inspection.py b/devito/symbolics/inspection.py index 1a4a8babca..d9902e8364 100644 --- a/devito/symbolics/inspection.py +++ b/devito/symbolics/inspection.py @@ -144,7 +144,7 @@ def wrapper(expr, estimate, seen): @singledispatch def _estimate_cost(expr, estimate, seen): # Retval: flops (int), flag (bool) - # The flag tells wether it's an integer expression (implying flops==0) or not + # The flag tells whether it's an integer expression (implying flops==0) or not if not expr.args: return 0, False flops, flags = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args]) diff --git a/devito/symbolics/manipulation.py b/devito/symbolics/manipulation.py index 23dbf2b764..6574b9f096 100644 --- a/devito/symbolics/manipulation.py +++ b/devito/symbolics/manipulation.py @@ -487,7 +487,7 @@ def evalrel(func=min, input=None, assumptions=None): # Apply assumptions to fill a subs mapper # e.g. 
When looking for 'max' and Gt(a, b), mapper is filled with {b: a} so that `b` - # is subsituted by `a` + # is substituted by `a` mapper = {} for a in processed: if set(a.args).issubset(input): diff --git a/devito/tools/utils.py b/devito/tools/utils.py index 8b8cc2a72e..54c8a760cb 100644 --- a/devito/tools/utils.py +++ b/devito/tools/utils.py @@ -135,7 +135,7 @@ def f(): def grouper(iterable, n): - """Split an interable into groups of size n, plus a reminder""" + """Split an iterable into groups of size n, plus a reminder""" args = [iter(iterable)] * n return ([e for e in t if e is not None] for t in zip_longest(*args)) diff --git a/devito/types/basic.py b/devito/types/basic.py index a80adcf20e..7f5702a7d2 100644 --- a/devito/types/basic.py +++ b/devito/types/basic.py @@ -643,7 +643,7 @@ class AbstractFunction(sympy.Function, Basic, Pickable, Evaluable): """ Base class for tensor symbols, cached by both SymPy and Devito. It inherits - from and mimicks the behaviour of a sympy.Function. + from and mimics the behaviour of a sympy.Function. The hierarchy is structured as follows @@ -1431,10 +1431,10 @@ class AbstractTensor(sympy.ImmutableDenseMatrix, Basic, Pickable, Evaluable): """ Base class for vector and tensor valued functions. It inherits from and - mimicks the behavior of a sympy.ImmutableDenseMatrix. + mimics the behavior of a sympy.ImmutableDenseMatrix. - The sub-hierachy is as follows + The sub-hierarchy is as follows AbstractTensor | diff --git a/devito/types/caching.py b/devito/types/caching.py index 3c7814021e..742c0b3d33 100644 --- a/devito/types/caching.py +++ b/devito/types/caching.py @@ -144,17 +144,17 @@ class CacheManager: data is lost (and thus memory is freed). """ - gc_ths = 3*10**8 + gc_threshold = 3*10**8 """ The `clear` function will trigger garbage collection if at least one weak - reference points to an unreachable object whose size in bytes is greated - than the `gc_ths` value. 
Garbage collection is an expensive operation, so + reference points to an unreachable object whose size in bytes is greater + than the `gc_threshold` value. Garbage collection is an expensive operation, so we do it judiciously. """ - force_ths = 100 + force_threshold = 100 """ - After `force_ths` *consecutive* calls ``clear(force=False)``, the flag + After `force_threshold` *consecutive* calls ``clear(force=False)``, the flag ``force`` is ignored, and thus ``clear(force=True)`` is executed. `` """ @@ -183,11 +183,11 @@ def clear(cls, force=True): if force: gc.collect() else: - if cls.ncalls_w_force_false + 1 == cls.force_ths: + if cls.ncalls_w_force_false + 1 == cls.force_threshold: # Case 1: too long since we called gc.collect, let's do it now gc.collect() cls.ncalls_w_force_false = 0 - elif any(i.nbytes > cls.gc_ths for i in cache_copied.values()): + elif any(i.nbytes > cls.gc_threshold for i in cache_copied.values()): # Case 2: we got big objects in cache, we try to reclaim memory gc.collect() cls.ncalls_w_force_false = 0 diff --git a/devito/types/dense.py b/devito/types/dense.py index ffabf59d4f..8527cb089b 100644 --- a/devito/types/dense.py +++ b/devito/types/dense.py @@ -94,7 +94,7 @@ def __init_finalize__(self, *args, function=None, **kwargs): self._data = function._data elif isinstance(self._allocator, DataReference): # Don't want to reinitialise array if DataReference used as allocator; - # create a no-op intialiser to avoid overwriting the original array. + # create a no-op initialiser to avoid overwriting the original array. 
self._initializer = lambda x: None elif initializer is None or callable(initializer) or self.alias: # Initialization postponed until the first access to .data @@ -1056,7 +1056,7 @@ def __init_finalize__(self, *args, **kwargs): self._fd = self.__fd_setup__() else: # E.g., `self is f(x + i0, y)` and `self.function is f(x, y)` - # Dynamically genereating derivative shortcuts is expensive; we + # Dynamically generating derivative shortcuts is expensive; we # can clearly avoid that here though! self._fd = self.function._fd diff --git a/devito/types/dimension.py b/devito/types/dimension.py index 0261b33245..3482f8da2c 100644 --- a/devito/types/dimension.py +++ b/devito/types/dimension.py @@ -151,7 +151,7 @@ def __new__(cls, *args, **kwargs): def class_key(cls): """ Overrides sympy.Symbol.class_key such that Dimensions always - preceed other symbols when printed (e.g. x + h_x, not h_x + x). + precede other symbols when printed (e.g. x + h_x, not h_x + x). """ a, b, c = super().class_key() return a, b - 1, c @@ -317,7 +317,7 @@ def _arg_values(self, interval, grid=None, args=None, **kwargs): except (AttributeError, TypeError): pass - # Some `args` may still be DerivedDimenions' defaults. These, in turn, + # Some `args` may still be DerivedDimensions' defaults. These, in turn, # may represent sets of legal values. If that's the case, here we just # pick one. Note that we sort for determinism try: diff --git a/devito/types/misc.py b/devito/types/misc.py index 286e6b4886..0067d8b318 100644 --- a/devito/types/misc.py +++ b/devito/types/misc.py @@ -345,7 +345,7 @@ class CriticalRegion(sympy.Function, Fence): * Equations within a critical sequence cannot be moved outside of the opening and closing CriticalRegions. * However, internal rearrangements are possible - * An asynchronous operation initiated within the critial sequence must + * An asynchronous operation initiated within the critical sequence must terminate before re-entering the opening CriticalRegion. 
""" diff --git a/devito/types/sparse.py b/devito/types/sparse.py index c79ef05654..cea1f65664 100644 --- a/devito/types/sparse.py +++ b/devito/types/sparse.py @@ -218,7 +218,7 @@ def __subfunc_setup__(self, suffix, keys, dtype=None, inkwargs=False, **kwargs): else: dtype = dtype or self.dtype - # Wether to initialize the subfunction with the provided data + # Whether to initialize the subfunction with the provided data # Useful when rebuilding with a placeholder array only used to # infer shape and dtype and set the actual data later if kwargs.get('init_subfunc', True): @@ -408,7 +408,7 @@ def guard(self, expr=None): """ conditions = {} - # Positon map and temporaries for it + # Position map and temporaries for it pmap = self._position_map # Temporaries for the position @@ -586,10 +586,10 @@ def _dist_subfunc_scatter(self, subfunc): dmap = self._dist_datamap mask = self._dist_scatter_mask(dmap=dmap) - # Pack (reordered) SubFuncion values so that they can be sent out via an Alltoallv + # Pack (reordered) SubFunction values so that they can be sent out via an Alltoallv sfuncd = subfunc.data._local[mask[self._sparse_position]] - # Send out the sparse point SubFuncion + # Send out the sparse point SubFunction _, scount, sdisp, rshape, rcount, rdisp = \ self._dist_subfunc_alltoall(subfunc, dmap=dmap) scattered = np.empty(shape=rshape, dtype=subfunc.dtype) @@ -597,7 +597,7 @@ def _dist_subfunc_scatter(self, subfunc): [scattered, rcount, rdisp, self._smpitype[subfunc]]) sfuncd = scattered - # Translate global SubFuncion values into local SubFuncion values + # Translate global SubFunction values into local SubFunction values if self.dist_origin[subfunc] is not None: sfuncd = sfuncd - np.array(self.dist_origin[subfunc], dtype=subfunc.dtype) return {subfunc: sfuncd} @@ -642,11 +642,11 @@ def _dist_subfunc_gather(self, sfuncd, subfunc): dmap = self._dist_datamap mask = self._dist_scatter_mask(dmap=dmap) - # Pack (reordered) SubFuncion values so that they can be sent out via 
an Alltoallv + # Pack (reordered) SubFunction values so that they can be sent out via an Alltoallv if self.dist_origin[subfunc] is not None: sfuncd = sfuncd + np.array(self.dist_origin[subfunc], dtype=subfunc.dtype) - # Send out the sparse point SubFuncion values + # Send out the sparse point SubFunction values sshape, scount, sdisp, _, rcount, rdisp = \ self._dist_subfunc_alltoall(subfunc, dmap=dmap) gathered = np.empty(shape=sshape, dtype=subfunc.dtype) @@ -1353,7 +1353,7 @@ class MatrixSparseTimeFunction(AbstractSparseTimeFunction): r: int or Mapping[Dimension, Optional[int]] The number of gridpoints in each Dimension used to inject/interpolate - each physical point. e.g. bi-/tri-linear interplation would use 2 coefficients + each physical point. e.g. bi-/tri-linear interpolation would use 2 coefficients in each Dimension. The Mapping version of this parameter allows a different number of grid points @@ -1561,7 +1561,7 @@ def __init_finalize__(self, *args, **kwargs): ) # This loop maintains a map of nnz indices which touch each - # coordinate of the parallised injection Dimension + # coordinate of the parallelised injection Dimension # This takes the form of a list of nnz indices, and a start/end # position in that list for each index in the parallel dim self.par_dim_to_nnz_dim = DynamicDimension('par_dim_to_nnz_%s' % self.name) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 1a70446c89..37b9bf6578 100644 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -9,7 +9,7 @@ if [[ "$DEVITO_PLATFORM" = "nvidiaX" ]]; then fi if [[ "$DEVITO_ARCH" = "icx" || "$DEVITO_ARCH" = "icc" ]]; then - echo "Initializing oneapi environement" + echo "Initializing oneapi environment" source /opt/intel/oneapi/setvars.sh intel64 fi diff --git a/pyproject.toml b/pyproject.toml index 9732a44a56..67064d47cb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -102,6 +102,24 @@ default.extend-ignore-words-re = ["\\b[[:alpha:]]{1,2}\\d?\\b", "\\b\\w{20,}\\b" arange = 
"arange" # dorder - derivative order dorder = "dorder" +# simpify - function in Devito for creating Sympy expressions +simpify = "simpify" + +[tool.typos.default.extend-words] +# datas - plural of data? +# optionss - plural of options? +# Horrendous use in builtins/initializers (TODO: FIX) +datas = "datas" +optionss = "optionss" +# HPE - Well known company name +HPE = "HPE" +# Sur - OS name: MacOS Big Sur +Sur = "Sur" +# pointss - plural of points? +# Horrendous use in operator/profiling (TODO: FIX) +pointss = "pointss" +# numer - common abbrevisation of numerator +numer = "numer" [tool.flake8] max-line-length = 90 From 94be5e78666e5b0969c440d6bfb4533a0921da5c Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 2 Jan 2026 14:11:43 +0000 Subject: [PATCH 24/42] misc: Fix typos in examples --- examples/cfd/02_convection_nonlinear.ipynb | 2 +- examples/cfd/07_cavity_flow.ipynb | 2 +- examples/cfd/08_shallow_water_equation.ipynb | 6 +++--- examples/cfd/09_Darcy_flow_equation.ipynb | 2 +- examples/cfd/tools.py | 2 +- examples/checkpointing/checkpoint.py | 2 +- examples/compiler/01_data_regions.ipynb | 2 +- examples/finance/bs_ivbp.ipynb | 2 +- examples/mpi/overview.ipynb | 2 +- examples/performance/00_overview.ipynb | 4 ++-- examples/seismic/abc_methods/01_introduction.ipynb | 14 +++++++------- examples/seismic/abc_methods/02_damping.ipynb | 8 ++++---- examples/seismic/abc_methods/04_habc.ipynb | 14 +++++++------- examples/seismic/abc_methods/README.md | 4 ++-- examples/seismic/acoustic/accuracy.ipynb | 2 +- examples/seismic/model.py | 10 +++++----- examples/seismic/self_adjoint/README.md | 2 +- .../self_adjoint/sa_01_iso_implementation1.ipynb | 6 +++--- .../self_adjoint/sa_02_iso_implementation2.ipynb | 14 +++++++------- .../self_adjoint/sa_03_iso_correctness.ipynb | 12 ++++++------ .../seismic/self_adjoint/test_wavesolver_iso.py | 12 ++++++------ examples/seismic/tutorials/02_rtm.ipynb | 2 +- examples/seismic/tutorials/03_fwi.ipynb | 2 +- 
examples/seismic/tutorials/04_dask.ipynb | 10 +++++----- examples/seismic/tutorials/04_dask_pickling.ipynb | 4 ++-- .../tutorials/06_elastic_varying_parameters.ipynb | 6 +++--- .../tutorials/07.1_dispersion_relation.ipynb | 6 +++--- examples/seismic/tutorials/07_DRP_schemes.ipynb | 10 +++++----- examples/seismic/tutorials/09_viscoelastic.ipynb | 2 +- examples/seismic/tutorials/10_nmo_correction.ipynb | 4 ++-- examples/seismic/tutorials/11_viscoacoustic.ipynb | 2 +- examples/seismic/tutorials/12_time_blocking.ipynb | 14 +++++++------- examples/seismic/tutorials/13_LSRTM_acoustic.ipynb | 2 +- .../seismic/tutorials/14_creating_synthetics.ipynb | 14 +++++++------- examples/seismic/tutorials/16_ader_fd.ipynb | 2 +- examples/seismic/viscoacoustic/operators.py | 4 ++-- examples/timestepping/acoustic_superstep.py | 2 +- examples/userapi/02_apply.ipynb | 2 +- examples/userapi/03_subdomains.ipynb | 2 +- examples/userapi/05_conditional_dimension.ipynb | 2 +- examples/userapi/06_sparse_operations.ipynb | 2 +- examples/userapi/07_functions_on_subdomains.ipynb | 2 +- pyproject.toml | 4 ++++ 43 files changed, 114 insertions(+), 110 deletions(-) diff --git a/examples/cfd/02_convection_nonlinear.ipynb b/examples/cfd/02_convection_nonlinear.ipynb index bd3559d268..55631f61af 100644 --- a/examples/cfd/02_convection_nonlinear.ipynb +++ b/examples/cfd/02_convection_nonlinear.ipynb @@ -137,7 +137,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Excellent, we again get a wave that resembles the one from the oiginal examples.\n", + "Excellent, we again get a wave that resembles the one from the original examples.\n", "\n", "Now we can set up our coupled problem in Devito. Let's start by creating two initial state variables $u$ and $v$, as before, and initialising them with our \"hat function." 
] diff --git a/examples/cfd/07_cavity_flow.ipynb b/examples/cfd/07_cavity_flow.ipynb index de5b61acaa..f2c8506082 100644 --- a/examples/cfd/07_cavity_flow.ipynb +++ b/examples/cfd/07_cavity_flow.ipynb @@ -464,7 +464,7 @@ "u = TimeFunction(name='u', grid=grid, space_order=2)\n", "v = TimeFunction(name='v', grid=grid, space_order=2)\n", "p = TimeFunction(name='p', grid=grid, space_order=2)\n", - "#Variables are automatically initalized at 0.\n", + "#Variables are automatically initialized at 0.\n", "\n", "# First order derivatives will be handled with p.dxc\n", "eq_u =Eq(u.dt + u*u.dx + v*u.dy, -1./rho * p.dxc + nu*(u.laplace), subdomain=grid.interior)\n", diff --git a/examples/cfd/08_shallow_water_equation.ipynb b/examples/cfd/08_shallow_water_equation.ipynb index 60610c2cd6..343b1638af 100644 --- a/examples/cfd/08_shallow_water_equation.ipynb +++ b/examples/cfd/08_shallow_water_equation.ipynb @@ -27,7 +27,7 @@ "\n", "For a given bathymetry model, which can include a complex seafloor topography, we want to model the amplitudes, speed and interaction of waves at the seasurface. 
At a given point $(x,\\; y)$, the thickness of the water column between the seafloor and undisturbed water surface is defined by the variable $h$, while the wave amplitude is $\\eta$ and therefore the whole thickness of the water column $D = h + \\eta$.\n", "\n", - "Using appropriate boundary conditions at the water surface/seafloor, assuming that the horizontal wavelength of the modelled waves are much larger than the water depth and integrating the conservation of mass and momentum equations over the water column, we can derive the following equations to decribe wave propagation \n", + "Using appropriate boundary conditions at the water surface/seafloor, assuming that the horizontal wavelength of the modelled waves are much larger than the water depth and integrating the conservation of mass and momentum equations over the water column, we can derive the following equations to describe wave propagation \n", "\n", "\\begin{equation}\n", "\\begin{split}\n", @@ -212,7 +212,7 @@ "\\end{split}\\notag\n", "\\end{equation}\n", "\n", - "In order to avoid the occurence of high frequency artifacts, when waves are interacting with the boundaries, boundary conditions will be avoided. Normally Dirichlet conditions are used for the M and N discharge fluxes, and Neumann for the wave height field. Those lead to significant boundary reflections which might be not realistic for a given problem.\n", + "In order to avoid the occurrence of high frequency artifacts, when waves are interacting with the boundaries, boundary conditions will be avoided. Normally Dirichlet conditions are used for the M and N discharge fluxes, and Neumann for the wave height field. Those lead to significant boundary reflections which might be not realistic for a given problem.\n", "\n", "Let's assume the gravity $g = 9.81$ and the Manning's roughness coefficient $\\alpha = 0.025$ for the all the remaining examples." 
] @@ -8076,7 +8076,7 @@ "source": [ "## Example VI: 2D circular dam break problem\n", "\n", - "As a final modelling example, let's take a look at an (academic) engineering problem: a tsunami induced by the collapse of a circular dam in a lake with a constant bathymetry of 30 m. We only need to set the wave height in a circle with radius $r_0 = 5\\; m$ to $\\eta_0 = 0.5 \\; m$ and to zero everywhere else. To avoid the occurence of high frequency artifacts in the wavefield, known as numerical grid dispersion, we apply a Gaussian filter to the initial wave height. To achieve a symmetric dam collapse, the initial discharge fluxes $M_0,N_0$ are set to equal values." + "As a final modelling example, let's take a look at an (academic) engineering problem: a tsunami induced by the collapse of a circular dam in a lake with a constant bathymetry of 30 m. We only need to set the wave height in a circle with radius $r_0 = 5\\; m$ to $\\eta_0 = 0.5 \\; m$ and to zero everywhere else. To avoid the occurrence of high frequency artifacts in the wavefield, known as numerical grid dispersion, we apply a Gaussian filter to the initial wave height. To achieve a symmetric dam collapse, the initial discharge fluxes $M_0,N_0$ are set to equal values." 
] }, { diff --git a/examples/cfd/09_Darcy_flow_equation.ipynb b/examples/cfd/09_Darcy_flow_equation.ipynb index a4efc75a1f..482cda7096 100644 --- a/examples/cfd/09_Darcy_flow_equation.ipynb +++ b/examples/cfd/09_Darcy_flow_equation.ipynb @@ -400,7 +400,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Call operator for the 15,000th psuedo-timestep\n", + "# Call operator for the 15,000th pseudo-timestep\n", "output1 = darcy_flow_2d(w1, f)\n", "output2 = darcy_flow_2d(w2, f)\n", "output3 = darcy_flow_2d(w3, f)" diff --git a/examples/cfd/tools.py b/examples/cfd/tools.py index ee23177cac..5221176581 100644 --- a/examples/cfd/tools.py +++ b/examples/cfd/tools.py @@ -19,7 +19,7 @@ def plot_field(field, xmin=0., xmax=2., ymin=0., ymax=2., zmin=None, zmax=None, ymax : int, optional Length of the y-axis. view: int, optional - View point to intialise. + View point to initialise. """ if xmin > xmax or ymin > ymax: raise ValueError("Dimension min cannot be larger than dimension max.") diff --git a/examples/checkpointing/checkpoint.py b/examples/checkpointing/checkpoint.py index 72a1da1b2a..5ed8f10905 100644 --- a/examples/checkpointing/checkpoint.py +++ b/examples/checkpointing/checkpoint.py @@ -2,6 +2,6 @@ warning("""The location of Devito's checkpointing has changed. This location will be deprecated soon. Please change your imports to 'from devito import - DevitoCheckpoint, CheckpointOperato'""") + DevitoCheckpoint, CheckpointOperator'""") from devito.checkpointing import * # noqa diff --git a/examples/compiler/01_data_regions.ipynb b/examples/compiler/01_data_regions.ipynb index a4c0f426cb..b6392845cb 100644 --- a/examples/compiler/01_data_regions.ipynb +++ b/examples/compiler/01_data_regions.ipynb @@ -263,7 +263,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "One can also pass a 3-tuple `(o, lp, rp)` instead of a single integer representing the discretization order. 
Here, `o` is the discretization order, while `lp` and `rp` indicate how many points are expected on left and right sides of a point of interest, respectivelly." + "One can also pass a 3-tuple `(o, lp, rp)` instead of a single integer representing the discretization order. Here, `o` is the discretization order, while `lp` and `rp` indicate how many points are expected on left and right sides of a point of interest, respectively." ] }, { diff --git a/examples/finance/bs_ivbp.ipynb b/examples/finance/bs_ivbp.ipynb index 3b1970ed92..4c46d48e14 100644 --- a/examples/finance/bs_ivbp.ipynb +++ b/examples/finance/bs_ivbp.ipynb @@ -81,7 +81,7 @@ "# smin = 50.0\n", "# smax = 150.0\n", "\n", - "# Amount of padding to proccess left/right (hidden from graphs)\n", + "# Amount of padding to process left/right (hidden from graphs)\n", "padding = 10\n", "smin -= padding\n", "smax += padding\n", diff --git a/examples/mpi/overview.ipynb b/examples/mpi/overview.ipynb index cda7844dc0..bb7e010779 100644 --- a/examples/mpi/overview.ipynb +++ b/examples/mpi/overview.ipynb @@ -627,7 +627,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This is again a global data view. The shown *with_halo* is the \"true\" halo surrounding the physical domain (often referred to as the \"outer halo\"), **not** the halo used for the MPI halo exchanges (often referred to as \"ghost region\" or \"inner halo\"). A user can straightforwardly initialize the \"true\" halo region (which is typically read by a stencil `Eq` when an `Operator` iterates in proximity of the domain bounday).\n", + "This is again a global data view. The shown *with_halo* is the \"true\" halo surrounding the physical domain (often referred to as the \"outer halo\"), **not** the halo used for the MPI halo exchanges (often referred to as \"ghost region\" or \"inner halo\"). 
A user can straightforwardly initialize the \"true\" halo region (which is typically read by a stencil `Eq` when an `Operator` iterates in proximity of the domain boundary).\n", "\n", "Note: This \"halo\" is often encountered as \"ghost cell area\" in literature" ] diff --git a/examples/performance/00_overview.ipynb b/examples/performance/00_overview.ipynb index c54a38c840..6e80e74d56 100644 --- a/examples/performance/00_overview.ipynb +++ b/examples/performance/00_overview.ipynb @@ -254,7 +254,7 @@ "A few optimization options are available for this pass (but not on all platforms, see [here](https://github.com/devitocodes/devito/blob/main/examples/performance/README.md)), though in our experience the default values do a fine job:\n", "\n", "* `par-collapse-ncores`: use a collapse clause only if the number of available physical cores is greater than this value (default=4).\n", - "* `par-collapse-work`: use a collapse clause only if the trip count of the collapsable loops is statically known to exceed this value (default=100).\n", + "* `par-collapse-work`: use a collapse clause only if the trip count of the collapsible loops is statically known to exceed this value (default=100).\n", "* `par-chunk-nonaffine`: a coefficient to adjust the chunk size in non-affine parallel loops. The larger the coefficient, the smaller the chunk size (default=3).\n", "* `par-dynamic-work`: use dynamic scheduling if the operation count per iteration exceeds this value. 
Otherwise, use static scheduling (default=10).\n", "* `par-nested`: use nested parallelism if the number of hyperthreads per core is greater than this value (default=2).\n", @@ -643,7 +643,7 @@ "```\n", "\n", "So the sub-expression `a[1] + a[2]` is computed twice, by two consecutive iterations.\n", - "What makes CIRE complicated is the generalization to arbitrary expressions, the presence of multiple dimensions, the scheduling strategy due to the trade-off between redundant compute and working set, and the co-existance with other optimizations (e.g., blocking, vectorization). All these aspects won't be treated here. What instead we will show is the effect of CIRE in our running example and the optimization options at our disposal to drive the detection and scheduling of the captured redundancies.\n", + "What makes CIRE complicated is the generalization to arbitrary expressions, the presence of multiple dimensions, the scheduling strategy due to the trade-off between redundant compute and working set, and the co-existence with other optimizations (e.g., blocking, vectorization). All these aspects won't be treated here. What instead we will show is the effect of CIRE in our running example and the optimization options at our disposal to drive the detection and scheduling of the captured redundancies.\n", "\n", "In our running example, some cross-iteration redundancies are induced by the nested first-order derivatives along `y`. 
As we see below, these redundancies are captured and assigned to the two-dimensional temporary `r0`.\n", "\n", diff --git a/examples/seismic/abc_methods/01_introduction.ipynb b/examples/seismic/abc_methods/01_introduction.ipynb index 1d3b94f8e4..d041564b41 100644 --- a/examples/seismic/abc_methods/01_introduction.ipynb +++ b/examples/seismic/abc_methods/01_introduction.ipynb @@ -159,9 +159,9 @@ "\n", " The source $f(x, z, t)$ will be a Ricker source with the following properties:\n", "\n", - "- Postion at $x:$ $\\bar{x} = 500 m = 0.5 Km$;\n", + "- Position at $x:$ $\\bar{x} = 500 m = 0.5 Km$;\n", "- Position at $z:$ $\\bar{z} = 10 m = 0.01 Km$;\n", - "- Peak frequence: $f_{0} = 10 Hz = 0.01 Khz$;\n", + "- Peak frequency: $f_{0} = 10 Hz = 0.01 Khz$;\n", "\n", "The graph of $f(\\bar{x}, \\bar{z}, t)$ will be generated when building the code. We employ a synthetic velocity model $c(x,z)$ with the following properties:\n", "\n", @@ -286,7 +286,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Once the mesh parameters are built, we can define the structures that represent $\\Omega$ subdomains, that is, particular regions of $\\Omega$, named *subdomains*. In the present notebook, there is no need to split the domain in particular subregions, so we define a single *subdomain* that correponds to the full domain $\\Omega$. This *subdomain* is built with the following command:" + "Once the mesh parameters are built, we can define the structures that represent $\\Omega$ subdomains, that is, particular regions of $\\Omega$, named *subdomains*. In the present notebook, there is no need to split the domain in particular subregions, so we define a single *subdomain* that corresponds to the full domain $\\Omega$. 
This *subdomain* is built with the following command:" ] }, { @@ -533,7 +533,7 @@ "- *grid* is the grid where the font will be placed;\n", "- *f0* is the frequency of the source;\n", "- *npoint* is the number of fonts that will be placed;\n", - "- *time_range* is the structure that contains the time informations that we are using in our simulations;\n", + "- *time_range* is the structure that contains the time information that we are using in our simulations;\n", "- *staggered* is the type of positioning of the points;\n", "- *src.coordinates.data[:, 0]* is the positioning of the source in the $x$ direction;\n", "- *src.coordinates.data[:, 1]* is the positioning of the source in the $z$ direction;\n", @@ -627,7 +627,7 @@ "- *name* is the symbolic name of the Receiver;\n", "- *grid* is the grid where the receivers will be placed;\n", "- *npoint* is the number of receivers that will be placed;\n", - "- *time_range* is the structure that contains the time informations that we are using in our simulations;\n", + "- *time_range* is the structure that contains the time information that we are using in our simulations;\n", "- *staggered* is the type of positioning of the points;\n", "- *rec.coordinates.data [:, 0]* is the positioning of receivers in the direction $x$;\n", "- *rec.coordinates.data [:, 1]* is the positioning of receivers in the $z$ direction;\n", @@ -757,7 +757,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We now create the acoustic equation without the external force term. As defined previusly, *u* represents the displacement field and *vel0* the field that carries the velocity information. \n", + "We now create the acoustic equation without the external force term. As defined previously, *u* represents the displacement field and *vel0* the field that carries the velocity information. 
\n", "\n", "- The structure that creates Equations in Devito is *Eq(eq)*, where *eq* is the equation we want to assign;\n", "- .dt2 calculates the second temporal derivative of the field it is applied to;\n", @@ -1011,7 +1011,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Realize that the solution has a large amount of noise, which is generated by the reflections at the boudaries. The main objective of this series of notebooks is to present several numerical schemes designed to reduce the wave reflections on the computational boundaries of the domain during simulation.\n", + "Realize that the solution has a large amount of noise, which is generated by the reflections at the boundaries. The main objective of this series of notebooks is to present several numerical schemes designed to reduce the wave reflections on the computational boundaries of the domain during simulation.\n", "\n", "We now create a routine to plot the shot records of the Receivers" ] diff --git a/examples/seismic/abc_methods/02_damping.ipynb b/examples/seismic/abc_methods/02_damping.ipynb index 60114f8991..6111471cf8 100644 --- a/examples/seismic/abc_methods/02_damping.ipynb +++ b/examples/seismic/abc_methods/02_damping.ipynb @@ -235,7 +235,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "And we define $L_{x}$ and $L_{z}$ as beeing:" + "And we define $L_{x}$ and $L_{z}$ as being:" ] }, { @@ -325,7 +325,7 @@ "The blue region will be the union of the following regions:\n", "\n", "- *d1* represents the left range in the direction *x*, where the pairs $(x,z)$ satisfy: $x\\in\\{0,npmlx\\}$ and $z\\in\\{0,nptz\\}$;\n", - "- *d2* represents the rigth range in the direction *x*, where the pairs $(x,z)$ satisfy: $x\\in\\{nptx-npmlx,nptx\\}$ and $z\\in\\{0,nptz\\}$;\n", + "- *d2* represents the right range in the direction *x*, where the pairs $(x,z)$ satisfy: $x\\in\\{nptx-npmlx,nptx\\}$ and $z\\in\\{0,nptz\\}$;\n", "- *d3* represents the left range in the direction *y*, where 
the pairs $(x,z)$ satisfy: $x\\in\\{npmlx,nptx-npmlx\\}$ and $z\\in\\{nptz-npmlz,nptz\\}$;\n", "\n", "Thus, the regions *d1*, *d2* and *d3* are described as follows in the language of *subdomains*:" @@ -518,7 +518,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "With the temporal parameters, we generate the time informations with *TimeAxis* as follows:" + "With the temporal parameters, we generate the time information with *TimeAxis* as follows:" ] }, { @@ -895,7 +895,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The expressions for the acoustic equation with damping can be separeted between the white and blue regions.\n", + "The expressions for the acoustic equation with damping can be separated between the white and blue regions.\n", "\n", "Translating these expressions in terms of an *eq* that can be inserted in a Devito code, we have that in the white region the equation takes the form:\n", "\n", diff --git a/examples/seismic/abc_methods/04_habc.ipynb b/examples/seismic/abc_methods/04_habc.ipynb index cf338f8fba..0e452d6914 100644 --- a/examples/seismic/abc_methods/04_habc.ipynb +++ b/examples/seismic/abc_methods/04_habc.ipynb @@ -81,7 +81,7 @@ " Observation: There are similarities between Clayton's A2 and the Higdon condition. If one chooses $p=2$ and\n", " both angles equal to zero in Higdon's method, this leads to the condition:\n", " $ u_{tt}-2cu_{xt}+c^2u_{xx}=0$. But, using the wave equation, we have that $c^2u_{xx}=u_{tt}-c^2u_{zz}$. Replacing this relation in the previous equation, we get: $2u_{tt}-2cu_{xt}-c^2u_{zz}=0$ which is Clayton's A2\n", - " boundary condition. In this sence, Higdon's method would generalize Clayton's scheme. But the discretization of\n", + " boundary condition. In this since, Higdon's method would generalize Clayton's scheme. But the discretization of\n", " both methods are quite different, since in Higdon's scheme the boundary operators are unidirectional, while\n", " in Clayton's A2 not." 
] @@ -375,10 +375,10 @@ "The blue region will be built with 3 divisions:\n", "\n", "- *d1* represents the left range in the direction *x*, where the pairs $(x,z)$ satisfy: $x\\in\\{0,npmlx\\}$ and $z\\in\\{0,nptz\\}$;\n", - "- *d2* represents the rigth range in the direction *x*, where the pairs $(x,z)$ satisfy: $x\\in\\{nptx-npmlx,nptx\\}$ and $z\\in\\{0,nptz\\}$;\n", + "- *d2* represents the right range in the direction *x*, where the pairs $(x,z)$ satisfy: $x\\in\\{nptx-npmlx,nptx\\}$ and $z\\in\\{0,nptz\\}$;\n", "- *d3* represents the left range in the direction *y*, where the pairs $(x,z)$ satisfy: $x\\in\\{npmlx,nptx-npmlx\\}$ and $z\\in\\{nptz-npmlz,nptz\\}$;\n", "\n", - "Thus, the regions *d1*, *d2* and *d3* aare described as follows in the language of *subdomains*:" + "Thus, the regions *d1*, *d2* and *d3* are described as follows in the language of *subdomains*:" ] }, { @@ -1065,7 +1065,7 @@ "source": [ "For the blue region we will divide it into $npmlx$ layers in the $x$ direction and $npmlz$ layers in the $z$ direction. In this case, the representation is a little more complex than shown in the figures that exemplify the regions $A_{k}$ because there are intersections between the layers.\n", "\n", - "**Observation:** Note that the representation of the $A_{k}$ layers that we present in our text reflects the case where $npmlx=npmlz$. However, our code includes the case illustrated in the figure, as well as situations in which $npmlx\\neq npmlz$. The discretizations of the bounadry conditions A1, A2 and Higdon follow in the bibliographic references at the end. They will not be detailled here, but can be seen in the codes below. \n", + "**Observation:** Note that the representation of the $A_{k}$ layers that we present in our text reflects the case where $npmlx=npmlz$. However, our code includes the case illustrated in the figure, as well as situations in which $npmlx\\neq npmlz$. 
The discretizations of the boundary conditions A1, A2 and Higdon follow in the bibliographic references at the end. They will not be detailed here, but can be seen in the codes below. \n", "\n", "In the sequence of codes below we build the *pdes* that represent the *eqs* of the regions $B_{1}$, $B_{2}$ and $B_{3}$ and/or in the corners (red points in the case of *A2*) as represented in the following figure:\n", "\n", @@ -1148,7 +1148,7 @@ " pde3 = (1-weightsz[x,z])*u3[x,z] + weightsz[x,z]*aux3\n", " stencil3 = Eq(u.forward,pde3,subdomain = grid.subdomains['d3'])\n", "\n", - " # Red point rigth side\n", + " # Red point right side\n", " stencil4 = [Eq(u[t+1,nptx-1-k,nptz-1-k],(1-weightsz[nptx-1-k,nptz-1-k])*u3[nptx-1-k,nptz-1-k] +\n", " weightsz[nptx-1-k,nptz-1-k]*(((-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-1-k,nptz-2-k]\n", " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-2-k,nptz-1-k]\n", @@ -1302,7 +1302,7 @@ "- 3. Updating solutions over time: *[stencil01,stencil02];*\n", "- 4. The acoustic wave equation in the *d1*, *d2* e *d3* regions: *[stencil1,stencil2,stencil3];*\n", "- 5. The equation for red points for A2 method: *[stencil5,stencil4];*\n", - "- 6. Boundry Conditions: *bc;*\n", + "- 6. Boundary Conditions: *bc;*\n", "- 7. Receivers: *rec_term;*\n", "\n", "We then define two types of *op*:\n", @@ -1551,7 +1551,7 @@ "# 4.7 - Conclusions\n", "\n", "We have presented the HABC method for the acoustic wave equation, which can be used with any of the \n", - "absorbing boundary conditions A1, A2 or Higdon. The notebook also include the possibility of using these boundary conditions alone, without being combined with the HABC. The user has the possibilty of testing several combinations of parameters and observe the effects in the absorption of spurious reflections on computational boundaries.\n", + "absorbing boundary conditions A1, A2 or Higdon. 
The notebook also includes the possibility of using these boundary conditions alone, without being combined with the HABC. The user has the possibility of testing several combinations of parameters and observing the effects in the absorption of spurious reflections on computational boundaries.\n", "\n", " The relevant references for the boundary conditions are furnished next." ] }, diff --git a/examples/seismic/abc_methods/README.md b/examples/seismic/abc_methods/README.md index 9e633c4e32..c156d459e0 100644 --- a/examples/seismic/abc_methods/README.md +++ b/examples/seismic/abc_methods/README.md @@ -4,7 +4,7 @@ Institute of Mathematics and Statistics - Applied Mathematics Department (felipe.augusto.guedes@gmail.com, saulo@ime.usp.br, pedrosp@ime.usp.br) -**Important Informations:** These notebooks are part of the Project Software Technologies for Modeling and Inversion (STMI) at RCGI in the University of Sao Paulo. +**Important Information:** These notebooks are part of the Project Software Technologies for Modeling and Inversion (STMI) at RCGI in the University of Sao Paulo. The objective of these notebooks is to present several schemes which are designed to reduce artificial reflections on boundaries in the numerical solution of the acoustic wave equation with finite differences. We consider several methods, covering absorbing boundary conditions and absorbing boundary layers. Among the schemes, we have implemented: @@ -14,7 +14,7 @@ The objective of these notebooks is to present several schemes which are designe - Perfectly Matched Layer (PML); - Hybrid Absorbing Boundary Conditions (HABC); -The computational implementation of the methods above is done within the framework of Devito, which is aimed to produce highly optimized code for finite differences discretizations, generated from high level symbolic problem definitions.
Devito presents a work structure in Python and generates code in C ++, which can be taylored for high performance on different computational platforms. The notebooks are organized as follows: +The computational implementation of the methods above is done within the framework of Devito, which is aimed to produce highly optimized code for finite differences discretizations, generated from high level symbolic problem definitions. Devito presents a work structure in Python and generates code in C ++, which can be tailored for high performance on different computational platforms. The notebooks are organized as follows: - 1. Introduction and description of the acoustic problem; - 2. Implementation of Sochaki's damping; diff --git a/examples/seismic/acoustic/accuracy.ipynb b/examples/seismic/acoustic/accuracy.ipynb index b9e4aa6533..7c9bccc058 100644 --- a/examples/seismic/acoustic/accuracy.ipynb +++ b/examples/seismic/acoustic/accuracy.ipynb @@ -185,7 +185,7 @@ "\n", "where $H_0^{(2)}$ is the Hankel function of the second kind, $F(\\omega)$ is the Fourier spectrum of the source time function at angular frequencies $\\omega$ and $k = \\frac{\\omega}{v}$ is the wavenumber.\n", "\n", - "We look at the analytical and numerical solution at a single grid point. We ensure that this grid point is on-the-grid for all discretizations analyised in the further verification." + "We look at the analytical and numerical solution at a single grid point. We ensure that this grid point is on-the-grid for all discretizations analysed in the further verification." ] }, { diff --git a/examples/seismic/model.py b/examples/seismic/model.py index 579d7a3827..c927b88abb 100644 --- a/examples/seismic/model.py +++ b/examples/seismic/model.py @@ -225,7 +225,7 @@ def spacing_map(self): @property def dtype(self): """ - Data type for all assocaited data objects. + Data type for all associated data objects. 
""" return self.grid.dtype @@ -290,10 +290,10 @@ def __init__(self, origin, spacing, shape, space_order, vp, nbl=20, fs=False, # User provided dt self._dt = kwargs.get('dt') - # Some wave equation need a rescaled dt that can't be infered from the model + # Some wave equation need a rescaled dt that can't be inferred from the model # parameters, such as isoacoustic OT4 that can use a dt sqrt(3) larger than # isoacoustic OT2. This property should be set from a wavesolver or after model - # instanciation only via model.dt_scale = value. + # instantiation only via model.dt_scale = value. self._dt_scale = 1 def _initialize_physics(self, vp, space_order, **kwargs): @@ -373,7 +373,7 @@ def critical_dt(self): """ # For a fixed time order this number decreases as the space order increases. # - # The CFL condtion is then given by + # The CFL condition is then given by # dt <= coeff * h / (max(velocity)) dt = self._cfl_coeff * np.min(self.spacing) / (self._thomsen_scale*self._max_vp) dt = self.dtype("%.3e" % (self.dt_scale * dt)) @@ -388,7 +388,7 @@ def update(self, name, value): try: param = getattr(self, name) except AttributeError: - # No physical parameter with tha name, create it + # No physical parameter with that name, create it setattr(self, name, self._gen_phys_param(value, name, self.space_order)) return # Update the physical parameter according to new value diff --git a/examples/seismic/self_adjoint/README.md b/examples/seismic/self_adjoint/README.md index 16f923097b..f5918a166a 100644 --- a/examples/seismic/self_adjoint/README.md +++ b/examples/seismic/self_adjoint/README.md @@ -2,7 +2,7 @@ ## These operators are contributed by Chevron Energy Technology Company (2020) -These operators are based on simplfications of the systems presented in: +These operators are based on simplifications of the systems presented in:
**Self-adjoint, energy-conserving second-order pseudoacoustic systems for VTI and TTI media for reverse migration and full-waveform inversion** (2016)
Kenneth Bube, John Washbourne, Raymond Ergas, and Tamas Nemeth
SEG Technical Program Expanded Abstracts diff --git a/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb b/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb index 7cd6905201..01f9d32716 100644 --- a/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb +++ b/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb @@ -8,7 +8,7 @@ "\n", "## This operator is contributed by Chevron Energy Technology Company (2020)\n", "\n", - "This operator is based on simplfications of the systems presented in:\n", + "This operator is based on simplifications of the systems presented in:\n", "
**Self-adjoint, energy-conserving second-order pseudoacoustic systems for VTI and TTI media for reverse time migration and full-waveform inversion** (2016)\n", "
Kenneth Bube, John Washbourne, Raymond Ergas, and Tamas Nemeth\n", "
SEG Technical Program Expanded Abstracts\n", @@ -615,7 +615,7 @@ " Determine the temporal sampling to satisfy CFL stability.\n", " This method replicates the functionality in the Model class.\n", " Note we add a safety factor, reducing dt by a factor 0.75 due to the\n", - " w/Q attentuation term.\n", + " w/Q attenuation term.\n", " Parameters\n", " ----------\n", " v : Function\n", @@ -1336,7 +1336,7 @@ "\n", "By setting Devito logging ```configuration['log-level'] = 'DEBUG'``` we have enabled output of statistics related to the performance of the operator, which you will see below when the operator runs.\n", "\n", - "We will run the Operator once with the Q model as defined ```wOverQ_025```, and then run a second time passing the ```wOverQ_100``` Q model. For the second run with the different Q model, we take advantage of the ```placeholder design patten``` in the Devito ```Operator```. \n", + "We will run the Operator once with the Q model as defined ```wOverQ_025```, and then run a second time passing the ```wOverQ_100``` Q model. For the second run with the different Q model, we take advantage of the ```placeholder design pattern``` in the Devito ```Operator```. \n", "\n", "For more information on this see the [FAQ](https://github.com/devitocodes/devito/wiki/FAQ#how-are-abstractions-used-in-the-seismic-examples) entry." ] diff --git a/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb b/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb index e70aca879b..38e0b5b472 100644 --- a/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb +++ b/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb @@ -8,7 +8,7 @@ "\n", "## This operator is contributed by Chevron Energy Technology Company (2020)\n", "\n", - "This operator is based on simplfications of the systems presented in:\n", + "This operator is based on simplifications of the systems presented in:\n", "
**Self-adjoint, energy-conserving second-order pseudoacoustic systems for VTI and TTI media for reverse time migration and full-waveform inversion** (2016)\n", "
Kenneth Bube, John Washbourne, Raymond Ergas, and Tamas Nemeth\n", "
SEG Technical Program Expanded Abstracts\n", @@ -42,9 +42,9 @@ "## Outline \n", "1. Define symbols \n", "2. The nonlinear operator \n", - "3. The Jacobian opeator \n", + "3. The Jacobian operator \n", "4. Create the Devito grid and model fields \n", - "5. The simulation time range and acquistion geometry \n", + "5. The simulation time range and acquisition geometry \n", "6. Implement and run the nonlinear forward operator \n", "7. Implement and run the Jacobian forward operator \n", "8. Implement and run the Jacobian adjoint operator \n", @@ -452,7 +452,7 @@ " Determine the temporal sampling to satisfy CFL stability.\n", " This method replicates the functionality in the Model class.\n", " Note we add a safety factor, reducing dt by a factor 0.75 due to the\n", - " w/Q attentuation term.\n", + " w/Q attenuation term.\n", " Parameters\n", " ----------\n", " v : Function\n", @@ -928,11 +928,11 @@ "14 name='ISO_JacobianFwdOperator', **kwargs)\n", "```\n", "\n", - "One important thing to note about this code is the precedence of operations specified on the construction of the operator at line 13. It is guaranteed by Devito that ```eqn1``` will 'run' before ```eqn2```. This means that this specific order will occurr in the generated code: \n", + "One important thing to note about this code is the precedence of operations specified on the construction of the operator at line 13. It is guaranteed by Devito that ```eqn1``` will 'run' before ```eqn2```. This means that this specific order will occur in the generated code: \n", "1. The nonlinear wavefield is advanced in time\n", "2. The nonlinear source is injected in the nonlinear wavefield\n", "3. The linearized wavefield is advanced in time\n", - "4. The linearixzed wavefield is interpolated at the receiever locations\n", + "4. The linearized wavefield is interpolated at the receiver locations\n", "\n", "As an exercise, you might implement this operator and print the generated c code to confirm this. 
" ] @@ -986,7 +986,7 @@ "dmAdj = Function(name='dmAdj', grid=grid, space_order=space_order)\n", "\n", "# The linearized adjoint time update equation\n", - "# Note the small differencess from the linearized forward above\n", + "# Note the small differences from the linearized forward above\n", "eq_time_update_ln_adj = (t.spacing**2 * m0**2 / b) * \\\n", " ((b * duAdj.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", " (b * duAdj.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2)) +\\\n", diff --git a/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb b/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb index 3756ac364e..62c4fdde57 100644 --- a/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb +++ b/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb @@ -8,7 +8,7 @@ "\n", "## This operator is contributed by Chevron Energy Technology Company (2020)\n", "\n", - "This operator is based on simplfications of the systems presented in:\n", + "This operator is based on simplifications of the systems presented in:\n", "
**Self-adjoint, energy-conserving second-order pseudoacoustic systems for VTI and TTI media for reverse time migration and full-waveform inversion** (2016)\n", "
Kenneth Bube, John Washbourne, Raymond Ergas, and Tamas Nemeth\n", "
SEG Technical Program Expanded Abstracts\n", @@ -356,7 +356,7 @@ "model = Model(origin=origin, shape=shape, vp=v0, b=b0, spacing=spacing, nbl=npad,\n", " space_order=space_order, bcs=init_damp, dtype=dtype, dt=dt)\n", "\n", - "# Source and reciver coordinates\n", + "# Source and receiver coordinates\n", "src_coords = np.empty((1, 2), dtype=dtype)\n", "rec_coords = np.empty((1, 2), dtype=dtype)\n", "src_coords[:, :] = np.array(model.domain_size) * .5\n", @@ -556,7 +556,7 @@ "rec1.data[:] *= a\n", "\n", "# Check receiver wavefeild linearity\n", - "# Normalize by rms of rec2, to enable using abolute tolerance below\n", + "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(rec2.data**2))\n", "diff = (rec1.data - rec2.data) / rms2\n", "print(\"\\nlinearity forward F %s (so=%d) rms 1,2,diff; \"\n", @@ -604,7 +604,7 @@ "src1.data[:] *= a\n", "\n", "# Check adjoint source wavefeild linearity\n", - "# Normalize by rms of rec2, to enable using abolute tolerance below\n", + "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(src2.data**2))\n", "diff = (src1.data - src2.data) / rms2\n", "print(\"\\nlinearity adjoint F %s (so=%d) rms 1,2,diff; \"\n", @@ -885,7 +885,7 @@ "m1.data[:] = a * m1.data[:]\n", "rec2, _, _, _ = solver.jacobian(m1, src0, vp=m0)\n", "\n", - "# Normalize by rms of rec2, to enable using abolute tolerance below\n", + "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(rec2.data**2))\n", "diff = (rec1.data - rec2.data) / rms2\n", "print(\"\\nlinearity forward J %s (so=%d) rms 1,2,diff; \"\n", @@ -943,7 +943,7 @@ "rec0.data[:] = a * rec0.data[:]\n", "dm2, _, _, _ = solver.jacobian_adjoint(rec0, u0, vp=m0)\n", "\n", - "# Normalize by rms of rec2, to enable using abolute tolerance below\n", + "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(dm2.data**2))\n", "diff = (dm1.data - 
dm2.data) / rms2\n", "print(\"\\nlinearity adjoint J %s (so=%d) rms 1,2,diff; \"\n", diff --git a/examples/seismic/self_adjoint/test_wavesolver_iso.py b/examples/seismic/self_adjoint/test_wavesolver_iso.py index 368a9f4b8d..7d55122608 100644 --- a/examples/seismic/self_adjoint/test_wavesolver_iso.py +++ b/examples/seismic/self_adjoint/test_wavesolver_iso.py @@ -37,7 +37,7 @@ def test_linearity_forward_F(self, shape, dtype, so): rec1.data[:] *= a # Check receiver wavefeild linearity - # Normalize by rms of rec2, to enable using abolute tolerance below + # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(rec2.data**2)) diff = (rec1.data - rec2.data) / rms2 info("linearity forward F %s (so=%d) rms 1,2,diff; " @@ -66,7 +66,7 @@ def test_linearity_adjoint_F(self, shape, dtype, so): src1.data[:] *= a # Check adjoint source wavefeild linearity - # Normalize by rms of rec2, to enable using abolute tolerance below + # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(src2.data**2)) diff = (src1.data - src2.data) / rms2 info("linearity adjoint F %s (so=%d) rms 1,2,diff; " @@ -210,7 +210,7 @@ def test_linearity_forward_J(self, shape, dtype, so): m1.data[:] = a * m1.data[:] rec2, _, _, _ = solver.jacobian(m1, src0, vp=m0) - # Normalize by rms of rec2, to enable using abolute tolerance below + # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(rec2.data**2)) diff = (rec1.data - rec2.data) / rms2 info("linearity forward J %s (so=%d) rms 1,2,diff; " @@ -259,7 +259,7 @@ def test_linearity_adjoint_J(self, shape, dtype, so): rec0.data[:] = a * rec0.data[:] dm2, _, _, _ = solver.jacobian_adjoint(rec0, u0, vp=m0) - # Normalize by rms of rec2, to enable using abolute tolerance below + # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(dm2.data**2)) diff = (dm1.data - dm2.data) / rms2 info("linearity adjoint J %s (so=%d) rms 
1,2,diff; " @@ -366,7 +366,7 @@ def test_derivative_symmetry(self, dtype, so): @pytest.mark.parametrize('so', space_orders) def test_analytic_comparison_2d(self, dtype, so): """ - Wnsure that the farfield response from the propagator matches analytic reponse + Wnsure that the farfield response from the propagator matches analytic response in a wholespace. """ # Setup time / frequency @@ -393,7 +393,7 @@ def test_analytic_comparison_2d(self, dtype, so): model = Model(origin=o, shape=shape, vp=v0, b=1.0, spacing=spacing, nbl=npad, space_order=space_order, bcs=init_damp) - # Source and reciver coordinates + # Source and receiver coordinates src_coords = np.empty((1, 2), dtype=dtype) rec_coords = np.empty((1, 2), dtype=dtype) src_coords[0, :] = np.array(model.domain_size) * .5 diff --git a/examples/seismic/tutorials/02_rtm.ipynb b/examples/seismic/tutorials/02_rtm.ipynb index 350d98bab0..7ed3736463 100644 --- a/examples/seismic/tutorials/02_rtm.ipynb +++ b/examples/seismic/tutorials/02_rtm.ipynb @@ -88,7 +88,7 @@ " grid=grid, nbl=20)\n", " filter_sigma = (6, 6)\n", " nshots = 301 # Need good covergae in shots, one every two grid points\n", - " nreceivers = 601 # One recevier every grid point\n", + " nreceivers = 601 # One receiver every grid point\n", " t0 = 0.\n", " tn = 3500. # Simulation last 3.5 second (3500 ms)\n", " f0 = 0.025 # Source peak frequency is 25Hz (0.025 kHz)" diff --git a/examples/seismic/tutorials/03_fwi.ipynb b/examples/seismic/tutorials/03_fwi.ipynb index 83bf629448..c309542c12 100644 --- a/examples/seismic/tutorials/03_fwi.ipynb +++ b/examples/seismic/tutorials/03_fwi.ipynb @@ -44,7 +44,7 @@ "source": [ "## Computational considerations\n", "\n", - "As we will see, FWI is computationally extremely demanding, even more than RTM. To keep this tutorial as lightwight as possible we therefore again use a very small demonstration model. 
We also define here a few parameters for the final example runs that can be changed to modify the overall runtime of the tutorial." + "As we will see, FWI is computationally extremely demanding, even more than RTM. To keep this tutorial as lightweight as possible we therefore again use a very small demonstration model. We also define here a few parameters for the final example runs that can be changed to modify the overall runtime of the tutorial." ] }, { diff --git a/examples/seismic/tutorials/04_dask.ipynb b/examples/seismic/tutorials/04_dask.ipynb index 9084f2a3de..5677323a22 100644 --- a/examples/seismic/tutorials/04_dask.ipynb +++ b/examples/seismic/tutorials/04_dask.ipynb @@ -20,7 +20,7 @@ "source": [ "In this tutorial, we will build on the [previous](https://github.com/devitocodes/devito/blob/main/examples/seismic/tutorials/03_fwi.ipynb) FWI tutorial and implement parallel versions of both forward modeling and FWI objective functions. Furthermore, we will show how our parallel FWI function can be passed to black-box third party optimization libraries, such as SciPy's [optimize](https://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html) package, to access sophisticated optimization algorithms without having to implement them from scratch!\n", "\n", - "To implement parallel versions of forward modeling and FWI, we will use [Dask](https://dask.pydata.org/en/latest/#dask), a Python library for distributed computing based on parallel data structures and task-based programming. As computing multiple seismic shot records or gradients for subsequent source locations is an embarassingly parallel process, we will use Dask to dynamically distribute our workload to a pool of available workers and afterwards collect the results.\n", + "To implement parallel versions of forward modeling and FWI, we will use [Dask](https://dask.pydata.org/en/latest/#dask), a Python library for distributed computing based on parallel data structures and task-based programming. 
As computing multiple seismic shot records or gradients for subsequent source locations is an embarrassingly parallel process, we will use Dask to dynamically distribute our workload to a pool of available workers and afterwards collect the results.\n", "\n", "The first part of this tutorial closely follows [tutorial 3](https://github.com/devitocodes/devito/blob/main/examples/seismic/tutorials/03_fwi.ipynb) and consists of reading the velocity model and setting up the acquisition geometry. Subsequently, we will implement serial versions of forward modeling and FWI objective functions and then show how we can use Dask to implement parallel versions of these functions. Finally, we will show how to write a wrapper that lets us pass our objective function to scipy's optimize package and how to run a small 2D FWI example using a limited-memory Quasi-Newton method." ] @@ -63,7 +63,7 @@ "source": [ "As before, we start by reading the true (i.e. unknown) velocity model, as well as the starting model for FWI. For our example, we once again use the 2D Camembert model with a transmission acquisition set up, which involves having sources on one side of the model and receivers on the other side.\n", "\n", - "In reality, we obvisouly cannot know what the true velocity is, but here we use the true model to generate our own data (inverse crime alert!) and to compare it to our FWI result." + "In reality, we obviously cannot know what the true velocity is, but here we use the true model to generate our own data (inverse crime alert!) and to compare it to our FWI result." ] }, { @@ -131,7 +131,7 @@ "from examples.seismic import AcquisitionGeometry\n", "import numpy as np\n", "\n", - "# Set up acquisiton geometry\n", + "# Set up acquisition geometry\n", "t0 = 0.\n", "tn = 1000.\n", "f0 = 0.010\n", @@ -556,7 +556,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The final preparation step before we can run our example, is the definition of box constraints for the velocity. 
At each iteration, the optimizer will project the current model iterate onto a feasible set of velocites as defined by the lower and upper bounds `vmin` and `vmax`. Box contraints allow us to prevent velocities from taking negative values or values that are too small or large for the stability criteria of our modeling stepping scheme. We define the box constraints for the velocity in $km/s$ and then convert them to squared slownesses. Furthermore, we define our initial guess `m0`:" + "The final preparation step before we can run our example, is the definition of box constraints for the velocity. At each iteration, the optimizer will project the current model iterate onto a feasible set of velocities as defined by the lower and upper bounds `vmin` and `vmax`. Box constraints allow us to prevent velocities from taking negative values or values that are too small or large for the stability criteria of our modeling stepping scheme. We define the box constraints for the velocity in $km/s$ and then convert them to squared slownesses. 
Furthermore, we define our initial guess `m0`:" ] }, { @@ -565,7 +565,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Box contraints\n", + "# Box constraints\n", "vmin = 1.4 # do not allow velocities slower than water\n", "vmax = 4.0\n", "bounds = [(1.0/vmax**2, 1.0/vmin**2) for _ in range(np.prod(model0.shape))] # in [s^2/km^2]\n", diff --git a/examples/seismic/tutorials/04_dask_pickling.ipynb b/examples/seismic/tutorials/04_dask_pickling.ipynb index d1de2fd35a..1fe51dd70e 100644 --- a/examples/seismic/tutorials/04_dask_pickling.ipynb +++ b/examples/seismic/tutorials/04_dask_pickling.ipynb @@ -380,7 +380,7 @@ " # Get src_position and data\n", " src_positions, rec = load_shot_data(param['shot_id'], dt)\n", "\n", - " # Set up solver -- load the solver used above in the generation of the syntethic data.\n", + " # Set up solver -- load the solver used above in the generation of the synthetic data.\n", " with open(\"arguments.pkl\", \"rb\") as cp_file:\n", " cp = pickle.load(cp_file)\n", " solver = cp['solver']\n", @@ -419,7 +419,7 @@ "metadata": {}, "source": [ "Define the global functional-gradient operator. This does the following:\n", - "* Maps the worklist (shots) to the workers so that the invidual contributions to (f, g) are computed.\n", + "* Maps the worklist (shots) to the workers so that the individual contributions to (f, g) are computed.\n", "* Sum individual contributions to (f, g) and returns the result." 
] }, diff --git a/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb b/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb index 4358debc87..1663635e8a 100644 --- a/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb +++ b/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb @@ -31,7 +31,7 @@ "from sympy import init_printing\n", "init_printing(use_latex='mathjax')\n", "\n", - "# Some ploting setup\n", + "# Some plotting setup\n", "plt.rc('font', family='serif')\n", "plt.rc('xtick', labelsize=20)\n", "plt.rc('ytick', labelsize=20)" @@ -157,14 +157,14 @@ "source": [ "# Vectorial form\n", "\n", - "While conventional litterature writes the elastic wave-equation as a set of scalar PDEs, the higher level representation comes from Hooke's law and the equation of motion and writes as:\n", + "While conventional literature writes the elastic wave-equation as a set of scalar PDEs, the higher level representation comes from Hooke's law and the equation of motion and writes as:\n", "\n", "\\begin{cases}\n", "&\\frac{dv}{dt} = \\nabla . \\tau \\\\\n", "&\\frac{d \\tau}{dt} = \\lambda tr(\\nabla v) \\mathbf{I} + \\mu (\\nabla v + (\\nabla v)^T)\n", "\\end{cases}\n", "\n", - "and as $tr(\\nabla v)$ is the divergence of $v$ we can reqrite it as\n", + "and as $tr(\\nabla v)$ is the divergence of $v$ we can rewrite it as\n", "\n", "\\begin{cases}\n", "&\\frac{dv}{dt} = \\nabla . 
\\tau \\\\\n", diff --git a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb index 9a16f497d6..70b5e5c51b 100644 --- a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb +++ b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb @@ -123,7 +123,7 @@ "t = np.linspace(0, T, Nsamples + 1)\n", "rick = ricker(t, f=f)\n", "rick_fft = sp.fftpack.fft(rick)\n", - "abcissa = sp.fftpack.fftfreq(Nsamples, T/Nsamples)\n", + "abscissa = sp.fftpack.fftfreq(Nsamples, T/Nsamples)\n", "\n", "fig, ax = plt.subplots(1, 2)\n", "ax[0].plot(t, rick)\n", @@ -132,7 +132,7 @@ "ax[0].set_xlabel('Time (t)')\n", "ax[0].set_ylabel('Amplitude')\n", "\n", - "ax[1].plot(abcissa[:Nsamples//2], np.abs(rick_fft[:Nsamples//2]))\n", + "ax[1].plot(abscissa[:Nsamples//2], np.abs(rick_fft[:Nsamples//2]))\n", "ax[1].set_xlim(0, 10*f)\n", "ax[1].set_title('Fourier spectrum')\n", "ax[1].set_xlabel('Frequency (f = 1/t)')\n", @@ -1753,7 +1753,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The two DRP stencils produce curves that are indistinguishable in the left hand plot, the difference is only really noticable in the logarithmic difference, where we see larger error for DRP stencil 2 than DRP stencil 1, while still remaining small.\n", + "The two DRP stencils produce curves that are indistinguishable in the left hand plot, the difference is only really noticeable in the logarithmic difference, where we see larger error for DRP stencil 2 than DRP stencil 1, while still remaining small.\n", "\n", "We can also look at the isosurfaces of the velocity difference function $\\hat{\\delta}$, the expression that the objective function tried to minimise." 
] diff --git a/examples/seismic/tutorials/07_DRP_schemes.ipynb b/examples/seismic/tutorials/07_DRP_schemes.ipynb index 7eae8a7019..e6bcf5fe10 100644 --- a/examples/seismic/tutorials/07_DRP_schemes.ipynb +++ b/examples/seismic/tutorials/07_DRP_schemes.ipynb @@ -67,9 +67,9 @@ "source": [ "By default the 'standard' Taylor series expansion result, where `h_x` represents the $x$-direction grid spacing, is returned. However, there may be instances when a user wishes to use 'non-standard' weights when, for example, implementing a dispersion-relation-preserving (DRP) scheme. See e.g. \n", - "[1] Christopher K.W. Tam, Jay C. Webb (1993). ”Dispersion-Relation-Preserving Finite Difference Schemes for Computational Acoustics.” **J. Comput. Phys.**, 107(2), 262--281. https://doi.org/10.1006/jcph.1993.1142\n", + "[1] Christopher K.W. Tam, Jay C. Webb (1993). ”Dispersion-Relation-Preserving Finite Difference Schemes for Computational Acoustics.” **J. Comput. Phys.**, 107(2), 262--281. https://doi.org/10.1006/jcph.1993.1142\n", - "for further details. The use of such modified weights is facilitated in Devito via the custom finite difference coefficents functionality. Lets form a Devito equation featuring a derivative with custom coefficients:" + "for further details. The use of such modified weights is facilitated in Devito via the custom finite difference coefficients functionality. Lets form a Devito equation featuring a derivative with custom coefficients:" ] }, { @@ -105,9 +105,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Example: Finite difference modeling for a large velocity-contrast acousitc wave model\n", + "## Example: Finite difference modeling for a large velocity-contrast acoustic wave model\n", "\n", - "It is advised to read through the 'Introduction to seismic modelling' notebook located in devito/examples/seismic/tutorials/01_modelling.ipynb before proceeding with this example since much introductory material will be ommited here. 
The example now considered is based on an example introduced in\n", + "It is advised to read through the 'Introduction to seismic modelling' notebook located in devito/examples/seismic/tutorials/01_modelling.ipynb before proceeding with this example since much introductory material will be omitted here. The example now considered is based on an example introduced in\n", "\n", "[2] Yang Liu (2013). ”Globally optimal finite-difference schemes based on least squares.” **GEOPHYSICS**, 78(4), 113--132. https://doi.org/10.1190/geo2012-0480.1.\n", "\n", @@ -509,7 +509,7 @@ "# Source term:\n", "src_term = src.inject(field=u_DRP.forward, expr=src * dt**2 / model.m)\n", "\n", - "# Create the operator, incoporating both upper and lower stencils:\n", + "# Create the operator, incorporating both upper and lower stencils:\n", "op = Operator([stencil_u, stencil_l] + src_term, subs=model.spacing_map)" ] }, diff --git a/examples/seismic/tutorials/09_viscoelastic.ipynb b/examples/seismic/tutorials/09_viscoelastic.ipynb index 01811adf75..76ab0fa2f7 100644 --- a/examples/seismic/tutorials/09_viscoelastic.ipynb +++ b/examples/seismic/tutorials/09_viscoelastic.ipynb @@ -112,7 +112,7 @@ "metadata": {}, "outputs": [], "source": [ - "# As pointed out in Thorbecke's implementation and documentation, the viscoelastic wave euqation is\n", + "# As pointed out in Thorbecke's implementation and documentation, the viscoelastic wave equation is\n", "# not always stable with the standard elastic CFL condition. 
We enforce a smaller critical dt here\n", "# to ensure the stability.\n", "model.dt_scale = .9" diff --git a/examples/seismic/tutorials/10_nmo_correction.ipynb b/examples/seismic/tutorials/10_nmo_correction.ipynb index 7abafd6aad..fb67563c40 100644 --- a/examples/seismic/tutorials/10_nmo_correction.ipynb +++ b/examples/seismic/tutorials/10_nmo_correction.ipynb @@ -34,7 +34,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Before the NMO corretion we will describe a setup of seismic modelling with Devito in a simple 2D case. We will create a physical model of our domain and define a multiple source and an according set of receivers to model for the forward model. But first, we initialize some basic utilities." + "Before the NMO correction we will describe a setup of seismic modelling with Devito in a simple 2D case. We will create a physical model of our domain and define a multiple source and an according set of receivers to model for the forward model. But first, we initialize some basic utilities." ] }, { @@ -349,7 +349,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The computed offset for each trace will be arraged in another SparseFunction with number of points equal to number of traces." + "The computed offset for each trace will be arranged in another SparseFunction with number of points equal to number of traces." 
] }, { diff --git a/examples/seismic/tutorials/11_viscoacoustic.ipynb b/examples/seismic/tutorials/11_viscoacoustic.ipynb index b883f31677..0f6317dfe1 100644 --- a/examples/seismic/tutorials/11_viscoacoustic.ipynb +++ b/examples/seismic/tutorials/11_viscoacoustic.ipynb @@ -72,7 +72,7 @@ "| :--- | :--- |\n", "|$f$ |Frequency |\n", "|$f_o$ |Reference frequency |\n", - "|$\\omega$ |Angular frenquency |\n", + "|$\\omega$ |Angular frequency |\n", "|$\\omega_0$ |Angular Reference Frequency |\n", "|$v$ |Velocity model |\n", "|$v_0$ |Reference velocity at $\\omega_0$ |\n", diff --git a/examples/seismic/tutorials/12_time_blocking.ipynb b/examples/seismic/tutorials/12_time_blocking.ipynb index 5d3882cdc0..4079423a32 100644 --- a/examples/seismic/tutorials/12_time_blocking.ipynb +++ b/examples/seismic/tutorials/12_time_blocking.ipynb @@ -113,7 +113,7 @@ "1. Jacobian linearized forward: maps model perturbation to data perturbation, *forward in time*\n", "1. Jacobian linearized adjoint: maps data perturbation to model perturbation, *backward in time*\n", "\n", - "We will design a small 2D test experiment with a source in the middle of the model and short enough elapsed modeling time that we do not need to worry about boundary reflections for these tests, or runnin out of memory saving all time steps." + "We will design a small 2D test experiment with a source in the middle of the model and short enough elapsed modeling time that we do not need to worry about boundary reflections for these tests, or running out of memory saving all time steps." ] }, { @@ -126,7 +126,7 @@ "\n", "### Propagation uses ```TimeFunction(..., save=None)```\n", "\n", - "We use a default constructed ```TimeFunction``` for propagation. This can be specified in the constructor via either ```save=None``` or no ```save``` argument at all. Devito backs such a default ```TimeFunction``` by a ```Buffer``` of size ```time_order+1```, or 3 for second order in time. 
We show below the mapping from the monotonic *ordinary time indices* to the buffered *modulo time indices* as used by a ```Buffer``` in a ```TimeFuntion``` with ```time_order=2```.\n", + "We use a default constructed ```TimeFunction``` for propagation. This can be specified in the constructor via either ```save=None``` or no ```save``` argument at all. Devito backs such a default ```TimeFunction``` by a ```Buffer``` of size ```time_order+1```, or 3 for second order in time. We show below the mapping from the monotonic *ordinary time indices* to the buffered *modulo time indices* as used by a ```Buffer``` in a ```TimeFunction``` with ```time_order=2```.\n", "\n", "### Modulo indexing for ```Buffer``` of size 3\n", "```\n", @@ -134,7 +134,7 @@ "Modulo time indices: 0 1 2 0 1 2 0 1 2 0 1 2 0 1 2 0\n", "```\n", "\n", - "*Important note:* the modulo indexing of ```Buffer``` is the reason we will separate propagation from serialization. If we use a larger ```Bufffer``` as the ```TimeFunction``` for propagation, we would have to deal with the modulo indexing not just for the current time index, but also previous and next time indices (assuming second order in time). This means that the previous and next time steps can overwrite the locations of the ordinary time indices when you propagate for a block of time steps. **This is the reason we do not use the same ```TimeFunction``` for both propagation and serialization.**\n", + "*Important note:* the modulo indexing of ```Buffer``` is the reason we will separate propagation from serialization. If we use a larger ```Buffer``` as the ```TimeFunction``` for propagation, we would have to deal with the modulo indexing not just for the current time index, but also previous and next time indices (assuming second order in time). This means that the previous and next time steps can overwrite the locations of the ordinary time indices when you propagate for a block of time steps. 
**This is the reason we do not use the same ```TimeFunction``` for both propagation and serialization.**\n", "\n", "### Generated code for a second order in time PDE\n", "We now show an excerpt from Devito generated code for a second order in time operator. A second order in time PDE requires two wavefields in order to advance in time: the wavefield at the next time step $u(t+\\Delta t)$ is a function of the wavefield at previous time step $u(t-\\Delta t)$ and the wavefield at the current time step $u(t)$. Remember that Devito uses a ```Buffer``` of size 3 to handle this. \n", @@ -211,7 +211,7 @@ "\n", "## Arrays used to save file offsets and compressed sizes\n", "\n", - "We use two arrays the length of the total number of time steps to save bookeeping information used for the serialization and compression. During de-serialization these offsets and lengths will be used to seek the correct location and read the correct length from the binary file saving the compressed data.\n", + "We use two arrays the length of the total number of time steps to save bookkeeping information used for the serialization and compression. During de-serialization these offsets and lengths will be used to seek the correct location and read the correct length from the binary file saving the compressed data.\n", "\n", "| Array | Description |\n", "|:---|:---|\n", @@ -625,7 +625,7 @@ "\n", "The stencils for the two operators you see below are exactly the same, the only significant difference is that we use two different ```TimeFunction```s. We could therefore reduce code duplication in two ways:\n", "\n", - "1. Use the placeholder design pattern and ```stencil.subs``` to substitude the appropriate ```TimeFunction```. \n", + "1. Use the placeholder design pattern and ```stencil.subs``` to substitute the appropriate ```TimeFunction```. 
\n", "Please see the FAQ for more information [https://github.com/devitocodes/devito/wiki/FAQ#how-are-abstractions-used-in-the-seismic-examples](https://github.com/devitocodes/devito/wiki/FAQ#how-are-abstractions-used-in-the-seismic-examples)\n", "\n", "2. Write a function and use it to build the stencils.\n", @@ -1287,7 +1287,7 @@ "# Open the binary file in read only mode\n", "f = open(filename, \"rb\")\n", "\n", - "# Temporay nd array for decompression\n", + "# Temporary nd array for decompression\n", "d = copy.copy(v2._data[0,:,:])\n", "\n", "# Array to hold compression ratio\n", @@ -1646,7 +1646,7 @@ "# Open the binary file in read only mode\n", "f = open(filename, \"rb\")\n", "\n", - "# Temporay nd array for decompression\n", + "# Temporary nd array for decompression\n", "d = copy.copy(v2._data[0,:,:])\n", "\n", "# Array to hold compression ratio\n", diff --git a/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb b/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb index 26d3eb5cca..9e3f0ee186 100644 --- a/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb +++ b/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb @@ -31,7 +31,7 @@ "metadata": {}, "source": [ "## Introduction \n", - "The goal of this tutorial is to implement and validate the Least-squares reverse time migration (LSRTM) using a 2D three-layered velocity model with a square in the middle. The algorithm has been implemented using the Born's appoximation.\n", + "The goal of this tutorial is to implement and validate the Least-squares reverse time migration (LSRTM) using a 2D three-layered velocity model with a square in the middle. 
The algorithm has been implemented using the Born's approximation.\n", "\n", "The acoustic wave equation for constant density is:\n", "\n", diff --git a/examples/seismic/tutorials/14_creating_synthetics.ipynb b/examples/seismic/tutorials/14_creating_synthetics.ipynb index bb27537db3..86dc692d1d 100644 --- a/examples/seismic/tutorials/14_creating_synthetics.ipynb +++ b/examples/seismic/tutorials/14_creating_synthetics.ipynb @@ -36,7 +36,7 @@ "metadata": {}, "source": [ "## Overview and setup\n", - "The synthetics which we will be building in this tutorial will be made with the use of GemPy, an open-source 3D geological modelling package for python. As this is not a core dependency of Devito, we will need to install it. If issues are encountered whilst installing GemPy into a `conda` environment using `pip`, you can alternatively create a python `venv` and install Devito in this environment using `pip` as per usual. Note that it will also be necesary to install an `ipykernel` in this environment to run this notebook. From here, we can install GemPy:" + "The synthetics which we will be building in this tutorial will be made with the use of GemPy, an open-source 3D geological modelling package for python. As this is not a core dependency of Devito, we will need to install it. If issues are encountered whilst installing GemPy into a `conda` environment using `pip`, you can alternatively create a python `venv` and install Devito in this environment using `pip` as per usual. Note that it will also be necessary to install an `ipykernel` in this environment to run this notebook. From here, we can install GemPy:" ] }, { @@ -117,7 +117,7 @@ "![gempy_devito_grid_diagram.png](https://github.com/devitocodes/devito/examples/seismic/tutorials/gempy_devito_grid_diagram.png?raw=1)\n", "A comparison of the cell-centered vs node-centered conventions of GemPy and Devito respectively, along with the differences in how they measure extent. 
It is necessary to account for this to ensure that the two grids are co-located.\n", "\n", - "As we can see in the figure above, this is due to differences in the way in which grids are defined in each package and is necessary to ensure that the model is not stretched and distorted when transistioning between the two, and that they are correctly aligned." + "As we can see in the figure above, this is due to differences in the way in which grids are defined in each package and is necessary to ensure that the model is not stretched and distorted when transitioning between the two, and that they are correctly aligned." ] }, { @@ -276,7 +276,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We will now set some points for the base of the sands and CO2. The lower shale is considered the basement, meaning that its base does not need to be defined and it will extend to the bottom of the model. Alongside these points, we wil need to define an orientation for the surface.\n", + "We will now set some points for the base of the sands and CO2. The lower shale is considered the basement, meaning that its base does not need to be defined and it will extend to the bottom of the model. Alongside these points, we will need to define an orientation for the surface.\n", "\n", "To minimise repetition, we will define a function to loop over a list of points and add each to the surface." ] @@ -1146,7 +1146,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we will set up the time axis for our model. Again, this is a convenience object, which we will use in setting up the source and recievers." + "Now we will set up the time axis for our model. Again, this is a convenience object, which we will use in setting up the source and receivers." ] }, { @@ -1209,7 +1209,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We will also configure our recievers in a line along the x axis, centered in the y, also at a depth of 20m." 
+ "We will also configure our receivers in a line along the x axis, centered in the y, also at a depth of 20m." ] }, { @@ -1308,7 +1308,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we can set up our source and reciever terms to include in our `Operator`." + "Now we can set up our source and receiver terms to include in our `Operator`." ] }, { @@ -1408,7 +1408,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(rec.data, cmap='viridis', aspect='auto', vmax=0.01, vmin=-0.01)\n", - "plt.xlabel(\"Reciever number\")\n", + "plt.xlabel(\"Receiver number\")\n", "plt.ylabel(\"Time (ms)\")\n", "plt.colorbar()\n", "plt.show()" diff --git a/examples/seismic/tutorials/16_ader_fd.ipynb b/examples/seismic/tutorials/16_ader_fd.ipynb index 27443675a5..c98c83d15f 100644 --- a/examples/seismic/tutorials/16_ader_fd.ipynb +++ b/examples/seismic/tutorials/16_ader_fd.ipynb @@ -104,7 +104,7 @@ "\n", "rho.data[:] = c.data[:]\n", "\n", - "# Define bouyancy for shorthand\n", + "# Define buoyancy for shorthand\n", "b = 1/rho\n", "# Define celerity shorthands\n", "c2 = c**2\n", diff --git a/examples/seismic/viscoacoustic/operators.py b/examples/seismic/viscoacoustic/operators.py index de84aad76f..d4a0d0adb4 100755 --- a/examples/seismic/viscoacoustic/operators.py +++ b/examples/seismic/viscoacoustic/operators.py @@ -237,7 +237,7 @@ def kv_1st_order(model, geometry, p, **kwargs): return [u_v, u_p] else: # Particle velocity - # Becaue v is a Vector, `.T` applies a standard matrix transpose + # Because v is a Vector, `.T` applies a standard matrix transpose # so we need to do the derivative transpose by hand with `-*.dtl` pde_v = -v.dtl - grad(rho * p) u_v = Eq(v.backward, damp * solve(pde_v, v.backward)) @@ -346,7 +346,7 @@ def maxwell_1st_order(model, geometry, p, **kwargs): else: # Particle velocity - # Becaue v is a Vector, `.T` applies a standard matrix transpose + # Because v is a Vector, `.T` applies a standard matrix transpose # so we need to do the derivative 
transpose by hand with `-*.dtl` pde_v = -v.dtl - grad(rho * p) u_v = Eq(v.backward, damp * solve(pde_v, v.backward)) diff --git a/examples/timestepping/acoustic_superstep.py b/examples/timestepping/acoustic_superstep.py index c482de3563..2c87b41407 100644 --- a/examples/timestepping/acoustic_superstep.py +++ b/examples/timestepping/acoustic_superstep.py @@ -62,7 +62,7 @@ def acoustic_model(model, t0, t1, t2, critical_dt, source, step=1, snapshots=1): if step == 1: # Non-superstep case # In this case we need to create a new `TimeFunction` and copy - # the previous soluton into that new function. This is necessary + # the previous solution into that new function. This is necessary # when a rotating buffer is used in the `TimeFunction` and the # order of the timesteps is not necessarily the right order for # resuming the simulation. We also create a new stencil that diff --git a/examples/userapi/02_apply.ipynb b/examples/userapi/02_apply.ipynb index 1c3fac6ae8..9a6c809309 100644 --- a/examples/userapi/02_apply.ipynb +++ b/examples/userapi/02_apply.ipynb @@ -616,7 +616,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can see the discrepency between naively summing the sizes of the `Function`s used to construct the `Operator` and the results of the estimate. This is due to the array temporary reduced by the compiler." + "We can see the discrepancy between naively summing the sizes of the `Function`s used to construct the `Operator` and the results of the estimate. This is due to the array temporary reduced by the compiler." ] }, { diff --git a/examples/userapi/03_subdomains.ipynb b/examples/userapi/03_subdomains.ipynb index 8c73671543..cf0697f238 100644 --- a/examples/userapi/03_subdomains.ipynb +++ b/examples/userapi/03_subdomains.ipynb @@ -310,7 +310,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "First, note that in the above we have used the shortand `d = dimensions` so that `d` will be a tuple of N-dimensions and then `{d: ... 
for d in dimensions}` such that our mapper will be valid for N-dimensional grids. Next we note the inclusion of `('middle', 1, 1)`. For mappers of the form `('middle', N, M)`, the `SubDomain` spans a contiguous region of `dimension_size - (N + M)` points starting at (in terms of python indexing) `N` and finishing at ``dimension_size - M - 1``.\n", + "First, note that in the above we have used the shorthand `d = dimensions` so that `d` will be a tuple of N-dimensions and then `{d: ... for d in dimensions}` such that our mapper will be valid for N-dimensional grids. Next we note the inclusion of `('middle', 1, 1)`. For mappers of the form `('middle', N, M)`, the `SubDomain` spans a contiguous region of `dimension_size - (N + M)` points starting at (in terms of python indexing) `N` and finishing at ``dimension_size - M - 1``.\n", "\n", "The two other options available are `'left'` and `'right'`. For a statement of the form `d: ('left', N)` the `SubDomain` spans a contiguous region of `N` points starting at `d`\\'s left extreme. 
A statement of the form `('right', N)` is analogous to the previous case but starting at the dimensions right extreme instead.\n", "\n", diff --git a/examples/userapi/05_conditional_dimension.ipynb b/examples/userapi/05_conditional_dimension.ipynb index c8d50d629d..cd4c4cca60 100644 --- a/examples/userapi/05_conditional_dimension.ipynb +++ b/examples/userapi/05_conditional_dimension.ipynb @@ -587,7 +587,7 @@ "ci = ConditionalDimension(name='ci', parent=i, factor=factor)\n", "\n", "g = Function(name='g', shape=(size,), dimensions=(i,))\n", - "# Intialize g\n", + "# Initialize g\n", "g.data[:,]= list(range(size))\n", "f = Function(name='f', shape=(int(size/factor),), dimensions=(ci,))\n", "\n", diff --git a/examples/userapi/06_sparse_operations.ipynb b/examples/userapi/06_sparse_operations.ipynb index b150474ac8..c71d9d5be8 100644 --- a/examples/userapi/06_sparse_operations.ipynb +++ b/examples/userapi/06_sparse_operations.ipynb @@ -104,7 +104,7 @@ "\n", "A `SparseFunction` is a devito object representing a `Function` defined at sparse positions. It contains the coordinates of the sparse positions and the data at those positions. The coordinates are stored in a `SubFunction` object as a `Function` of shape `(npoints, ndim)` where `npoints` is the number of sparse positions and `ndim` is the number of dimension of the grid.\n", "\n", - "A `SparseFunction` comes with the two main methods `inject(field, expr)` and `interpolate(field)` that respectively inject `expr` into `field` at the sparse positons and interpolate `field` at the sparse positions." + "A `SparseFunction` comes with the two main methods `inject(field, expr)` and `interpolate(field)` that respectively inject `expr` into `field` at the sparse positions and interpolate `field` at the sparse positions." 
] }, { diff --git a/examples/userapi/07_functions_on_subdomains.ipynb b/examples/userapi/07_functions_on_subdomains.ipynb index 4c8d1a50da..7724809163 100644 --- a/examples/userapi/07_functions_on_subdomains.ipynb +++ b/examples/userapi/07_functions_on_subdomains.ipynb @@ -2724,7 +2724,7 @@ "tau = TensorTimeFunction(name='tau', grid=lowerfield, space_order=so)\n", "v = VectorTimeFunction(name='v', grid=lowerfield, space_order=so)\n", "\n", - "# Intialise parameter fields\n", + "# Initialise parameter fields\n", "ro.data[:] = 1\n", "cp.data[:] = 1.5\n", "\n", diff --git a/pyproject.toml b/pyproject.toml index 67064d47cb..5bdbcaf224 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -120,6 +120,10 @@ Sur = "Sur" pointss = "pointss" # numer - common abbrevisation of numerator numer = "numer" +# Yto - Very unfortunately appears in a short random binary string in a notebook +Yto = "Yto" +# ges - Random HTML identifier in examples/seismic/tutorials/17_fourier_mode +ges = "ges" [tool.flake8] max-line-length = 90 From 611eba667954dff949322770e715ab2ec04e281c Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 2 Jan 2026 14:32:49 +0000 Subject: [PATCH 25/42] misc: Fix typos in tests --- pyproject.toml | 8 +++++++- tests/test_adjoint.py | 2 +- tests/test_caching.py | 4 ++-- tests/test_constant.py | 2 +- tests/test_dimension.py | 20 ++++++++++---------- tests/test_dle.py | 2 +- tests/test_dse.py | 4 ++-- tests/test_fission.py | 4 ++-- tests/test_gpu_common.py | 4 ++-- tests/test_interpolation.py | 14 +++++++------- tests/test_ir.py | 10 +++++----- tests/test_linearize.py | 2 +- tests/test_mpi.py | 4 ++-- tests/test_operator.py | 8 ++++---- tests/test_pickle.py | 4 ++-- tests/test_staggered_utils.py | 2 +- tests/test_subdomains.py | 2 +- tests/test_symbolics.py | 6 +++--- tests/test_tensors.py | 2 +- tests/test_visitors.py | 2 +- 20 files changed, 56 insertions(+), 50 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5bdbcaf224..c8627e0bf8 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -104,6 +104,11 @@ arange = "arange" dorder = "dorder" # simpify - function in Devito for creating Sympy expressions simpify = "simpify" +# vas - plural of va +# Used in tests/test_gpu_common.py::TestStreaming::test_streaming_complete (TODO: FIX) +vas = "vas" +# tpe - Thread Pool Executor +tpe = "tpe" [tool.typos.default.extend-words] # datas - plural of data? @@ -118,13 +123,14 @@ Sur = "Sur" # pointss - plural of points? # Horrendous use in operator/profiling (TODO: FIX) pointss = "pointss" -# numer - common abbrevisation of numerator +# numer - common abbreviation of numerator numer = "numer" # Yto - Very unfortunately appears in a short random binary string in a notebook Yto = "Yto" # ges - Random HTML identifier in examples/seismic/tutorials/17_fourier_mode ges = "ges" + [tool.flake8] max-line-length = 90 ignore = [ diff --git a/tests/test_adjoint.py b/tests/test_adjoint.py index 2a70be5d87..d7c9bbf736 100644 --- a/tests/test_adjoint.py +++ b/tests/test_adjoint.py @@ -92,7 +92,7 @@ def test_adjoint_F(self, mkey, shape, kernel, space_order, time_order, setup_fun """ Adjoint test for the forward modeling operator. The forward modeling operator F generates a shot record (measurements) - from a source while the adjoint of F generates measurments at the source + from a source while the adjoint of F generates measurements at the source location from data. This test uses the conventional dot test: < Fx, y> = """ diff --git a/tests/test_caching.py b/tests/test_caching.py index 86f0d45097..71ace0680a 100644 --- a/tests/test_caching.py +++ b/tests/test_caching.py @@ -561,7 +561,7 @@ def test_symbol_aliasing_reverse(self): u.data[:] = 6. 
u_ref = weakref.ref(u.data) - # Create derivative and delete orignal u[x, y] + # Create derivative and delete original u[x, y] dx = u.dx del u clear_cache() @@ -682,7 +682,7 @@ def test_sparse_function(self, operate_on_empty_cache): assert len(_SymbolCache) == cur_cache_size + ncreated - # No new symbolic obejcts are created + # No new symbolic objects are created u.inject(expr=u, field=u) assert len(_SymbolCache) == cur_cache_size + ncreated diff --git a/tests/test_constant.py b/tests/test_constant.py index 5437c2478e..eaaafc543a 100644 --- a/tests/test_constant.py +++ b/tests/test_constant.py @@ -10,7 +10,7 @@ class TestConst: def test_const_change(self): """ - Test that Constand.data can be set as required. + Test that Constant.data can be set as required. """ n = 5 diff --git a/tests/test_dimension.py b/tests/test_dimension.py index f803185db1..d72d3404a2 100644 --- a/tests/test_dimension.py +++ b/tests/test_dimension.py @@ -1482,16 +1482,16 @@ def test_stepping_dim_in_condition_lowering(self): grid = Grid(shape=(4, 4)) _, y = grid.dimensions - ths = 10 + threshold = 10 g = TimeFunction(name='g', grid=grid) - ci = ConditionalDimension(name='ci', parent=y, condition=Le(g, ths)) + ci = ConditionalDimension(name='ci', parent=y, condition=Le(g, threshold)) op = Operator(Eq(g.forward, g + 1, implicit_dims=ci)) - op.apply(time_M=ths+3) - assert np.all(g.data[0, :, :] == ths) - assert np.all(g.data[1, :, :] == ths + 1) + op.apply(time_M=threshold+3) + assert np.all(g.data[0, :, :] == threshold) + assert np.all(g.data[1, :, :] == threshold + 1) assert 'if (g[t0][x + 1][y + 1] <= 10)\n' '{\n g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1' in str(op.ccode) @@ -1809,7 +1809,7 @@ def test_issue_2007(self): # proxy integral f.data[:] = np.array(freq[:]) # Proxy Fourier integral holder - ure = Function(name="ure", grid=grid, + u_re = Function(name="u_re", grid=grid, dimensions=(freq_dim,) + u.indices[1:], shape=(nfreq,) + u.shape[1:]) @@ -1817,16 +1817,16 @@ def 
test_issue_2007(self): ct = ConditionalDimension(name="ct", parent=time, condition=Ge(time, f)) eqns = [ Eq(u.forward, u+1), - Eq(ure, ure + u, implicit_dims=ct) + Eq(u_re, u_re + u, implicit_dims=ct) ] op = Operator(eqns) op.apply(time_M=10) - assert np.all(ure.data[0] == 54) - assert np.all(ure.data[1] == 49) - assert np.all(ure.data[2] == 27) + assert np.all(u_re.data[0] == 54) + assert np.all(u_re.data[1] == 49) + assert np.all(u_re.data[2] == 27) # Make sure the ConditionalDimension is at the right depth for performance trees = retrieve_iteration_tree(op) diff --git a/tests/test_dle.py b/tests/test_dle.py index 3be184e46b..c3330ad26a 100644 --- a/tests/test_dle.py +++ b/tests/test_dle.py @@ -1010,7 +1010,7 @@ def test_incs_no_atomic(self): u = TimeFunction(name='u', grid=grid) v = TimeFunction(name='v', grid=grid) - # Format: u(t, x, nastyness) += 1 + # Format: u(t, x, nastiness) += 1 uf = u[t, x, f, z] # All loops get collapsed, but the `y` and `z` loops are PARALLEL_IF_ATOMIC, diff --git a/tests/test_dse.py b/tests/test_dse.py index e93f4215d2..b79605c099 100644 --- a/tests/test_dse.py +++ b/tests/test_dse.py @@ -2609,7 +2609,7 @@ def test_premature_evalderiv_lowering(self): # it behaves as if it were one mock_custom_deriv = u.dx.dy.evaluate - # This symbolic operation -- creating an Add between an arbitray object + # This symbolic operation -- creating an Add between an arbitrary object # and an EvalDerivative -- caused the EvalDerivative to be prematurely # simplified being flatten into an Add expr0 = u.dt - mock_custom_deriv @@ -2745,7 +2745,7 @@ class TestTTI: @cached_property def model(self): - # TTI layered model for the tti test, no need for a smooth interace + # TTI layered model for the tti test, no need for a smooth interface # bewtween the two layer as the compilation passes are tested, not the # physical prettiness of the result -- which ultimately saves time return demo_model('layers-tti', nlayers=3, nbl=10, space_order=8, diff --git 
a/tests/test_fission.py b/tests/test_fission.py index f62ac63bab..3c6263e2e1 100644 --- a/tests/test_fission.py +++ b/tests/test_fission.py @@ -39,7 +39,7 @@ def define(self, dimensions): def test_nofission_as_unprofitable(): """ - Test there's no fission if no increase in number of collapsable loops. + Test there's no fission if no increase in number of collapsible loops. """ grid = Grid(shape=(20, 20)) x, y = grid.dimensions @@ -79,7 +79,7 @@ def test_nofission_as_illegal(): def test_fission_partial(): """ - Test there's no fission if no increase in number of collapsable loops. + Test there's no fission if no increase in number of collapsible loops. """ grid = Grid(shape=(20, 20)) x, y = grid.dimensions diff --git a/tests/test_gpu_common.py b/tests/test_gpu_common.py index a31560f988..af5c71fc11 100644 --- a/tests/test_gpu_common.py +++ b/tests/test_gpu_common.py @@ -158,7 +158,7 @@ def test_visible_devices_with_devito_deviceid(self): def test_deviceid_per_rank(self, mode): """ Test that Device IDs set by the user on a per-rank basis do not - get modifed. + get modified. """ # Reversed order to ensure it is different to default user_set_deviceids = (1, 0) @@ -1312,7 +1312,7 @@ def test_streaming_complete(self): def test_streaming_split_noleak(self): """ - Make sure the helper pthreads leak no memory in the target langauge runtime. + Make sure the helper pthreads leak no memory in the target language runtime. """ nt = 1000 grid = Grid(shape=(20, 20, 20)) diff --git a/tests/test_interpolation.py b/tests/test_interpolation.py index 0012c4ed40..cdb6f7773f 100644 --- a/tests/test_interpolation.py +++ b/tests/test_interpolation.py @@ -255,7 +255,7 @@ def test_precomputed_injection_time(r): ]) def test_interpolate(shape, coords, npoints=20): """Test generic point interpolation testing the x-coordinate of an - abitrary set of points going across the grid. + arbitrary set of points going across the grid. 
""" a = unit_box(shape=shape) p = points(a.grid, coords, npoints=npoints) @@ -274,7 +274,7 @@ def test_interpolate(shape, coords, npoints=20): ]) def test_interpolate_cumm(shape, coords, npoints=20): """Test generic point interpolation testing the x-coordinate of an - abitrary set of points going across the grid. + arbitrary set of points going across the grid. """ a = unit_box(shape=shape) p = points(a.grid, coords, npoints=npoints) @@ -295,7 +295,7 @@ def test_interpolate_cumm(shape, coords, npoints=20): ]) def test_interpolate_time_shift(shape, coords, npoints=20): """Test generic point interpolation testing the x-coordinate of an - abitrary set of points going across the grid. + arbitrary set of points going across the grid. This test verifies the optional time shifting for SparseTimeFunctions """ a = unit_box_time(shape=shape) @@ -334,7 +334,7 @@ def test_interpolate_time_shift(shape, coords, npoints=20): ]) def test_interpolate_array(shape, coords, npoints=20): """Test generic point interpolation testing the x-coordinate of an - abitrary set of points going across the grid. + arbitrary set of points going across the grid. """ a = unit_box(shape=shape) p = points(a.grid, coords, npoints=npoints) @@ -354,7 +354,7 @@ def test_interpolate_array(shape, coords, npoints=20): ]) def test_interpolate_custom(shape, coords, npoints=20): """Test generic point interpolation testing the x-coordinate of an - abitrary set of points going across the grid. + arbitrary set of points going across the grid. """ a = unit_box(shape=shape) p = custom_points(a.grid, coords, npoints=npoints) @@ -399,7 +399,7 @@ def test_interpolation_dx(): ]) def test_interpolate_indexed(shape, coords, npoints=20): """Test generic point interpolation testing the x-coordinate of an - abitrary set of points going across the grid. Unlike other tests, + arbitrary set of points going across the grid. Unlike other tests, here we interpolate an expression built using the indexed notation. 
""" a = unit_box(shape=shape) @@ -476,7 +476,7 @@ def test_multi_inject(shape, coords, nexpr, result, npoints=19): ]) def test_inject_time_shift(shape, coords, result, npoints=19): """Test generic point injection testing the x-coordinate of an - abitrary set of points going across the grid. + arbitrary set of points going across the grid. This test verifies the optional time shifting for SparseTimeFunctions """ a = unit_box_time(shape=shape) diff --git a/tests/test_ir.py b/tests/test_ir.py index be2bcedffc..90de31ddfd 100644 --- a/tests/test_ir.py +++ b/tests/test_ir.py @@ -234,7 +234,7 @@ def test_iteration_instance_cmp(self, ii_num, ii_literal): def test_timed_access_regularity(self, ta_literal): """ - Test TimedAcces.{is_regular,is_irregular} + Test TimedAccess.{is_regular,is_irregular} """ (tcxy_w0, tcxy_r0, tcx1y1_r1, tcx1y_r1, rev_tcxy_w0, rev_tcx1y1_r1, tcyx_irr0, tcxx_irr1, tcxy_irr2) = ta_literal @@ -866,12 +866,12 @@ def test_critical_region_v0(self): scope = Scope(exprs) - # Mock depedencies so that the fences (CriticalRegions) don't float around + # Mock dependencies so that the fences (CriticalRegions) don't float around assert len(scope.writes[mocksym0]) == 2 assert len(scope.reads[mocksym0]) == 2 assert len(scope.d_all) == 3 - # No other mock depedencies because there's no other place the Eq + # No other mock dependencies because there's no other place the Eq # within the critical sequence can float to assert len(scope.writes[mocksym1]) == 1 assert mocksym1 not in scope.reads @@ -897,14 +897,14 @@ def test_critical_region_v1(self): scope = Scope(exprs) - # Mock depedencies so that the fences (CriticalRegions) don't float around + # Mock dependencies so that the fences (CriticalRegions) don't float around assert len(scope.writes[mocksym0]) == 2 assert len(scope.reads[mocksym0]) == 4 assert len([i for i in scope.d_all if i.source.access is mocksym0 or i.sink.access is mocksym0]) == 7 - # More mock depedencies because Eq must not float outside of the 
critical + # More mock dependencies because Eq must not float outside of the critical # sequence assert len(scope.writes[mocksym1]) == 1 assert len(scope.reads[mocksym1]) == 2 diff --git a/tests/test_linearize.py b/tests/test_linearize.py index 52cc220070..882777cb54 100644 --- a/tests/test_linearize.py +++ b/tests/test_linearize.py @@ -270,7 +270,7 @@ def test_unsubstituted_indexeds(): we end up with two `r0[x, y, z]`, but the former's `x` and `y` are SpaceDimensions, while the latter's are BlockDimensions. This means that the two objects, while looking identical, are different, and in - partical they hash differently, hence we need two entries in a mapper + particular they hash differently, hence we need two entries in a mapper to perform an Uxreplace. But FindSymbols made us detect only one entry... """ grid = Grid(shape=(8, 8, 8)) diff --git a/tests/test_mpi.py b/tests/test_mpi.py index c0d0b4a564..ede7989683 100644 --- a/tests/test_mpi.py +++ b/tests/test_mpi.py @@ -1543,7 +1543,7 @@ def test_merge_and_hoist_haloupdate_if_diff_locindices(self, mode): In the IET we end up with *two* HaloSpots, one placed before the time loop, and one placed before the second Eq. The third Eq, - reading from f[t0], will seamlessy find its halo up-to-date, + reading from f[t0], will seamlessly find its halo up-to-date, due to the f[t1] being updated in the previous time iteration. 
""" grid = Grid(shape=(10,)) @@ -2576,7 +2576,7 @@ def test_nontrivial_operator(self, mode): # 0 0 4 4 4 4 4 0 0 # 0 0 5 5 5 5 5 0 0 - assert np.all(u.data_ro_domain[0] == 0) # The write occures at t=1 + assert np.all(u.data_ro_domain[0] == 0) # The write occurs at t=1 glb_pos_map = u.grid.distributor.glb_pos_map # Check cornes diff --git a/tests/test_operator.py b/tests/test_operator.py index 1a9b82942a..b05aa642b8 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -637,7 +637,7 @@ def test_sparsetimefunction_inject(self): def test_sparsetimefunction_inject_dt(self): """ - Test injection of the time deivative of a SparseTimeFunction into a TimeFunction + Test injection of the time derivative of a SparseTimeFunction into a TimeFunction """ grid = Grid(shape=(11, 11)) u = TimeFunction(name='u', grid=grid, time_order=2, save=5, space_order=1) @@ -1009,12 +1009,12 @@ def test_dimension_size_override(self): one.data[:] = 1. op = Operator(Eq(a.forward, a + one)) - # Test dimension override via the buffered dimenions + # Test dimension override via the buffered dimensions a.data[0] = 0. op(a=a, t=5) assert(np.allclose(a.data[1], 5.)) - # Test dimension override via the parent dimenions + # Test dimension override via the parent dimensions a.data[0] = 0. 
op(a=a, time=4) assert(np.allclose(a.data[0], 4.)) @@ -1073,7 +1073,7 @@ def test_override_sparse_data_default_dim(self): def test_argument_derivation_order(self, nt=100): """ Ensure the precedence order of arguments is respected - Defaults < (overriden by) Tensor Arguments < Dimensions < Scalar Arguments + Defaults < (overridden by) Tensor Arguments < Dimensions < Scalar Arguments """ i, j, k = dimify('i j k') shape = (10, 10, 10) diff --git a/tests/test_pickle.py b/tests/test_pickle.py index 0a4b848e55..018720a330 100644 --- a/tests/test_pickle.py +++ b/tests/test_pickle.py @@ -643,7 +643,7 @@ def test_equation(self, pickle): assert new_eq.implicit_dims[0].name == 'xs' assert new_eq.implicit_dims[0].factor == 4 - @pytest.mark.parametrize('typ', [ctypes.c_float, 'struct truct']) + @pytest.mark.parametrize('typ', [ctypes.c_float, 'struct my_struct']) def test_Cast(self, pickle, typ): a = Symbol('a') un = Cast(a, dtype=typ) @@ -653,7 +653,7 @@ def test_Cast(self, pickle, typ): assert un == new_un - @pytest.mark.parametrize('typ', [ctypes.c_float, 'struct truct']) + @pytest.mark.parametrize('typ', [ctypes.c_float, 'struct my_struct']) def test_SizeOf(self, pickle, typ): un = SizeOf(typ) diff --git a/tests/test_staggered_utils.py b/tests/test_staggered_utils.py index 86c6e99fa7..b522b70d3f 100644 --- a/tests/test_staggered_utils.py +++ b/tests/test_staggered_utils.py @@ -62,7 +62,7 @@ def test_avg(ndim): @pytest.mark.parametrize('ndim', [1, 2, 3]) def test_is_param(ndim): """ - Test that only parameter are evaluated at the variable anf Function and FD indices + Test that only parameter are evaluated at the variable and Function and FD indices stay unchanged """ grid = Grid(tuple([10]*ndim)) diff --git a/tests/test_subdomains.py b/tests/test_subdomains.py index a053f4a296..72c9a320a7 100644 --- a/tests/test_subdomains.py +++ b/tests/test_subdomains.py @@ -542,7 +542,7 @@ class DummySubdomains(SubDomainSet): def test_issue_1761_b(self): """ - Follow-up issue emerged 
after patching #1761. The thicknesses assigments + Follow-up issue emerged after patching #1761. The thicknesses assignments were missing before the third equation. Further improvements have enabled fusing the third equation with the first diff --git a/tests/test_symbolics.py b/tests/test_symbolics.py index 548ce3fc3c..5945e7ce9a 100644 --- a/tests/test_symbolics.py +++ b/tests/test_symbolics.py @@ -1054,7 +1054,7 @@ def test_multibounds_op(self): ]) def test_relations_w_complex_assumptions(self, op, expr, assumptions, expected): """ - Tests evalmin/evalmax with multiple args and assumtpions""" + Tests evalmin/evalmax with multiple args and assumptions""" a = Symbol('a', positive=True) # noqa b = Symbol('b', positive=True) # noqa c = Symbol('c', positive=True) # noqa @@ -1095,7 +1095,7 @@ def test_relations_w_complex_assumptions(self, op, expr, assumptions, expected): ]) def test_relations_w_complex_assumptions_II(self, op, expr, assumptions, expected): """ - Tests evalmin/evalmax with multiple args and assumtpions""" + Tests evalmin/evalmax with multiple args and assumptions""" a = Symbol('a', positive=False) # noqa b = Symbol('b', positive=False) # noqa c = Symbol('c', positive=True) # noqa @@ -1114,7 +1114,7 @@ def test_relations_w_complex_assumptions_II(self, op, expr, assumptions, expecte ]) def test_assumptions(self, op, expr, assumptions, expected): """ - Tests evalmin/evalmax with multiple args and assumtpions""" + Tests evalmin/evalmax with multiple args and assumptions""" a = Symbol('a', positive=False) b = Symbol('b', positive=False) c = Symbol('c', positive=True) diff --git a/tests/test_tensors.py b/tests/test_tensors.py index 790cab9020..dee3c40696 100644 --- a/tests/test_tensors.py +++ b/tests/test_tensors.py @@ -178,7 +178,7 @@ def test_transpose_vs_T(func1): f4 = f1.dx.transpose(inner=False) # inner=True is the same as T assert f3 == f2 - # inner=False doesn't tranpose inner derivatives + # inner=False doesn't transpose inner derivatives for f4i, 
f2i in zip(f4, f2): assert f4i == f2i.T diff --git a/tests/test_visitors.py b/tests/test_visitors.py index a7db2c3f13..28b61761e1 100644 --- a/tests/test_visitors.py +++ b/tests/test_visitors.py @@ -324,7 +324,7 @@ def test_transformer_add_replace(exprs, block2, block3): def test_nested_transformer(exprs, iters, block2): """When created with the kwarg ``nested=True``, a Transformer performs nested replacements. This test simultaneously replace an inner expression - and an Iteration sorrounding it.""" + and an Iteration surrounding it.""" target_loop = block2.nodes[1] target_expr = target_loop.nodes[0].nodes[0] mapper = {target_loop: iters[3](target_loop.nodes[0]), From f779176ff7c7b20ea16d5c18827b0346516808e7 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 2 Jan 2026 16:24:58 +0000 Subject: [PATCH 26/42] misc: Kick CI From fab89c89fb323d01fb3498584a3c92a520e0231f Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Sat, 3 Jan 2026 00:11:56 +0000 Subject: [PATCH 27/42] misc: Manual linting for Devito --- conftest.py | 13 +- devito/arch/archinfo.py | 14 +- devito/arch/compiler.py | 69 ++++---- devito/builtins/arithmetic.py | 14 +- devito/builtins/initializers.py | 41 ++--- devito/core/autotuning.py | 40 +++-- devito/core/cpu.py | 5 +- devito/core/gpu.py | 5 +- devito/core/intel.py | 16 +- devito/core/operator.py | 35 ++-- devito/core/power.py | 8 +- devito/data/data.py | 50 +++--- devito/data/decomposition.py | 20 +-- devito/data/utils.py | 24 ++- devito/finite_differences/coefficients.py | 4 +- devito/finite_differences/derivative.py | 14 +- devito/finite_differences/differentiable.py | 53 +++--- .../finite_differences/finite_difference.py | 28 +--- devito/finite_differences/operators.py | 8 +- devito/finite_differences/tools.py | 41 ++--- devito/ir/cgen/printer.py | 11 +- devito/ir/clusters/algorithms.py | 17 +- devito/ir/clusters/cluster.py | 20 +-- devito/ir/clusters/visitors.py | 2 +- devito/ir/equations/algorithms.py | 12 +- 
devito/ir/equations/equation.py | 22 +-- devito/ir/iet/algorithms.py | 4 +- devito/ir/iet/efunc.py | 16 +- devito/ir/iet/nodes.py | 113 ++++++------- devito/ir/iet/utils.py | 2 +- devito/ir/iet/visitors.py | 71 +++----- devito/ir/stree/algorithms.py | 12 +- devito/ir/stree/tree.py | 11 +- devito/ir/support/basic.py | 69 ++++---- devito/ir/support/guards.py | 4 +- devito/ir/support/space.py | 49 +++--- devito/ir/support/symregistry.py | 2 +- devito/ir/support/syncs.py | 6 +- devito/ir/support/utils.py | 21 +-- devito/ir/support/vector.py | 55 +++--- devito/mpatches/rationaltools.py | 5 +- devito/mpi/distributed.py | 113 ++++++++----- devito/mpi/halo_scheme.py | 28 ++-- devito/mpi/reduction_scheme.py | 2 +- devito/mpi/routines.py | 150 ++++++++++------- devito/operations/interpolators.py | 38 +++-- devito/operations/solve.py | 7 +- devito/operator/operator.py | 42 ++--- devito/operator/profiling.py | 11 +- devito/operator/registry.py | 7 +- devito/parameters.py | 6 +- devito/passes/__init__.py | 2 +- devito/passes/clusters/aliases.py | 107 ++++++------ devito/passes/clusters/asynchrony.py | 10 +- devito/passes/clusters/blocking.py | 16 +- devito/passes/clusters/buffering.py | 40 ++--- devito/passes/clusters/cse.py | 5 +- devito/passes/clusters/derivatives.py | 7 +- devito/passes/clusters/factorization.py | 24 ++- devito/passes/clusters/implicit.py | 11 +- devito/passes/clusters/misc.py | 21 ++- devito/passes/equations/linearity.py | 5 +- devito/passes/iet/asynchrony.py | 6 +- devito/passes/iet/definitions.py | 11 +- devito/passes/iet/engine.py | 18 +- devito/passes/iet/langbase.py | 15 +- devito/passes/iet/languages/CXX.py | 6 +- devito/passes/iet/languages/openacc.py | 8 +- devito/passes/iet/languages/openmp.py | 27 ++- devito/passes/iet/linearization.py | 20 +-- devito/passes/iet/misc.py | 6 +- devito/passes/iet/mpi.py | 7 +- devito/passes/iet/orchestration.py | 19 ++- devito/passes/iet/parpragma.py | 26 ++- devito/symbolics/extended_dtypes.py | 2 +- 
devito/symbolics/extended_sympy.py | 27 ++- devito/symbolics/inspection.py | 32 ++-- devito/symbolics/manipulation.py | 9 +- devito/symbolics/queries.py | 12 +- devito/symbolics/search.py | 7 +- devito/tools/abc.py | 10 +- devito/tools/algorithms.py | 2 +- devito/tools/data_structures.py | 44 +++-- devito/tools/dtypes_lowering.py | 41 +++-- devito/tools/os_helper.py | 4 +- devito/tools/timing.py | 14 +- devito/tools/utils.py | 22 +-- devito/types/args.py | 15 +- devito/types/array.py | 21 ++- devito/types/basic.py | 67 ++++---- devito/types/constant.py | 8 +- devito/types/dense.py | 156 +++++++++++------ devito/types/dimension.py | 90 +++++----- devito/types/equation.py | 8 +- devito/types/grid.py | 133 +++++++++------ devito/types/lazy.py | 7 +- devito/types/misc.py | 13 +- devito/types/object.py | 7 +- devito/types/parallel.py | 13 +- devito/types/sparse.py | 157 ++++++++++-------- devito/types/tensor.py | 61 ++++--- devito/types/utils.py | 2 +- .../seismic/tutorials/12_time_blocking.ipynb | 2 + requirements-testing.txt | 1 + tests/test_data.py | 19 ++- tests/test_dimension.py | 9 +- 106 files changed, 1497 insertions(+), 1365 deletions(-) diff --git a/conftest.py b/conftest.py index 195f3b96ba..23581cd74b 100644 --- a/conftest.py +++ b/conftest.py @@ -187,10 +187,17 @@ def parallel(item, m): testname = get_testname(item) # Only spew tracebacks on rank 0. 
# Run xfailing tests to ensure that errors are reported to calling process - args = ["-n", "1", pyversion, "-m", "pytest", "-s", "--runxfail", "-qq", testname] + args = [ + "-n", "1", pyversion, "-m", "pytest", "-s", "--runxfail", "-v", + "--timeout=600", "--timeout-method=thread", "-o faulthandler_timeout=660", + testname + ] if nprocs > 1: - args.extend([":", "-n", "%d" % (nprocs - 1), pyversion, "-m", "pytest", - "-s", "--runxfail", "--tb=no", "-qq", "--no-summary", testname]) + args.extend([ + ":", "-n", "%d" % (nprocs - 1), pyversion, "-m", "pytest", + "-s", "--runxfail", "-v", "--timeout=600", "--timeout-method=thread", + "-o faulthandler_timeout=660", testname + ]) # OpenMPI requires an explicit flag for oversubscription. We need it as some # of the MPI tests will spawn lots of processes if mpi_distro == 'OpenMPI': diff --git a/devito/arch/archinfo.py b/devito/arch/archinfo.py index 0273e88a22..020f66323d 100644 --- a/devito/arch/archinfo.py +++ b/devito/arch/archinfo.py @@ -18,7 +18,7 @@ from devito.logger import warning from devito.tools import all_equal, as_tuple, memoized_func -__all__ = [ +__all__ = [ # noqa: RUF022 'platform_registry', 'get_cpu_info', 'get_gpu_info', 'get_visible_devices', 'get_nvidia_cc', 'get_cuda_path', 'get_cuda_version', 'get_hip_path', 'check_cuda_runtime', 'get_m1_llvm_path', 'get_advisor_path', 'Platform', @@ -391,7 +391,7 @@ def cbk(deviceid=0): return None return cbk - gpu_info['mem.%s' % i] = make_cbk(i) + gpu_info[f'mem.{i}'] = make_cbk(i) gpu_info['architecture'] = 'unspecified' gpu_info['vendor'] = 'INTEL' @@ -780,7 +780,7 @@ def __str__(self): return self.name def __repr__(self): - return "TargetPlatform[%s]" % self.name + return f'TargetPlatform[{self.name}]' def _detect_isa(self): return 'unknown' @@ -1141,7 +1141,7 @@ def supports(self, query, language=None): elif query == 'async-loads' and cc >= 80: # Asynchronous pipeline loads -- introduced in Ampere return True - elif query in ('tma', 'thread-block-cluster') and 
cc >= 90: + elif query in ('tma', 'thread-block-cluster') and cc >= 90: # noqa: SIM103 # Tensor Memory Accelerator -- introduced in Hopper return True else: @@ -1202,10 +1202,8 @@ def march(cls): try: p1 = Popen(['offload-arch'], stdout=PIPE, stderr=PIPE) except OSError: - try: + with suppress(OSError): p1 = Popen(['mygpu', '-d', fallback], stdout=PIPE, stderr=PIPE) - except OSError: - pass return fallback output, _ = p1.communicate() @@ -1248,7 +1246,7 @@ def node_max_mem_trans_nbytes(platform): elif isinstance(platform, Device): return max(Cpu64.max_mem_trans_nbytes, mmtb0) else: - assert False, f"Unknown platform type: {type(platform)}" + raise AssertionError(f"Unknown platform type: {type(platform)}") # CPUs diff --git a/devito/arch/compiler.py b/devito/arch/compiler.py index edfeec17db..a0b4ce8a75 100644 --- a/devito/arch/compiler.py +++ b/devito/arch/compiler.py @@ -1,6 +1,7 @@ import platform import time import warnings +from contextlib import suppress from functools import partial from hashlib import sha1 from itertools import filterfalse @@ -43,16 +44,20 @@ def sniff_compiler_version(cc, allow_fail=False): return Version("0") except UnicodeDecodeError: return Version("0") - except OSError: + except OSError as e: if allow_fail: return Version("0") else: - raise RuntimeError(f"The `{cc}` compiler isn't available on this system") + raise RuntimeError( + f"The `{cc}` compiler isn't available on this system" + ) from e ver = ver.strip() if ver.startswith("gcc"): compiler = "gcc" - elif ver.startswith("clang") or ver.startswith("Apple LLVM") or ver.startswith("Homebrew clang"): + elif ver.startswith("clang") \ + or ver.startswith("Apple LLVM") \ + or ver.startswith("Homebrew clang"): compiler = "clang" elif ver.startswith("Intel"): compiler = "icx" @@ -92,10 +97,8 @@ def sniff_compiler_version(cc, allow_fail=False): pass # Pure integer versions (e.g., ggc5, rather than gcc5.0) need special handling - try: + with suppress(TypeError): ver = Version(float(ver)) - 
except TypeError: - pass return ver @@ -335,21 +338,21 @@ def make(self, loc, args): logfile = path.join(self.get_jit_dir(), f"{hash_key}.log") errfile = path.join(self.get_jit_dir(), f"{hash_key}.err") - with change_directory(loc), open(logfile, "w") as lf: - with open(errfile, "w") as ef: - - command = ['make'] + args - lf.write("Compilation command:\n") - lf.write(" ".join(command)) - lf.write("\n\n") - try: - check_call(command, stderr=ef, stdout=lf) - except CalledProcessError as e: - raise CompilationError(f'Command "{e.cmd}" return error status' - f'{e.returncode}. ' - f'Unable to compile code.\n' - f'Compile log in {logfile}\n' - f'Compile errors in {errfile}\n') + with change_directory(loc), open(logfile, "w") as lf, open(errfile, "w") as ef: + command = ['make'] + args + lf.write("Compilation command:\n") + lf.write(" ".join(command)) + lf.write("\n\n") + try: + check_call(command, stderr=ef, stdout=lf) + except CalledProcessError as e: + raise CompilationError( + f'Command "{e.cmd}" return error status' + f'{e.returncode}. ' + f'Unable to compile code.\n' + f'Compile log in {logfile}\n' + f'Compile errors in {errfile}\n' + ) from e debug(f"Make <{' '.join(args)}>") def _cmdline(self, files, object=False): @@ -395,9 +398,11 @@ def jit_compile(self, soname, code): # ranks would end up creating different cache dirs cache_dir = cache_dir.joinpath('jit-backdoor') cache_dir.mkdir(parents=True, exist_ok=True) - except FileNotFoundError: - raise ValueError(f"Trying to use the JIT backdoor for `{src_file}`, but " - "the file isn't present") + except FileNotFoundError as e: + raise ValueError( + f"Trying to use the JIT backdoor for `{src_file}`, but " + "the file isn't present" + ) from e # Should the compilation command be emitted? 
debug = configuration['log-level'] == 'DEBUG' @@ -708,12 +713,10 @@ def __init_finalize__(self, **kwargs): # explicitly pass the flags that an `mpicc` would implicitly use compile_flags, link_flags = sniff_mpi_flags('mpicxx') - try: + with suppress(ValueError): # No idea why `-pthread` would pop up among the `compile_flags` + # Just in case they fix it, we wrap it up within a suppress compile_flags.remove('-pthread') - except ValueError: - # Just in case they fix it, we wrap it up within a try-except - pass self.cflags.extend(compile_flags) # Some arguments are for the host compiler @@ -1005,15 +1008,9 @@ def __new__(cls, *args, **kwargs): elif isinstance(platform, IntelDevice): _base = OneapiCompiler elif isinstance(platform, NvidiaDevice): - if language == 'cuda': - _base = CudaCompiler - else: - _base = NvidiaCompiler + _base = CudaCompiler if language == 'cuda' else NvidiaCompiler elif platform is AMDGPUX: - if language == 'hip': - _base = HipCompiler - else: - _base = AOMPCompiler + _base = HipCompiler if language == 'hip' else AOMPCompiler else: _base = GNUCompiler diff --git a/devito/builtins/arithmetic.py b/devito/builtins/arithmetic.py index 6c19f056be..64f1e0547b 100644 --- a/devito/builtins/arithmetic.py +++ b/devito/builtins/arithmetic.py @@ -33,7 +33,7 @@ def norm(f, order=2): op = dv.Operator([dv.Eq(s, 0.0)] + eqns + [dv.Inc(s, Pow(dv.Abs(p), order)), dv.Eq(n[0], s)], - name='norm%d' % order) + name=f'norm{order}') op.apply(**kwargs) v = np.power(n.data[0], 1/order) @@ -63,24 +63,24 @@ def sum(f, dims=None): new_dims = tuple(d for d in f.dimensions if d not in dims) shape = tuple(f._size_domain[d] for d in new_dims) if f.is_TimeFunction and f.time_dim not in dims: - out = f._rebuild(name="%ssum" % f.name, shape=shape, dimensions=new_dims, + out = f._rebuild(name=f'{f.name}sum', shape=shape, dimensions=new_dims, initializer=np.empty(0)) elif f.is_SparseTimeFunction: if f.time_dim in dims: # Sum over time -> SparseFunction new_coords = 
f.coordinates._rebuild( - name="%ssum_coords" % f.name, initializer=f.coordinates.initializer + name=f'{f.name}sum_coords', initializer=f.coordinates.initializer ) - out = dv.SparseFunction(name="%ssum" % f.name, grid=f.grid, + out = dv.SparseFunction(name=f'{f.name}sum', grid=f.grid, dimensions=new_dims, npoint=f.shape[1], coordinates=new_coords) else: # Sum over rec -> TimeFunction - out = dv.TimeFunction(name="%ssum" % f.name, grid=f.grid, shape=shape, + out = dv.TimeFunction(name=f'{f.name}sum', grid=f.grid, shape=shape, dimensions=new_dims, space_order=0, time_order=f.time_order) else: - out = dv.Function(name="%ssum" % f.name, grid=f.grid, + out = dv.Function(name=f'{f.name}sum', grid=f.grid, space_order=f.space_order, shape=shape, dimensions=new_dims) @@ -217,4 +217,4 @@ def _reduce_func(f, func, mfunc): else: return v.item() else: - raise ValueError("Expected Function, got `%s`" % type(f)) + raise ValueError(f'Expected Function, got `{type(f)}`') diff --git a/devito/builtins/initializers.py b/devito/builtins/initializers.py index eb44c0360e..d8c6055e20 100644 --- a/devito/builtins/initializers.py +++ b/devito/builtins/initializers.py @@ -59,18 +59,18 @@ def assign(f, rhs=0, options=None, name='assign', assign_halo=False, **kwargs): eqs = [] if options: - for i, j, k in zip(as_list(f), rhs, options): + for i, j, k in zip(as_list(f), rhs, options, strict=True): if k is not None: eqs.append(dv.Eq(i, j, **k)) else: eqs.append(dv.Eq(i, j)) else: - for i, j in zip(as_list(f), rhs): + for i, j in zip(as_list(f), rhs, strict=True): eqs.append(dv.Eq(i, j)) if assign_halo: subs = {} - for d, h in zip(f.dimensions, f._size_halo): + for d, h in zip(f.dimensions, f._size_halo, strict=True): if sum(h) == 0: continue subs[d] = dv.CustomDimension(name=d.name, parent=d, @@ -143,15 +143,15 @@ def __init__(self, lw): self.lw = lw def define(self, dimensions): - return {d: ('middle', l, l) for d, l in zip(dimensions, self.lw)} + return {d: ('middle', l, l) for d, l in 
zip(dimensions, self.lw, strict=True)} def create_gaussian_weights(sigma, lw): weights = [w/w.sum() for w in (np.exp(-0.5/s**2*(np.linspace(-l, l, 2*l+1))**2) - for s, l in zip(sigma, lw))] + for s, l in zip(sigma, lw, strict=True))] return as_tuple(np.array(w) for w in weights) def fset(f, g): - indices = [slice(l, -l, 1) for _, l in zip(g.dimensions, lw)] + indices = [slice(l, -l, 1) for _, l in zip(g.dimensions, lw, strict=True)] slices = (slice(None, None, 1), )*g.ndim if isinstance(f, np.ndarray): f[slices] = g.data[tuple(indices)] @@ -182,7 +182,7 @@ def fset(f, g): # Create the padded grid: objective_domain = ObjectiveDomain(lw) - shape_padded = tuple([np.array(s) + 2*l for s, l in zip(shape, lw)]) + shape_padded = tuple([np.array(s) + 2*l for s, l in zip(shape, lw, strict=True)]) extent_padded = tuple([s-1 for s in shape_padded]) grid = dv.Grid(shape=shape_padded, subdomains=objective_domain, extent=extent_padded) @@ -193,7 +193,7 @@ def fset(f, g): weights = create_gaussian_weights(sigma, lw) mapper = {} - for d, l, w in zip(f_c.dimensions, lw, weights): + for d, l, w in zip(f_c.dimensions, lw, weights, strict=True): lhs = [] rhs = [] options = [] @@ -238,13 +238,15 @@ def _initialize_function(function, data, nbl, mapper=None, mode='constant'): def buff(i, j): return [(i + k - 2*max(max(nbl))) for k in j] - b = [min(l) for l in (w for w in (buff(i, j) for i, j in zip(local_size, halo)))] + b = [min(l) for l in ( + w for w in (buff(i, j) for i, j in zip(local_size, halo, strict=True)) + )] if any(np.array(b) < 0): - raise ValueError("Function `%s` halo is not sufficiently thick." 
% function) + raise ValueError(f'Function `{function}` halo is not sufficiently thick.') - for d, (nl, nr) in zip(function.space_dimensions, as_tuple(nbl)): - dim_l = dv.SubDimension.left(name='abc_%s_l' % d.name, parent=d, thickness=nl) - dim_r = dv.SubDimension.right(name='abc_%s_r' % d.name, parent=d, thickness=nr) + for d, (nl, nr) in zip(function.space_dimensions, as_tuple(nbl), strict=True): + dim_l = dv.SubDimension.left(name=f'abc_{d.name}_l', parent=d, thickness=nl) + dim_r = dv.SubDimension.right(name=f'abc_{d.name}_r', parent=d, thickness=nr) if mode == 'constant': subsl = nl subsr = d.symbolic_max - nr @@ -259,7 +261,7 @@ def buff(i, j): rhs.append(function.subs({d: subsr})) options.extend([None, None]) - if mapper and d in mapper.keys(): + if mapper and d in mapper: exprs = mapper[d] lhs_extra = exprs['lhs'] rhs_extra = exprs['rhs'] @@ -353,8 +355,9 @@ def initialize_function(function, data, nbl, mapper=None, mode='constant', if not isinstance(data, (list, tuple)): raise TypeError("Expected a list of `data`") elif len(function) != len(data): - raise ValueError("Expected %d `data` items, got %d" % - (len(function), len(data))) + raise ValueError( + f'Expected {len(function)} `data` items, got {len(data)}' + ) if mapper is not None: raise NotImplementedError("Unsupported `mapper` with batching") @@ -374,14 +377,14 @@ def initialize_function(function, data, nbl, mapper=None, mode='constant', f._create_data() if nbl == 0: - for f, data in zip(functions, datas): + for f, data in zip(functions, datas, strict=True): if isinstance(data, dv.Function): f.data[:] = data.data[:] else: f.data[:] = data[:] else: lhss, rhss, optionss = [], [], [] - for f, data in zip(functions, datas): + for f, data in zip(functions, datas, strict=True): lhs, rhs, options = _initialize_function(f, data, nbl, mapper, mode) @@ -391,7 +394,7 @@ def initialize_function(function, data, nbl, mapper=None, mode='constant', assert len(lhss) == len(rhss) == len(optionss) - name = name or 
'initialize_%s' % '_'.join(f.name for f in functions) + name = name or f'initialize_{"_".join(f.name for f in functions)}' assign(lhss, rhss, options=optionss, name=name, **kwargs) if pad_halo: diff --git a/devito/core/autotuning.py b/devito/core/autotuning.py index 76812edf2e..fa833b84a1 100644 --- a/devito/core/autotuning.py +++ b/devito/core/autotuning.py @@ -38,8 +38,8 @@ def autotune(operator, args, level, mode): key = [level, mode] accepted = configuration._accepted['autotuning'] if key not in accepted: - raise ValueError("The accepted `(level, mode)` combinations are `%s`; " - "provided `%s` instead" % (accepted, key)) + raise ValueError(f"The accepted `(level, mode)` combinations are `{accepted}`; " + f"provided `{key}` instead") # We get passed all the arguments, but the cfunction only requires a subset at_args = OrderedDict([(p.name, args[p.name]) for p in operator.parameters]) @@ -84,7 +84,9 @@ def autotune(operator, args, level, mode): if timesteps is None: return args, {} else: - warning("cannot perform autotuning with %d time loops; skipping" % len(steppers)) + warning( + f'Cannot perform autotuning with {len(steppers)} time loops; skipping' + ) return args, {} # Use a fresh Timer for auto-tuning @@ -134,8 +136,11 @@ def autotune(operator, args, level, mode): # Record timing elapsed = timer.total timings.setdefault(nt, OrderedDict()).setdefault(n, {})[bs] = elapsed - log("run <%s> took %f (s) in %d timesteps" % - (','.join('%s=%s' % i for i in run), elapsed, timesteps)) + log( + f'run <{",".join(f"{i[0]}={i[1]}" for i in run)}> ' + f'took {elapsed} (s) ' + f'in {timesteps} timesteps' + ) # Prepare for the next autotuning run update_time_bounds(stepper, at_args, timesteps, mode) @@ -154,7 +159,7 @@ def autotune(operator, args, level, mode): best = min(mapper, key=mapper.get) best = OrderedDict(best + tuple(mapper[best].args)) best.pop(None, None) - log("selected <%s>" % (','.join('%s=%s' % i for i in best.items()))) + log("selected 
<{}>".format(','.join('{}={}'.format(*i) for i in best.items()))) except ValueError: warning("could not perform any runs") return args, {} @@ -210,10 +215,9 @@ def init_time_bounds(stepper, at_args, args): return False else: at_args[dim.max_name] = at_args[dim.min_name] + options['squeezer'] - if dim.size_name in args: - if not isinstance(args[dim.size_name], range): - # May need to shrink to avoid OOB accesses - at_args[dim.max_name] = min(at_args[dim.max_name], args[dim.max_name]) + if dim.size_name in args and not isinstance(args[dim.size_name], range): + # May need to shrink to avoid OOB accesses + at_args[dim.max_name] = min(at_args[dim.max_name], args[dim.max_name]) if at_args[dim.min_name] > at_args[dim.max_name]: warning("too few time iterations; skipping") return False @@ -271,9 +275,9 @@ def calculate_nblocks(tree, blockable): collapsed = tree[index:index + (ncollapsed or index+1)] blocked = [i.dim for i in collapsed if i.dim in blockable] remainders = [(d.root.symbolic_max-d.root.symbolic_min+1) % d.step for d in blocked] - niters = [d.root.symbolic_max - i for d, i in zip(blocked, remainders)] + niters = [d.root.symbolic_max - i for d, i in zip(blocked, remainders, strict=True)] nblocks = prod((i - d.root.symbolic_min + 1) / d.step - for d, i in zip(blocked, niters)) + for d, i in zip(blocked, niters, strict=True)) return nblocks @@ -299,7 +303,7 @@ def generate_block_shapes(blockable, args, level): if level in ['aggressive', 'max']: # Ramp up to larger block shapes handle = tuple((i, options['blocksize-l0'][-1]) for i, _ in ret[0]) - for i in range(3): + for _ in range(3): new_bs = tuple((b, v*2) for b, v in handle) ret.insert(ret.index(handle) + 1, new_bs) handle = new_bs @@ -324,7 +328,7 @@ def generate_block_shapes(blockable, args, level): level_1 = [d for d, v in mapper.items() if v == 1] if level_1: assert len(level_1) == len(level_0) - assert all(d1.parent is d0 for d0, d1 in zip(level_0, level_1)) + assert all(d1.parent is d0 for d0, d1 in 
zip(level_0, level_1, strict=True)) for bs in list(ret): handle = [] for v in options['blocksize-l1']: @@ -369,9 +373,9 @@ def generate_nthreads(nthreads, args, level): ret.extend([((name, nthread),) for nthread in cases]) if basic not in ret: - warning("skipping `%s`; perhaps you've set OMP_NUM_THREADS to a " + warning(f"skipping `{dict(basic)}`; perhaps you've set OMP_NUM_THREADS to a " "non-standard value while attempting autotuning in " - "`max` mode?" % dict(basic)) + "`max` mode?") return ret @@ -385,8 +389,8 @@ def generate_nthreads(nthreads, args, level): def log(msg): - perf("AutoTuner: %s" % msg) + perf(f"AutoTuner: {msg}") def warning(msg): - _warning("AutoTuner: %s" % msg) + _warning(f"AutoTuner: {msg}") diff --git a/devito/core/cpu.py b/devito/core/cpu.py index b090552838..5b5aa8448e 100644 --- a/devito/core/cpu.py +++ b/devito/core/cpu.py @@ -106,8 +106,9 @@ def _normalize_kwargs(cls, **kwargs): oo.pop('gpu-create', None) if oo: - raise InvalidOperator("Unrecognized optimization options: [%s]" - % ", ".join(list(oo))) + raise InvalidOperator( + f'Unrecognized optimization options: [{", ".join(list(oo))}]' + ) kwargs['options'].update(o) diff --git a/devito/core/gpu.py b/devito/core/gpu.py index eaf68b0a5b..7f88d9aa0e 100644 --- a/devito/core/gpu.py +++ b/devito/core/gpu.py @@ -116,8 +116,9 @@ def _normalize_kwargs(cls, **kwargs): o['scalar-min-type'] = oo.pop('scalar-min-type', cls.SCALAR_MIN_TYPE) if oo: - raise InvalidOperator("Unsupported optimization options: [%s]" - % ", ".join(list(oo))) + raise InvalidOperator( + f'Unsupported optimization options: [{", ".join(list(oo))}]' + ) kwargs['options'].update(o) diff --git a/devito/core/intel.py b/devito/core/intel.py index 6ab2f70b5f..429bbc3a02 100644 --- a/devito/core/intel.py +++ b/devito/core/intel.py @@ -4,14 +4,14 @@ ) __all__ = [ - 'Intel64AdvCOperator', - 'Intel64AdvCXXOmpOperator', - 'Intel64AdvOmpOperator', - 'Intel64CXXAdvCOperator', - 'Intel64FsgCOperator', - 'Intel64FsgCXXOmpOperator', - 
'Intel64FsgCXXOperator', - 'Intel64FsgOmpOperator', + 'Intel64AdvCOperator', + 'Intel64AdvCXXOmpOperator', + 'Intel64AdvOmpOperator', + 'Intel64CXXAdvCOperator', + 'Intel64FsgCOperator', + 'Intel64FsgCXXOmpOperator', + 'Intel64FsgCXXOperator', + 'Intel64FsgOmpOperator', ] diff --git a/devito/core/operator.py b/devito/core/operator.py index 1bf7ed41bc..35a34e5898 100644 --- a/devito/core/operator.py +++ b/devito/core/operator.py @@ -1,4 +1,5 @@ from collections.abc import Iterable +from contextlib import suppress from functools import cached_property import numpy as np @@ -182,8 +183,9 @@ def _normalize_kwargs(cls, **kwargs): o['parallel'] = False if oo: - raise InvalidOperator("Unrecognized optimization options: [%s]" - % ", ".join(list(oo))) + raise InvalidOperator( + f'Unrecognized optimization options: [{", ".join(list(oo))}]' + ) kwargs['options'].update(o) @@ -194,7 +196,7 @@ def _check_kwargs(cls, **kwargs): oo = kwargs['options'] if oo['mpi'] and oo['mpi'] not in cls.MPI_MODES: - raise InvalidOperator("Unsupported MPI mode `%s`" % oo['mpi']) + raise InvalidOperator("Unsupported MPI mode `{}`".format(oo['mpi'])) if oo['cse-algo'] not in ('basic', 'smartsort', 'advanced'): raise InvalidOperator("Illegal `cse-algo` value") @@ -224,8 +226,9 @@ def _autotune(self, args, setup): else: args, summary = autotune(self, args, level, mode) else: - raise ValueError("Expected bool, str, or 2-tuple, got `%s` instead" - % type(setup)) + raise ValueError( + f"Expected bool, str, or 2-tuple, got `{type(setup)}` instead" + ) # Record the tuned values self._state.setdefault('autotuning', []).append(summary) @@ -285,10 +288,10 @@ def _build(cls, expressions, **kwargs): for i in passes: if i not in cls._known_passes: if i in cls._known_passes_disabled: - warning("Got explicit pass `%s`, but it's unsupported on an " - "Operator of type `%s`" % (i, str(cls))) + warning(f"Got explicit pass `{i}`, but it's unsupported on an " + f"Operator of type `{str(cls)}`") else: - raise 
InvalidOperator("Unknown pass `%s`" % i) + raise InvalidOperator(f"Unknown pass `{i}`") return super()._build(expressions, **kwargs) @@ -302,10 +305,8 @@ def _specialize_dsl(cls, expressions, **kwargs): # Call passes for i in passes: - try: + with suppress(KeyError): expressions = passes_mapper[i](expressions, **kwargs) - except KeyError: - pass return expressions @@ -319,10 +320,8 @@ def _specialize_exprs(cls, expressions, **kwargs): # Call passes for i in passes: - try: + with suppress(KeyError): expressions = passes_mapper[i](expressions) - except KeyError: - pass return expressions @@ -336,10 +335,8 @@ def _specialize_clusters(cls, clusters, **kwargs): # Call passes for i in passes: - try: + with suppress(KeyError): clusters = passes_mapper[i](clusters) - except KeyError: - pass return clusters @@ -460,9 +457,9 @@ def __new__(cls, items, default=None, sparse=None, reduce=None): # E.g., ((32, 4, 8),) items = (ParTileArg(x),) else: - raise ValueError("Expected int or tuple, got %s instead" % type(x)) + raise ValueError(f"Expected int or tuple, got {type(x)} instead") else: - raise ValueError("Expected bool or iterable, got %s instead" % type(items)) + raise ValueError(f"Expected bool or iterable, got {type(items)} instead") obj = super().__new__(cls, *items) obj.default = as_tuple(default) diff --git a/devito/core/power.py b/devito/core/power.py index 0b0fe86533..8d475ab084 100644 --- a/devito/core/power.py +++ b/devito/core/power.py @@ -3,10 +3,10 @@ ) __all__ = [ - 'PowerAdvCOperator', - 'PowerAdvCXXOmpOperator', - 'PowerAdvOmpOperator', - 'PowerCXXAdvCOperator', + 'PowerAdvCOperator', + 'PowerAdvCXXOmpOperator', + 'PowerAdvOmpOperator', + 'PowerCXXAdvCOperator', ] PowerAdvCOperator = Cpu64AdvCOperator diff --git a/devito/data/data.py b/devito/data/data.py index f9280305f6..7da3b0eaf8 100644 --- a/devito/data/data.py +++ b/devito/data/data.py @@ -70,7 +70,7 @@ def __new__(cls, shape, dtype, decomposition=None, modulo=None, # Sanity check -- A Dimension can't be 
at the same time modulo-iterated # and MPI-distributed - assert all(i is None for i, j in zip(obj._decomposition, obj._modulo) + assert all(i is None for i, j in zip(obj._decomposition, obj._modulo, strict=True) if j is True) return obj @@ -118,10 +118,13 @@ def __array_finalize__(self, obj): # From `__getitem__` self._distributor = obj._distributor glb_idx = obj._normalize_index(obj._index_stash) - self._modulo = tuple(m for i, m in zip(glb_idx, obj._modulo) - if not is_integer(i)) + self._modulo = tuple( + m + for i, m in zip(glb_idx, obj._modulo, strict=False) + if not is_integer(i) + ) decomposition = [] - for i, dec in zip(glb_idx, obj._decomposition): + for i, dec in zip(glb_idx, obj._decomposition, strict=False): if is_integer(i): continue elif dec is None: @@ -240,7 +243,7 @@ def __getitem__(self, glb_idx, comm_type, gather_rank=None): shape = [r.stop-r.start for r in self._distributor.all_ranges[i]] idx = [slice(r.start - d.glb_min, r.stop - d.glb_min, r.step) for r, d in zip(self._distributor.all_ranges[i], - self._distributor.decomposition)] + self._distributor.decomposition, strict=True)] for j in range(len(self.shape) - len(self._distributor.glb_shape)): shape.insert(j, glb_shape[j]) idx.insert(j, slice(0, glb_shape[j]+1, 1)) @@ -309,7 +312,7 @@ def __getitem__(self, glb_idx, comm_type, gather_rank=None): # Check if dimensions of the view should now be reduced to # be consistent with those of an equivalent NumPy serial view if not is_gather: - newshape = tuple(s for s, i in zip(retval.shape, loc_idx) + newshape = tuple(s for s, i in zip(retval.shape, loc_idx, strict=True) if type(i) is not np.int64) else: newshape = () @@ -373,7 +376,7 @@ def __setitem__(self, glb_idx, val, comm_type): glb_idx = self._normalize_index(glb_idx) glb_idx, val = self._process_args(glb_idx, val) val_idx = [index_dist_to_repl(i, dec) for i, dec in - zip(glb_idx, self._decomposition)] + zip(glb_idx, self._decomposition, strict=True)] if NONLOCAL in val_idx: # no-op return @@ 
-388,7 +391,7 @@ def __setitem__(self, glb_idx, val, comm_type): val_idx = val_idx[len(val_idx)-val.ndim:] processed = [] # Handle step size > 1 - for i, j in zip(glb_idx, val_idx): + for i, j in zip(glb_idx, val_idx, strict=False): if isinstance(i, slice) and i.step is not None and i.step > 1 and \ j.stop > j.start: processed.append(slice(j.start, j.stop, 1)) @@ -435,12 +438,9 @@ def _process_args(self, idx, val): for i in as_tuple(idx)): processed = [] transform = [] - for j, k in zip(idx, self._distributor.glb_shape): + for j, k in zip(idx, self._distributor.glb_shape, strict=True): if isinstance(j, slice) and j.step is not None and j.step < 0: - if j.start is None: - stop = None - else: - stop = j.start + 1 + stop = None if j.start is None else j.start + 1 if j.stop is None and j.start is None: start = int(np.mod(k-1, -j.step)) elif j.stop is None: @@ -482,7 +482,9 @@ def _index_glb_to_loc(self, glb_idx): return glb_idx loc_idx = [] - for i, s, mod, dec in zip(glb_idx, self.shape, self._modulo, self._decomposition): + for i, s, mod, dec in zip( + glb_idx, self.shape, self._modulo, self._decomposition, strict=False + ): if mod is True: # Need to wrap index based on modulo v = index_apply_modulo(i, s) @@ -490,10 +492,11 @@ def _index_glb_to_loc(self, glb_idx): # Convert the user-provided global indices into local indices. 
try: v = convert_index(i, dec, mode='glb_to_loc') - except TypeError: + except TypeError as e: if self._is_decomposed: - raise NotImplementedError("Unsupported advanced indexing with " - "MPI-distributed Data") + raise NotImplementedError( + "Unsupported advanced indexing with MPI-distributed Data" + ) from e v = i else: v = i @@ -523,7 +526,7 @@ def _set_global_idx(self, val, idx, val_idx): # Convert integers to slices so that shape dims are preserved if is_integer(as_tuple(idx)[0]): data_glb_idx.append(slice(0, 1, 1)) - for i, j in zip(data_loc_idx, val._decomposition): + for i, j in zip(data_loc_idx, val._decomposition, strict=True): if not j.loc_empty: data_glb_idx.append(j.index_loc_to_glb(i)) else: @@ -536,17 +539,16 @@ def _set_global_idx(self, val, idx, val_idx): data_glb_idx.insert(index, value) # Based on `data_glb_idx` the indices to which the locally stored data # block correspond can now be computed: - for i, j, k in zip(data_glb_idx, as_tuple(idx), self._decomposition): + for i, j, k in zip( + data_glb_idx, as_tuple(idx), self._decomposition, strict=False + ): if is_integer(j): mapped_idx.append(j) continue elif isinstance(j, slice) and j.start is None: norm = 0 elif isinstance(j, slice) and j.start is not None: - if j.start >= 0: - norm = j.start - else: - norm = j.start+k.glb_max+1 + norm = j.start if j.start >= 0 else j.start+k.glb_max+1 else: norm = j if i is not None: @@ -580,7 +582,7 @@ def _gather(self, start=None, stop=None, step=1, rank=0): if isinstance(step, int) or step is None: step = [step for _ in self.shape] idx = [] - for i, j, k in zip(start, stop, step): + for i, j, k in zip(start, stop, step, strict=True): idx.append(slice(i, j, k)) idx = tuple(idx) if self._distributor.is_parallel and self._distributor.nprocs > 1: diff --git a/devito/data/decomposition.py b/devito/data/decomposition.py index 7faa0753c0..72c6de39c3 100644 --- a/devito/data/decomposition.py +++ b/devito/data/decomposition.py @@ -109,17 +109,17 @@ def __eq__(self, 
o): if not isinstance(o, Decomposition): return False return self.local == o.local and len(self) == len(o) and\ - all(np.all(i == j) for i, j in zip(self, o)) + all(np.all(i == j) for i, j in zip(self, o, strict=True)) def __repr__(self): ret = [] for i, v in enumerate(self): bounds = (min(v, default=None), max(v, default=None)) - item = '[]' if bounds == (None, None) else '[%d,%d]' % bounds + item = '[]' if bounds == (None, None) else f'[{bounds[0]},{bounds[1]}]' if self.local == i: - item = "<<%s>>" % item + item = f"<<{item}>>" ret.append(item) - return 'Decomposition(%s)' % ', '.join(ret) + return f'Decomposition({", ".join(ret)})' def __call__(self, *args, mode='glb_to_loc', rel=True): """ @@ -248,7 +248,7 @@ def index_glb_to_loc(self, *args, rel=True): # index_glb_to_loc(slice(...)) if isinstance(glb_idx, tuple): if len(glb_idx) != 2: - raise TypeError("Cannot convert index from `%s`" % type(glb_idx)) + raise TypeError(f"Cannot convert index from `{type(glb_idx)}`") if self.loc_empty: return (-1, -3) glb_idx_min, glb_idx_max = glb_idx @@ -275,7 +275,7 @@ def index_glb_to_loc(self, *args, rel=True): else glb_idx.start retfunc = lambda a, b: slice(b, a - 1, glb_idx.step) else: - raise TypeError("Cannot convert index from `%s`" % type(glb_idx)) + raise TypeError(f"Cannot convert index from `{type(glb_idx)}`") # -> Handle negative min/max if glb_idx_min is not None and glb_idx_min < 0: glb_idx_min = glb_max + glb_idx_min + 1 @@ -356,7 +356,7 @@ def index_glb_to_loc(self, *args, rel=True): else: return None else: - raise TypeError("Expected 1 or 2 arguments, found %d" % len(args)) + raise TypeError(f'Expected 1 or 2 arguments, found {len(args)}') def index_loc_to_glb(self, *args): """ @@ -415,7 +415,7 @@ def index_loc_to_glb(self, *args): # index_loc_to_glb((min, max)) if isinstance(loc_idx, tuple): if len(loc_idx) != 2: - raise TypeError("Cannot convert index from `%s`" % type(loc_idx)) + raise TypeError(f"Cannot convert index from `{type(loc_idx)}`") shifted = 
[slice(-1, -2, 1) if (i < 0 or i > rank_length) else i + self.loc_abs_min for i in loc_idx] return as_tuple(shifted) @@ -448,7 +448,7 @@ def index_loc_to_glb(self, *args): glb_stop = loc_idx.stop + self.loc_abs_min return slice(glb_start, glb_stop, loc_idx.step) else: - raise TypeError("Expected 1 arguments, found %d" % len(args)) + raise TypeError(f'Expected 1 arguments, found {len(args)}') def reshape(self, *args): """ @@ -520,7 +520,7 @@ def reshape(self, *args): elif len(args) == 2: nleft, nright = args else: - raise TypeError("Expected 1 or 2 arguments, found %d" % len(args)) + raise TypeError(f'Expected 1 or 2 arguments, found {len(args)}') items = list(self) diff --git a/devito/data/utils.py b/devito/data/utils.py index ab8db341e6..c2f044ddaf 100644 --- a/devito/data/utils.py +++ b/devito/data/utils.py @@ -54,7 +54,7 @@ def index_apply_modulo(idx, modulo): elif isinstance(idx, np.ndarray): return idx else: - raise ValueError("Cannot apply modulo to index of type `%s`" % type(idx)) + raise ValueError(f"Cannot apply modulo to index of type `{type(idx)}`") def index_dist_to_repl(idx, decomposition): @@ -64,16 +64,13 @@ def index_dist_to_repl(idx, decomposition): # Derive shift value if isinstance(idx, slice): - if idx.step is None or idx.step >= 0: - value = idx.start - else: - value = idx.stop + value = idx.start if idx.step is None or idx.step >= 0 else idx.stop else: value = idx if value is None: value = 0 elif not is_integer(value): - raise ValueError("Cannot derive shift value from type `%s`" % type(value)) + raise ValueError(f"Cannot derive shift value from type `{type(value)}`") if value < 0: value += decomposition.glb_max + 1 @@ -90,12 +87,11 @@ def index_dist_to_repl(idx, decomposition): elif isinstance(idx, np.ndarray): return idx - value elif isinstance(idx, slice): - if idx.step is not None and idx.step < 0: - if idx.stop is None: - return slice(idx.start - value, None, idx.step) + if idx.step is not None and idx.step < 0 and idx.stop is None: + 
return slice(idx.start - value, None, idx.step) return slice(idx.start - value, idx.stop - value, idx.step) else: - raise ValueError("Cannot apply shift to type `%s`" % type(idx)) + raise ValueError(f"Cannot apply shift to type `{type(idx)}`") def convert_index(idx, decomposition, mode='glb_to_loc'): @@ -107,7 +103,7 @@ def convert_index(idx, decomposition, mode='glb_to_loc'): elif isinstance(idx, np.ndarray): return np.vectorize(lambda i: decomposition(i, mode=mode))(idx).astype(idx.dtype) else: - raise ValueError("Cannot convert index of type `%s` " % type(idx)) + raise ValueError(f"Cannot convert index of type `{type(idx)}` ") def index_handle_oob(idx): @@ -341,7 +337,7 @@ def mpi_index_maps(loc_idx, shape, topology, coords, comm): owner = owners[index] my_slice = n_rank_slice[owner] rnorm_index = [] - for j, k in zip(my_slice, index): + for j, k in zip(my_slice, index, strict=True): rnorm_index.append(k-j.start) local_si[index] = as_tuple(rnorm_index) it.iternext() @@ -387,7 +383,7 @@ def flip_idx(idx, decomposition): (slice(8, 11, 1),) """ processed = [] - for i, j in zip(as_tuple(idx), decomposition): + for i, j in zip(as_tuple(idx), decomposition, strict=False): if isinstance(i, slice) and i.step is not None and i.step < 0: if i.start is None: stop = None @@ -407,7 +403,7 @@ def flip_idx(idx, decomposition): start = i.start + j.glb_max + 1 else: start = i.start - if i.stop is not None and i.stop < 0: + if i.stop is not None and i.stop < 0: # noqa: SIM108 stop = i.stop + j.glb_max + 1 else: stop = i.stop diff --git a/devito/finite_differences/coefficients.py b/devito/finite_differences/coefficients.py index 543bb5c9ba..8a9b66b826 100644 --- a/devito/finite_differences/coefficients.py +++ b/devito/finite_differences/coefficients.py @@ -5,7 +5,7 @@ class Coefficient: def __init__(self, deriv_order, function, dimension, weights): - deprecations.coeff_warn + _ = deprecations.coeff_warn self._weights = weights self._deriv_order = deriv_order self._function = 
function @@ -34,7 +34,7 @@ def weights(self): class Substitutions: def __init__(self, *args): - deprecations.coeff_warn + _ = deprecations.coeff_warn if any(not isinstance(arg, Coefficient) for arg in args): raise TypeError("Non Coefficient object within input") diff --git a/devito/finite_differences/derivative.py b/devito/finite_differences/derivative.py index 8d572e2a69..ecb1060631 100644 --- a/devito/finite_differences/derivative.py +++ b/devito/finite_differences/derivative.py @@ -1,5 +1,6 @@ from collections import defaultdict from collections.abc import Iterable +from contextlib import suppress from functools import cached_property from itertools import chain @@ -283,7 +284,7 @@ def _validate_fd_order(fd_order, expr, dims, dcounter): expr.time_order if getattr(d, 'is_Time', False) else expr.space_order - for d in dcounter.keys() + for d in dcounter ) return fd_order @@ -467,10 +468,7 @@ def T(self): This is really useful for more advanced FD definitions. For example the conventional Laplacian is `.dxl.T * .dxl` """ - if self._transpose == direct: - adjoint = transpose - else: - adjoint = direct + adjoint = transpose if self._transpose == direct else direct return self._rebuild(transpose=adjoint) @@ -493,7 +491,7 @@ def _eval_at(self, func): x0 = func.indices_ref.getters psubs = {} nx0 = x0.copy() - for d, d0 in x0.items(): + for d, _ in x0.items(): if d in self.dims: # d is a valid Derivative dimension continue @@ -570,10 +568,8 @@ def _eval_fd(self, expr, **kwargs): expr = interp_for_fd(expr, x0_interp, **kwargs) # Step 2: Evaluate derivatives within expression - try: + with suppress(AttributeError): expr = expr._evaluate(**kwargs) - except AttributeError: - pass # If True, the derivative will be fully expanded as a sum of products, # otherwise an IndexSum will returned diff --git a/devito/finite_differences/differentiable.py b/devito/finite_differences/differentiable.py index f0d2b77d1f..63f5e9e5df 100644 --- a/devito/finite_differences/differentiable.py 
+++ b/devito/finite_differences/differentiable.py @@ -170,7 +170,9 @@ def _eval_at(self, func): if not func.is_Staggered: # Cartesian grid, do no waste time return self - return self.func(*[getattr(a, '_eval_at', lambda x: a)(func) for a in self.args]) + return self.func(*[ + getattr(a, '_eval_at', lambda x: a)(func) for a in self.args # noqa: B023 + ]) # false positive def _subs(self, old, new, **hints): if old == self: @@ -449,11 +451,12 @@ def has(self, *pattern): """ for p in pattern: # Following sympy convention, return True if any is found - if isinstance(p, type) and issubclass(p, sympy.Symbol): + if isinstance(p, type) \ + and issubclass(p, sympy.Symbol) \ + and any(isinstance(i, p) for i in self.free_symbols): # Symbols (and subclasses) are the leaves of an expression, and they # are promptly available via `free_symbols`. So this is super quick - if any(isinstance(i, p) for i in self.free_symbols): - return True + return True return super().has(*pattern) def has_free(self, *patterns): @@ -500,8 +503,10 @@ def __new__(cls, *args, **kwargs): return obj def subs(self, *args, **kwargs): - return self.func(*[getattr(a, 'subs', lambda x: a)(*args, **kwargs) - for a in self.args], evaluate=False) + return self.func( + *[getattr(a, 'subs', lambda x: a)(*args, **kwargs) # noqa: B023 + for a in self.args], evaluate=False + ) # false positive _subs = Differentiable._subs @@ -592,10 +597,7 @@ def __new__(cls, *args, **kwargs): return sympy.S.Zero # a*1 -> a - if scalar - 1 == 0: - args = others - else: - args = [scalar] + others + args = others if scalar - 1 == 0 else [scalar] + others # Reorder for homogeneity with pure SymPy types _mulsort(args) @@ -636,7 +638,9 @@ def _gather_for_diff(self): ref_inds = func_args.indices_ref.getters for f in self.args: - if f not in self._args_diff or f is func_args or isinstance(f, DifferentiableFunction): + if f not in self._args_diff \ + or f is func_args \ + or isinstance(f, DifferentiableFunction): new_args.append(f) else: 
ind_f = f.indices_ref.getters @@ -743,20 +747,20 @@ def __new__(cls, expr, dimensions, **kwargs): except AttributeError: pass raise ValueError("Expected Dimension with numeric size, " - "got `%s` instead" % d) + f"got `{d}` instead") # TODO: `has_free` only available with SymPy v>=1.10 # We should start using `not expr.has_free(*dimensions)` once we drop # support for SymPy 1.8<=v<1.0 if not all(d in expr.free_symbols for d in dimensions): - raise ValueError("All Dimensions `%s` must appear in `expr` " - "as free variables" % str(dimensions)) + raise ValueError(f"All Dimensions `{str(dimensions)}` must appear in `expr` " + "as free variables") for i in expr.find(IndexSum): for d in dimensions: if d in i.dimensions: - raise ValueError("Dimension `%s` already appears in a " - "nested tensor contraction" % d) + raise ValueError(f"Dimension `{d}` already appears in a " + "nested tensor contraction") obj = sympy.Expr.__new__(cls, expr) obj._expr = expr @@ -765,8 +769,11 @@ def __new__(cls, expr, dimensions, **kwargs): return obj def __repr__(self): - return "%s(%s, (%s))" % (self.__class__.__name__, self.expr, - ', '.join(d.name for d in self.dimensions)) + return "{}({}, ({}))".format( + self.__class__.__name__, + self.expr, + ', '.join(d.name for d in self.dimensions) + ) __str__ = __repr__ @@ -800,7 +807,7 @@ def _evaluate(self, **kwargs): values = product(*[list(d.range) for d in self.dimensions]) terms = [] for i in values: - mapper = dict(zip(self.dimensions, i)) + mapper = dict(zip(self.dimensions, i, strict=True)) terms.append(expr.xreplace(mapper)) return sum(terms) @@ -840,7 +847,7 @@ def __init_finalize__(self, *args, **kwargs): assert isinstance(weights, (list, tuple, np.ndarray)) # Normalize `weights` - from devito.symbolics import pow_to_mul # noqa, sigh + from devito.symbolics import pow_to_mul weights = tuple(pow_to_mul(sympy.sympify(i)) for i in weights) kwargs['scope'] = kwargs.get('scope', 'stack') @@ -879,7 +886,9 @@ def _xreplace(self, rule): return 
self, False else: try: - weights, flags = zip(*[i._xreplace(rule) for i in self.weights]) + weights, flags = zip( + *[i._xreplace(rule) for i in self.weights], strict=True + ) if any(flags): return self.func(initvalue=weights, function=None), True except AttributeError: @@ -925,7 +934,7 @@ def __new__(cls, expr, mapper, **kwargs): # Sanity check if not (expr.is_Mul and len(weightss) == 1): - raise ValueError("Expect `expr*weights`, got `%s` instead" % str(expr)) + raise ValueError(f"Expect `expr*weights`, got `{str(expr)}` instead") weights = weightss.pop() obj = super().__new__(cls, expr, dimensions) diff --git a/devito/finite_differences/finite_difference.py b/devito/finite_differences/finite_difference.py index 77545552a1..30199fb3d8 100644 --- a/devito/finite_differences/finite_difference.py +++ b/devito/finite_differences/finite_difference.py @@ -1,4 +1,5 @@ from collections.abc import Iterable +from contextlib import suppress from sympy import sympify @@ -92,7 +93,7 @@ def cross_derivative(expr, dims, fd_order, deriv_order, x0=None, side=None, **kw f(x + 2*h_x, y + 2*h_y)*g(x + 2*h_x, y + 2*h_y)/h_x)/h_y """ x0 = x0 or {} - for d, fd, dim in zip(deriv_order, fd_order, dims): + for d, fd, dim in zip(deriv_order, fd_order, dims, strict=True): expr = generic_derivative(expr, dim=dim, fd_order=fd, deriv_order=d, x0=x0, side=side, **kwargs) @@ -143,10 +144,7 @@ def generic_derivative(expr, dim, fd_order, deriv_order, matvec=direct, x0=None, return expr # Enforce stable time coefficients - if dim.is_Time: - coefficients = 'taylor' - else: - coefficients = expr.coefficients + coefficients = 'taylor' if dim.is_Time else expr.coefficients return make_derivative(expr, dim, fd_order, deriv_order, side, matvec, x0, coefficients, expand, weights) @@ -184,10 +182,7 @@ def make_derivative(expr, dim, fd_order, deriv_order, side, matvec, x0, coeffici weights = [weights._subs(wdim, i) for i in range(len(indices))] # Enforce fixed precision FD coefficients to avoid variations 
in results - if scale: - scale = dim.spacing**(-deriv_order) - else: - scale = 1 + scale = dim.spacing**(-deriv_order) if scale else 1 weights = [sympify(scale * w).evalf(_PRECISION) for w in weights] # Transpose the FD, if necessary @@ -210,26 +205,21 @@ def make_derivative(expr, dim, fd_order, deriv_order, side, matvec, x0, coeffici expr = expr._subs(dim, indices.expr) # Re-evaluate any off-the-grid Functions potentially impacted by the FD - try: + # unless a pure number + with suppress(AttributeError): expr = expr._evaluate(expand=False) - except AttributeError: - # Pure number - pass deriv = DiffDerivative(expr*weights, {dim: indices.free_dim}) else: terms = [] - for i, c in zip(indices, weights): + for i, c in zip(indices, weights, strict=True): # The FD term term = expr._subs(dim, i) * c # Re-evaluate any off-the-grid Functions potentially impacted by the FD - try: + # unless a pure number + with suppress(AttributeError): term = term.evaluate - except AttributeError: - # Pure number - pass - terms.append(term) deriv = EvalDerivative(*terms, base=expr) diff --git a/devito/finite_differences/operators.py b/devito/finite_differences/operators.py index f3f159f4ad..156466324e 100644 --- a/devito/finite_differences/operators.py +++ b/devito/finite_differences/operators.py @@ -70,7 +70,9 @@ def grad(func, shift=None, order=None, method='FD', side=None, **kwargs): try: return func.grad(shift=shift, order=order, method=method, side=side, w=w) except AttributeError: - raise AttributeError("Gradient not supported for class %s" % func.__class__) + raise AttributeError( + f"Gradient not supported for class {func.__class__}" + ) from None def grad45(func, shift=None, order=None): @@ -116,7 +118,9 @@ def curl(func, shift=None, order=None, method='FD', side=None, **kwargs): try: return func.curl(shift=shift, order=order, method=method, side=side, w=w) except AttributeError: - raise AttributeError("Curl only supported for 3D VectorFunction") + raise AttributeError( + "Curl 
only supported for 3D VectorFunction" + ) from None def curl45(func, shift=None, order=None): diff --git a/devito/finite_differences/tools.py b/devito/finite_differences/tools.py index 91c8b43c85..3fe9b4f4ab 100644 --- a/devito/finite_differences/tools.py +++ b/devito/finite_differences/tools.py @@ -51,8 +51,9 @@ def wrapper(expr, *args, **kwargs): try: return S.Zero if expr.is_Number else func(expr, *args, **kwargs) except AttributeError: - raise ValueError("'%s' must be of type Differentiable, not %s" - % (expr, type(expr))) + raise ValueError( + f"'{expr}' must be of type Differentiable, not {type(expr)}" + ) from None return wrapper @@ -73,9 +74,9 @@ def dim_with_order(dims, orders): def deriv_name(dims, orders): name = [] - for d, o in zip(dims, orders): + for d, o in zip(dims, orders, strict=True): name_dim = 't' if d.is_Time else d.root.name - name.append('d%s%s' % (name_dim, o) if o > 1 else 'd%s' % name_dim) + name.append(f'd{name_dim}{o}' if o > 1 else f'd{name_dim}') return ''.join(name) @@ -102,41 +103,41 @@ def diff_f(expr, deriv_order, dims, fd_order, side=None, **kwargs): # All conventional FD shortcuts for o in all_combs: - fd_dims = tuple(d for d, o_d in zip(dims, o) if o_d > 0) - d_orders = tuple(o_d for d, o_d in zip(dims, o) if o_d > 0) + fd_dims = tuple(d for d, o_d in zip(dims, o, strict=True) if o_d > 0) + d_orders = tuple(o_d for d, o_d in zip(dims, o, strict=True) if o_d > 0) fd_orders = tuple(to if d.is_Time else so for d in fd_dims) deriv = partial(diff_f, deriv_order=d_orders, dims=fd_dims, fd_order=fd_orders) name_fd = deriv_name(fd_dims, d_orders) dname = (d.root.name for d in fd_dims) - description = 'derivative of order %s w.r.t dimension %s' % (d_orders, dname) + description = f'derivative of order {d_orders} w.r.t dimension {dname}' derivatives[name_fd] = (deriv, description) # Add non-conventional, non-centered first-order FDs - for d, o in zip(dims, orders): + for d, o in zip(dims, orders, strict=True): name = 't' if d.is_Time 
else d.root.name # Add centered first derivatives deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, side=centered) - name_fd = 'd%sc' % name - description = 'centered derivative staggered w.r.t dimension %s' % d.name + name_fd = f'd{name}c' + description = f'centered derivative staggered w.r.t dimension {d.name}' derivatives[name_fd] = (deriv, description) # Left deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, side=left) - name_fd = 'd%sl' % name - description = 'left first order derivative w.r.t dimension %s' % d.name + name_fd = f'd{name}l' + description = f'left first order derivative w.r.t dimension {d.name}' derivatives[name_fd] = (deriv, description) # Right deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, side=right) - name_fd = 'd%sr' % name - description = 'right first order derivative w.r.t dimension %s' % d.name + name_fd = f'd{name}r' + description = f'right first order derivative w.r.t dimension {d.name}' derivatives[name_fd] = (deriv, description) # Add RSFD for first order derivatives - for d, o in zip(dims, orders): + for d, o in zip(dims, orders, strict=True): if not d.is_Time: name = d.root.name deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, method='RSFD') - name_fd = 'd%s45' % name - description = 'Derivative w.r.t %s with rotated 45 degree FD' % d.name + name_fd = f'd{name}45' + description = f'Derivative w.r.t {d.name} with rotated 45 degree FD' derivatives[name_fd] = (deriv, description) return derivatives @@ -171,7 +172,7 @@ def __new__(cls, dim, indices=None, expr=None, fd=None): return obj def __repr__(self): - return "IndexSet(%s)" % ", ".join(str(i) for i in self) + return "IndexSet({})".format(", ".join(str(i) for i in self)) @property def spacing(self): @@ -224,7 +225,7 @@ def make_stencil_dimension(expr, _min, _max): Create a StencilDimension for `expr` with unique name. 
""" n = len(expr.find(StencilDimension)) - return StencilDimension('i%d' % n, _min, _max) + return StencilDimension(f'i{n}', _min, _max) @cacheit @@ -322,7 +323,7 @@ def make_shift_x0(shift, ndim): else: raise ValueError("ndim length must be equal to 1 or 2") raise ValueError("shift parameter must be one of the following options: " - "None, float or tuple with shape equal to %s" % (ndim,)) + f"None, float or tuple with shape equal to {ndim}") def process_weights(weights, expr, dim): diff --git a/devito/ir/cgen/printer.py b/devito/ir/cgen/printer.py index fc5c23b904..120d49aea7 100644 --- a/devito/ir/cgen/printer.py +++ b/devito/ir/cgen/printer.py @@ -1,6 +1,8 @@ """ Utilities to turn SymPy objects into C strings. """ +from contextlib import suppress + import numpy as np import sympy from mpmath.libmp import prec_to_dps, to_str @@ -115,10 +117,8 @@ def _print_PyCPointerType(self, expr): return f'{ctype} *' def _print_type(self, expr): - try: + with suppress(TypeError): expr = dtype_to_ctype(expr) - except TypeError: - pass try: return self.type_mappings[expr] except KeyError: @@ -308,10 +308,7 @@ def _print_Float(self, expr): """Print a Float in C-like scientific notation.""" prec = expr._prec - if prec < 5: - dps = 0 - else: - dps = prec_to_dps(expr._prec) + dps = 0 if prec < 5 else prec_to_dps(expr._prec) if self._settings["full_prec"] is True: strip = False diff --git a/devito/ir/clusters/algorithms.py b/devito/ir/clusters/algorithms.py index c6c76567aa..083eb36354 100644 --- a/devito/ir/clusters/algorithms.py +++ b/devito/ir/clusters/algorithms.py @@ -197,7 +197,7 @@ def _break_for_parallelism(self, scope, candidates, i): if d.is_local or d.is_storage_related(candidates): # Would break a dependence on storage return False - if any(d.is_carried(i) for i in candidates): + if any(d.is_carried(i) for i in candidates): # noqa: SIM102 if (d.is_flow and d.is_lex_negative) or (d.is_anti and d.is_lex_positive): # Would break a data dependence return False @@ -229,10 
+229,7 @@ def guard(clusters): for cd in cds: # `BOTTOM` parent implies a guard that lives outside of # any iteration space, which corresponds to the placeholder None - if cd.parent is BOTTOM: - d = None - else: - d = cd.parent + d = None if cd.parent is BOTTOM else cd.parent # If `cd` uses, as condition, an arbitrary SymPy expression, then # we must ensure to nest it inside the last of the Dimensions @@ -326,7 +323,7 @@ def callback(self, clusters, prefix): # SymPy's index ordering (t, t-1, t+1) after modulo replacement so # that associativity errors are consistent. This corresponds to # sorting offsets {-1, 0, 1} as {0, -1, 1} assigning -inf to 0 - key = lambda i: -np.inf if i - si == 0 else (i - si) + key = lambda i: -np.inf if i - si == 0 else (i - si) # noqa: B023 siafs = sorted(iafs, key=key) for iaf in siafs: @@ -435,7 +432,7 @@ def callback(self, clusters, prefix, seen=None): # Construct the HaloTouch Cluster expr = Eq(self.B, HaloTouch(*points, halo_scheme=hs)) - key = lambda i: i in prefix[:-1] or i in hs.loc_indices + key = lambda i: i in prefix[:-1] or i in hs.loc_indices # noqa: B023 ispace = c.ispace.project(key) # HaloTouches are not parallel properties = c.properties.sequentialize() @@ -463,7 +460,7 @@ def _update(reductions): for c in clusters: # Schedule the global distributed reductions encountered before `c`, # if `c`'s IterationSpace is such that the reduction can be carried out - found, fifo = split(fifo, lambda dr: dr.ispace.is_subset(c.ispace)) + found, fifo = split(fifo, lambda dr: dr.ispace.is_subset(c.ispace)) # noqa: B023 _update(found) # Detect the global distributed reductions in `c` @@ -478,7 +475,7 @@ def _update(reductions): continue # Is Inc/Max/Min/... actually used for a reduction? 
- ispace = c.ispace.project(lambda d: d in var.free_symbols) + ispace = c.ispace.project(lambda d: d in var.free_symbols) # noqa: B023 if ispace.itdims == c.ispace.itdims: continue @@ -492,7 +489,7 @@ def _update(reductions): # The IterationSpace within which the global distributed reduction # must be carried out - ispace = c.ispace.prefix(lambda d: d in var.free_symbols) + ispace = c.ispace.prefix(lambda d: d in var.free_symbols) # noqa: B023 expr = [Eq(var, DistReduce(var, op=op, grid=grid, ispace=ispace))] fifo.append(c.rebuild(exprs=expr, ispace=ispace)) diff --git a/devito/ir/clusters/cluster.py b/devito/ir/clusters/cluster.py index a0283865a4..6c65f4b97a 100644 --- a/devito/ir/clusters/cluster.py +++ b/devito/ir/clusters/cluster.py @@ -1,3 +1,4 @@ +from contextlib import suppress from functools import cached_property from itertools import chain @@ -60,7 +61,7 @@ def __init__(self, exprs, ispace=null_ispace, guards=None, properties=None, self._halo_scheme = halo_scheme def __repr__(self): - return "Cluster([%s])" % ('\n' + ' '*9).join('%s' % i for i in self.exprs) + return "Cluster([{}])".format(('\n' + ' '*9).join(f'{i}' for i in self.exprs)) @classmethod def from_clusters(cls, *clusters): @@ -96,9 +97,11 @@ def from_clusters(cls, *clusters): try: syncs = normalize_syncs(*[c.syncs for c in clusters]) - except ValueError: - raise ValueError("Cannot build a Cluster from Clusters with " - "non-compatible synchronization operations") + except ValueError as e: + raise ValueError( + "Cannot build a Cluster from Clusters with " + "non-compatible synchronization operations" + ) from e halo_scheme = HaloScheme.union([c.halo_scheme for c in clusters]) @@ -186,10 +189,8 @@ def dist_dimensions(self): """ ret = set() for f in self.functions: - try: + with suppress(AttributeError): ret.update(f._dist_dimensions) - except AttributeError: - pass return frozenset(ret) @cached_property @@ -396,10 +397,7 @@ def dspace(self): oobs = set() for f, v in parts.items(): for i in v: - 
if i.dim.is_Sub: - d = i.dim.parent - else: - d = i.dim + d = i.dim.parent if i.dim.is_Sub else i.dim try: if i.lower < 0 or \ i.upper > f._size_nodomain[d].left + f._size_halo[d].right: diff --git a/devito/ir/clusters/visitors.py b/devito/ir/clusters/visitors.py index 694de41142..98ffbad36d 100644 --- a/devito/ir/clusters/visitors.py +++ b/devito/ir/clusters/visitors.py @@ -141,7 +141,7 @@ def __new__(cls, *args, mode='dense'): elif len(args) == 2: func, mode = args else: - assert False + raise AssertionError('Too many args') obj = object.__new__(cls) obj.__init__(func, mode) return obj diff --git a/devito/ir/equations/algorithms.py b/devito/ir/equations/algorithms.py index 00228c7cbb..b3a78c0ebe 100644 --- a/devito/ir/equations/algorithms.py +++ b/devito/ir/equations/algorithms.py @@ -126,11 +126,11 @@ def _lower_exprs(expressions, subs): # Introduce shifting to align with the computational domain indices = [_lower_exprs(a, subs) + o for a, o in - zip(i.indices, f._size_nodomain.left)] + zip(i.indices, f._size_nodomain.left, strict=True)] # Substitute spacing (spacing only used in own dimension) indices = [i.xreplace({d.spacing: 1, -d.spacing: -1}) - for i, d in zip(indices, f.dimensions)] + for i, d in zip(indices, f.dimensions, strict=True)] # Apply substitutions, if necessary if dimension_map: @@ -140,7 +140,7 @@ def _lower_exprs(expressions, subs): if isinstance(f, Array) and f.initvalue is not None: initvalue = [_lower_exprs(i, subs) for i in f.initvalue] # TODO: fix rebuild to avoid new name - f = f._rebuild(name='%si' % f.name, initvalue=initvalue) + f = f._rebuild(name=f'{f.name}i', initvalue=initvalue) mapper[i] = f.indexed[indices] # Add dimensions map to the mapper in case dimensions are used @@ -319,8 +319,10 @@ def _(d, mapper, rebuilt, sregistry): # Warn the user if name has been changed, since this will affect overrides if fname != d.functions.name: fkwargs['name'] = fname - warning("%s <%s> renamed as '%s'. Consider assigning a unique name to %s." 
% - (str(d.functions), id(d.functions), fname, d.functions.name)) + warning( + f"{str(d.functions)} <{id(d.functions)}> renamed as '{fname}'. " + "Consider assigning a unique name to {d.functions.name}." + ) fkwargs.update({'function': None, 'halo': None, diff --git a/devito/ir/equations/equation.py b/devito/ir/equations/equation.py index 8a8e821d94..29945903a9 100644 --- a/devito/ir/equations/equation.py +++ b/devito/ir/equations/equation.py @@ -92,9 +92,9 @@ def __repr__(self): if not self.is_Reduction: return super().__repr__() elif self.operation is OpInc: - return '%s += %s' % (self.lhs, self.rhs) + return f'{self.lhs} += {self.rhs}' else: - return '%s = %s(%s)' % (self.lhs, self.operation, self.rhs) + return f'{self.lhs} = {self.operation}({self.rhs})' # Pickling support __reduce_ex__ = Pickable.__reduce_ex__ @@ -174,7 +174,7 @@ def __new__(cls, *args, **kwargs): input_expr = args[0] expr = sympy.Eq.__new__(cls, *input_expr.args, evaluate=False) for i in cls.__rkwargs__: - setattr(expr, '_%s' % i, kwargs.get(i) or getattr(input_expr, i)) + setattr(expr, f'_{i}', kwargs.get(i) or getattr(input_expr, i)) return expr elif len(args) == 1 and isinstance(args[0], Eq): # origin: LoweredEq(devito.Eq) @@ -182,11 +182,11 @@ def __new__(cls, *args, **kwargs): elif len(args) == 2: expr = sympy.Eq.__new__(cls, *args, evaluate=False) for i in cls.__rkwargs__: - setattr(expr, '_%s' % i, kwargs.pop(i)) + setattr(expr, f'_{i}', kwargs.pop(i)) return expr else: - raise ValueError("Cannot construct LoweredEq from args=%s " - "and kwargs=%s" % (str(args), str(kwargs))) + raise ValueError(f"Cannot construct LoweredEq from args={str(args)} " + f"and kwargs={str(kwargs)}") # Well-defined dimension ordering ordering = dimension_sort(expr) @@ -294,7 +294,7 @@ def __new__(cls, *args, **kwargs): v = kwargs[i] except KeyError: v = getattr(input_expr, i, None) - setattr(expr, '_%s' % i, v) + setattr(expr, f'_{i}', v) else: expr._ispace = kwargs['ispace'] expr._conditionals = 
kwargs.get('conditionals', frozendict()) @@ -304,10 +304,10 @@ def __new__(cls, *args, **kwargs): # origin: ClusterizedEq(lhs, rhs, **kwargs) expr = sympy.Eq.__new__(cls, *args, evaluate=False) for i in cls.__rkwargs__: - setattr(expr, '_%s' % i, kwargs.pop(i)) + setattr(expr, f'_{i}', kwargs.pop(i)) else: - raise ValueError("Cannot construct ClusterizedEq from args=%s " - "and kwargs=%s" % (str(args), str(kwargs))) + raise ValueError(f"Cannot construct ClusterizedEq from args={str(args)} " + f"and kwargs={str(kwargs)}") return expr func = IREq._rebuild @@ -330,5 +330,5 @@ def __new__(cls, *args, **kwargs): elif len(args) == 2: obj = LoweredEq(Eq(*args, evaluate=False)) else: - raise ValueError("Cannot construct DummyEq from args=%s" % str(args)) + raise ValueError(f"Cannot construct DummyEq from args={str(args)}") return ClusterizedEq.__new__(cls, obj, ispace=obj.ispace) diff --git a/devito/ir/iet/algorithms.py b/devito/ir/iet/algorithms.py index 1715c9c2d1..e805c34f42 100644 --- a/devito/ir/iet/algorithms.py +++ b/devito/ir/iet/algorithms.py @@ -48,7 +48,7 @@ def iet_build(stree): uindices=i.sub_iterators) elif i.is_Section: - body = Section('section%d' % nsections, body=queues.pop(i)) + body = Section(f'section{nsections}', body=queues.pop(i)) nsections += 1 elif i.is_Halo: @@ -62,7 +62,7 @@ def iet_build(stree): queues.setdefault(i.parent, []).append(body) - assert False + raise AssertionError('This function did not return') def _unpack_switch_case(bundle): diff --git a/devito/ir/iet/efunc.py b/devito/ir/iet/efunc.py index c2207094c8..1a17202140 100644 --- a/devito/ir/iet/efunc.py +++ b/devito/ir/iet/efunc.py @@ -36,12 +36,14 @@ def __init__(self, name, arguments=None, mapper=None, dynamic_args_mapper=None, # Sanity check if k not in self._mapper: - raise ValueError("`k` is not a dynamic parameter" % k) + raise ValueError("`k` is not a dynamic parameter") if len(self._mapper[k]) != len(tv): - raise ValueError("Expected %d values for dynamic parameter `%s`, 
given %d" - % (len(self._mapper[k]), k, len(tv))) + raise ValueError( + f'Expected {len(self._mapper[k])} values for dynamic parameter ' + f'`{k}`, given {len(tv)}' + ) # Create the argument list - for i, j in zip(self._mapper[k], tv): + for i, j in zip(self._mapper[k], tv, strict=True): arguments[i] = j if incr is False else (arguments[i] + j) super().__init__(name, arguments, retobj, is_indirect) @@ -216,8 +218,10 @@ def __init__(self, name, grid, block, shm=0, stream=None, self.stream = stream def __repr__(self): - return 'Launch[%s]<<<(%s)>>>' % (self.name, - ','.join(str(i.name) for i in self.writes)) + return 'Launch[{}]<<<({})>>>'.format( + self.name, + ','.join(str(i.name) for i in self.writes) + ) @cached_property def functions(self): diff --git a/devito/ir/iet/nodes.py b/devito/ir/iet/nodes.py index 1c8ff5fbff..a8a3a35b00 100644 --- a/devito/ir/iet/nodes.py +++ b/devito/ir/iet/nodes.py @@ -5,6 +5,7 @@ import inspect from collections import OrderedDict, namedtuple from collections.abc import Iterable +from contextlib import suppress from functools import cached_property import cgen as c @@ -103,11 +104,13 @@ def __new__(cls, *args, **kwargs): obj = super().__new__(cls) argnames, _, _, defaultvalues, _, _, _ = inspect.getfullargspec(cls.__init__) try: - defaults = dict(zip(argnames[-len(defaultvalues):], defaultvalues)) + defaults = dict( + zip(argnames[-len(defaultvalues):], defaultvalues, strict=True) + ) except TypeError: # No default kwarg values defaults = {} - obj._args = {k: v for k, v in zip(argnames[1:], args)} + obj._args = {k: v for k, v in zip(argnames[1:], args, strict=False)} obj._args.update(kwargs.items()) obj._args.update({k: defaults.get(k) for k in argnames[1:] if k not in obj._args}) return obj @@ -116,7 +119,7 @@ def _rebuild(self, *args, **kwargs): """Reconstruct ``self``.""" handle = self._args.copy() # Original constructor arguments argnames = [i for i in self._traversable if i not in kwargs] - handle.update(OrderedDict([(k, v) for 
k, v in zip(argnames, args)])) + handle.update(OrderedDict([(k, v) for k, v in zip(argnames, args, strict=False)])) handle.update(kwargs) return type(self)(**handle) @@ -251,8 +254,8 @@ def __init__(self, header=None, body=None, footer=None, inline=False): self.inline = inline def __repr__(self): - return "<%s (%d, %d, %d)>" % (self.__class__.__name__, len(self.header), - len(self.body), len(self.footer)) + return f'<{self.__class__.__name__} ({len(self.header)}, {len(self.body)}, ' + \ + f'{len(self.footer)})>' class EmptyList(List): @@ -318,8 +321,8 @@ def __init__(self, name, arguments=None, retobj=None, is_indirect=False, self.templates = as_tuple(templates) def __repr__(self): - ret = "" if self.retobj is None else "%s = " % self.retobj - return "%sCall::\n\t%s(...)" % (ret, self.name) + ret = "" if self.retobj is None else f"{self.retobj} = " + return f"{ret}Call::\n\t{self.name}(...)" def _rebuild(self, *args, **kwargs): if args: @@ -327,7 +330,7 @@ def _rebuild(self, *args, **kwargs): # have nested Calls/Lambdas among its `arguments`, and these might # change, and we are in such a case *if and only if* we have `args` assert len(args) == len(self.children) - mapper = dict(zip(self.children, args)) + mapper = dict(zip(self.children, args, strict=True)) kwargs['arguments'] = [mapper.get(i, i) for i in self.arguments] return super()._rebuild(**kwargs) @@ -375,10 +378,8 @@ def expr_symbols(self): elif isinstance(i, Call): retval.extend(i.expr_symbols) else: - try: + with suppress(AttributeError): retval.extend(i.free_symbols) - except AttributeError: - pass if self.base is not None: retval.append(self.base) @@ -428,9 +429,11 @@ def __init__(self, expr, pragmas=None, init=False, operation=None): self.operation = operation def __repr__(self): - return "<%s::%s=%s>" % (self.__class__.__name__, - type(self.write), - ','.join('%s' % type(f) for f in self.functions)) + return "<{}::{}={}>".format( + self.__class__.__name__, + type(self.write), + ','.join(f'{type(f)}' 
for f in self.functions) + ) @property def dtype(self): @@ -481,8 +484,10 @@ def is_initializable(self): """ True if it can be an initializing assignment, False otherwise. """ - return ((self.is_scalar and not self.is_reduction) or - (self.is_tensor and isinstance(self.expr.rhs, ListInitializer))) + return ( + (self.is_scalar and not self.is_reduction) or + (self.is_tensor and isinstance(self.expr.rhs, ListInitializer)) + ) @property def defines(self): @@ -575,7 +580,7 @@ def __init__(self, nodes, dimension, limits, direction=None, properties=None, # Generate loop limits if isinstance(limits, Iterable): - assert(len(limits) == 3) + assert len(limits) == 3 self.limits = tuple(limits) elif self.dim.is_Incr: self.limits = (self.dim.symbolic_min, limits, self.dim.step) @@ -594,11 +599,11 @@ def __repr__(self): properties = "" if self.properties: properties = [str(i) for i in self.properties] - properties = "WithProperties[%s]::" % ",".join(properties) + properties = "WithProperties[{}]::".format(",".join(properties)) index = self.index if self.uindices: - index += '[%s]' % ','.join(i.name for i in self.uindices) - return "<%sIteration %s; %s>" % (properties, index, self.limits) + index += '[{}]'.format(','.join(i.name for i in self.uindices)) + return f"<{properties}Iteration {index}; {self.limits}>" @property def is_Affine(self): @@ -715,10 +720,8 @@ def __init__(self, condition): def functions(self): ret = [] for i in self.condition.free_symbols: - try: + with suppress(AttributeError): ret.append(i.function) - except AttributeError: - pass return tuple(ret) @property @@ -748,7 +751,7 @@ def __init__(self, condition, body=None): self.body = as_tuple(body) def __repr__(self): - return "" % (self.condition, len(self.body)) + return f'' class Callable(Node): @@ -795,8 +798,11 @@ def __init__(self, name, body, retval, parameters=None, prefix=None, def __repr__(self): param_types = [ctypes_to_cstr(i._C_ctype) for i in self.parameters] - return "%s[%s]<%s; %s>" % 
(self.__class__.__name__, self.name, self.retval, - ",".join(param_types)) + return "{}[{}]<{}; {}>".format( + self.__class__.__name__, + self.name, self.retval, + ",".join(param_types) + ) @property def all_parameters(self): @@ -893,11 +899,10 @@ def __init__(self, body, init=(), standalones=(), unpacks=(), strides=(), self.retstmt = as_tuple(retstmt) def __repr__(self): - return (" >" % - (len(self.unpacks), len(self.allocs), len(self.casts), - len(self.maps), len(self.objs), len(self.unmaps), - len(self.frees))) + return ' ' + \ + f'>' class Conditional(DoIf): @@ -926,10 +931,10 @@ def __init__(self, condition, then_body, else_body=None): def __repr__(self): if self.else_body: - return "<[%s] ? [%s] : [%s]>" %\ - (ccode(self.condition), repr(self.then_body), repr(self.else_body)) + return f'<[{ccode(self.condition)}] ? [{repr(self.then_body)}] ' + \ + f': [{repr(self.else_body)}]>' else: - return "<[%s] ? [%s]" % (ccode(self.condition), repr(self.then_body)) + return f'<[{ccode(self.condition)}] ? 
[{repr(self.then_body)}]' class Switch(DoIf): @@ -970,7 +975,7 @@ def ncases(self): @property def as_mapper(self): - retval = dict(zip(self.cases, self.nodes)) + retval = dict(zip(self.cases, self.nodes, strict=True)) if self.default: retval['default'] = self.default return retval @@ -997,9 +1002,9 @@ def __init__(self, timer, lname, body): self._name = lname self._timer = timer - super().__init__(header=c.Line('START(%s)' % lname), + super().__init__(header=c.Line(f'START({lname})'), body=body, - footer=c.Line('STOP(%s,%s)' % (lname, timer.name))) + footer=c.Line(f'STOP({lname},{timer.name})')) @classmethod def _start_timer_header(cls): @@ -1037,7 +1042,7 @@ def __init__(self, function): self.function = function def __repr__(self): - return "" % self.function + return f"" @property def functions(self): @@ -1060,19 +1065,15 @@ def expr_symbols(self): f = self.function if f.is_LocalObject: ret = set(flatten(i.free_symbols for i in f.cargs)) - try: + with suppress(AttributeError): ret.update(f.initvalue.free_symbols) - except AttributeError: - pass return tuple(ret) elif f.is_Array and f.initvalue is not None: # These are just a handful of values so it's OK to iterate them over ret = set() for i in f.initvalue: - try: + with suppress(AttributeError): ret.update(i.free_symbols) - except AttributeError: - pass return tuple(ret) else: return () @@ -1094,7 +1095,7 @@ def __init__(self, function, obj=None, alignment=True, flat=None): self.flat = flat def __repr__(self): - return "" % self.function + return f"" @property def castshape(self): @@ -1148,7 +1149,7 @@ def __init__(self, pointee, pointer, flat=None, offset=None): self.offset = offset def __repr__(self): - return "" % (self.pointee, self.pointer) + return f"" @property def functions(self): @@ -1171,7 +1172,7 @@ def expr_symbols(self): ret.extend(flatten(i.free_symbols for i in self.pointee.symbolic_shape[1:])) else: - assert False, f"Unexpected pointer type {type(self.pointer)}" + raise 
AssertionError(f'Unexpected pointer type {type(self.pointer)}') if self.offset is not None: ret.append(self.offset) @@ -1223,7 +1224,7 @@ def __init__(self, body, captures=None, parameters=None, special=None, self.attributes = as_tuple(attributes) def __repr__(self): - return "Lambda[%s](%s)" % (self.captures, self.parameters) + return f"Lambda[{self.captures}]({self.parameters})" @property def functions(self): @@ -1259,7 +1260,7 @@ def __init__(self, name, body=None, is_subsection=False): self.is_subsection = is_subsection def __repr__(self): - return "
" % self.name + return f"
" @property def roots(self): @@ -1281,7 +1282,7 @@ def __init__(self, ispace, ops, traffic, body=None): self.traffic = traffic def __repr__(self): - return "" % len(self.exprs) + return f'' @property def exprs(self): @@ -1330,7 +1331,7 @@ def __init__(self, name): self.name = name def __repr__(self): - return "" % self.name + return f"" class UsingNamespace(Node): @@ -1343,7 +1344,7 @@ def __init__(self, namespace): self.namespace = namespace def __repr__(self): - return "" % self.namespace + return f"" class Pragma(Node): @@ -1356,7 +1357,7 @@ def __init__(self, pragma, arguments=None): super().__init__() if not isinstance(pragma, str): - raise TypeError("Pragma name must be a string, not %s" % type(pragma)) + raise TypeError(f"Pragma name must be a string, not {type(pragma)}") self.pragma = pragma self.arguments = as_tuple(arguments) @@ -1512,7 +1513,7 @@ def __init__(self, sync_ops, body=None): self.sync_ops = sync_ops def __repr__(self): - return "" % ",".join(str(i) for i in self.sync_ops) + return "".format(",".join(str(i) for i in self.sync_ops)) @property def is_async_op(self): @@ -1589,8 +1590,8 @@ def __init__(self, body, halo_scheme): self._halo_scheme = halo_scheme def __repr__(self): - functions = "(%s)" % ",".join(i.name for i in self.functions) - return "<%s%s>" % (self.__class__.__name__, functions) + functions = "({})".format(",".join(i.name for i in self.functions)) + return f"<{self.__class__.__name__}{functions}>" @property def halo_scheme(self): diff --git a/devito/ir/iet/utils.py b/devito/ir/iet/utils.py index 1f693a7299..86b23adbbf 100644 --- a/devito/ir/iet/utils.py +++ b/devito/ir/iet/utils.py @@ -32,7 +32,7 @@ def dimensions(self): return [i.dim for i in self] def __repr__(self): - return "IterationTree%s" % super().__repr__() + return f"IterationTree{super().__repr__()}" def __getitem__(self, key): ret = super().__getitem__(key) diff --git a/devito/ir/iet/visitors.py b/devito/ir/iet/visitors.py index 31b9356dce..aafbeec2fc 100644 --- 
a/devito/ir/iet/visitors.py +++ b/devito/ir/iet/visitors.py @@ -64,7 +64,7 @@ def maybe_rebuild(self, o, *args, **kwargs): """A visit method that rebuilds nodes if their children have changed.""" ops, okwargs = o.operands() new_ops = [self._visit(op, *args, **kwargs) for op in ops] - if all(a is b for a, b in zip(ops, new_ops)): + if all(a is b for a, b in zip(ops, new_ops, strict=True)): return o return o._rebuild(*new_ops, **okwargs) @@ -103,7 +103,7 @@ def lookup_method(self, instance) \ def _visit(self, o, *args, **kwargs) -> LazyVisit[YieldType, FlagType]: meth = self.lookup_method(o) flag = yield from meth(o, *args, **kwargs) - return flag + return flag # noqa: B901 def _post_visit(self, ret: LazyVisit[YieldType, FlagType]) -> ResultType: return list(ret) @@ -113,13 +113,13 @@ def visit_object(self, o: object, **kwargs) -> LazyVisit[YieldType, FlagType]: def visit_Node(self, o: Node, **kwargs) -> LazyVisit[YieldType, FlagType]: flag = yield from self._visit(o.children, **kwargs) - return flag + return flag # noqa: B901 def visit_tuple(self, o: Sequence[Any], **kwargs) -> LazyVisit[YieldType, FlagType]: flag: FlagType = None for i in o: flag = yield from self._visit(i, **kwargs) - return flag + return flag # noqa: B901 visit_list = visit_tuple @@ -296,7 +296,7 @@ def _gen_struct_decl(self, obj, masked=()): fields = (None,)*len(ctype._fields_) entries = [] - for i, (n, ct) in zip(fields, ctype._fields_): + for i, (n, ct) in zip(fields, ctype._fields_, strict=True): try: entries.append(self._gen_value(i, 0, masked=('const',))) except AttributeError: @@ -327,9 +327,10 @@ def _gen_value(self, obj, mode=1, masked=()): else: strtype = self.ccode(obj._C_ctype) strshape = '' - if isinstance(obj, (AbstractFunction, IndexedData)) and mode >= 1: - if not obj._mem_stack: - strtype = f'{strtype}{self._restrict_keyword}' + if isinstance(obj, (AbstractFunction, IndexedData)) \ + and mode >= 1 \ + and not obj._mem_stack: + strtype = f'{strtype}{self._restrict_keyword}' 
strtype = ' '.join(qualifiers + [strtype]) if obj.is_LocalObject and obj._C_modifier is not None and mode == 2: @@ -400,12 +401,9 @@ def _gen_signature(self, o, is_declaration=False): prefix = ' '.join(o.prefix + (self._gen_rettype(o.retval),)) - if o.attributes: - # NOTE: ugly, but I can't bother extending `c.FunctionDeclaration` - # for such a tiny thing - v = f"{' '.join(o.attributes)} {o.name}" - else: - v = o.name + # NOTE: ugly, but I can't bother extending `c.FunctionDeclaration` + # for such a tiny thing + v = f"{' '.join(o.attributes)} {o.name}" if o.attributes else o.name signature = c.FunctionDeclaration(c.Value(prefix, v), decls) @@ -481,7 +479,7 @@ def visit_PointerCast(self, o): elif isinstance(o.obj, IndexedData): v = f._C_name else: - assert False + raise AssertionError('rvalue is not a recognised type') rvalue = f'({cstr}**) {v}' else: @@ -510,14 +508,11 @@ def visit_PointerCast(self, o): elif isinstance(o.obj, DeviceMap): v = f._C_field_dmap else: - assert False + raise AssertionError('rvalue is not a recognised type') rvalue = f'({cstr} {rshape}) {f._C_name}->{v}' else: - if isinstance(o.obj, Pointer): - v = o.obj.name - else: - v = f._C_name + v = o.obj.name if isinstance(o.obj, Pointer) else f._C_name rvalue = f'({cstr} {rshape}) {v}' @@ -526,10 +521,7 @@ def visit_PointerCast(self, o): def visit_Dereference(self, o): a0, a1 = o.functions - if o.offset: - ptr = f'({a1.name} + {o.offset})' - else: - ptr = a1.name + ptr = f'({a1.name} + {o.offset})' if o.offset else a1.name if a0.is_AbstractFunction: cstr = self.ccode(a0.indexed._C_typedata) @@ -549,10 +541,7 @@ def visit_Dereference(self, o): lvalue = c.Value(cstr, f'*{self._restrict_keyword} {a0.name}') else: - if a1.is_Symbol: - rvalue = f'*{ptr}' - else: - rvalue = f'{ptr}->{a0._C_name}' + rvalue = f'*{ptr}' if a1.is_Symbol else f'{ptr}->{a0._C_name}' lvalue = self._gen_value(a0, 0) return c.Initializer(lvalue, rvalue) @@ -563,10 +552,7 @@ def visit_Block(self, o): def visit_List(self, o): 
body = flatten(self._visit(i) for i in self._blankline_logic(o.children)) - if o.inline: - body = c.Line(' '.join(str(i) for i in body)) - else: - body = c.Collection(body) + body = c.Line(' '.join(str(i) for i in body)) if o.inline else c.Collection(body) return c.Module(o.header + (body,) + o.footer) def visit_Section(self, o): @@ -744,10 +730,7 @@ def visit_HaloSpot(self, o): return c.Collection(body) def visit_KernelLaunch(self, o): - if o.templates: - templates = f"<{','.join([str(i) for i in o.templates])}>" - else: - templates = '' + templates = f"<{','.join([str(i) for i in o.templates])}>" if o.templates else '' launch_args = [o.grid, o.block] if o.shm is not None: @@ -778,7 +761,7 @@ def _operator_includes(self, o): """ Generate cgen includes from an iterable of symbols and expressions. """ - return [c.Include(i, system=(False if i.endswith('.h') else True)) + return [c.Include(i, system=(not i.endswith('.h'))) for i in o.includes] + [blankline] def _operator_namespaces(self, o): @@ -839,10 +822,7 @@ def visit_Operator(self, o, mode='all'): signature = self._gen_signature(o) # Honor the `retstmt` flag if set - if o.body.retstmt: - retval = [] - else: - retval = [c.Line(), c.Statement("return 0")] + retval = [] if o.body.retstmt else [c.Line(), c.Statement("return 0")] kernel = c.FunctionBody(signature, c.Block(body + retval)) @@ -1206,14 +1186,14 @@ def __init__(self, match: type, start: Node, stop: Node | None = None) -> None: def visit_object(self, o: object, flag: bool = False) -> LazyVisit[Node, bool]: yield from () - return flag + return flag # noqa: B901 def visit_tuple(self, o: Sequence[Any], flag: bool = False) -> LazyVisit[Node, bool]: for el in o: # Yield results from visiting this element, and update the flag flag = yield from self._visit(el, flag=flag) - return flag + return flag # noqa: B901 visit_list = visit_tuple @@ -1236,7 +1216,7 @@ def visit_Node(self, o: Node, flag: bool = False) -> LazyVisit[Node, bool]: # Update the flag if we found 
a stop flag &= (o is not self.stop) - return flag + return flag # noqa: B901 ApplicationType = TypeVar('ApplicationType') @@ -1580,8 +1560,7 @@ def generate(self): lines = list(i.generate()) if len(lines) > 1: yield tip + ",".join(processed + [lines[0]]) - for line in lines[1:-1]: - yield line + yield from lines[1:-1] tip = "" processed = [lines[-1]] else: diff --git a/devito/ir/stree/algorithms.py b/devito/ir/stree/algorithms.py index 6fb229fdc3..f5e76ca209 100644 --- a/devito/ir/stree/algorithms.py +++ b/devito/ir/stree/algorithms.py @@ -46,7 +46,7 @@ def stree_build(clusters, profiler=None, **kwargs): maybe_reusable = [] index = 0 - for it0, it1 in zip(c.itintervals, maybe_reusable): + for it0, it1 in zip(c.itintervals, maybe_reusable, strict=False): if it0 != it1: break @@ -204,7 +204,9 @@ def preprocess(clusters, options=None, **kwargs): syncs = normalize_syncs(*[c1.syncs for c1 in found]) if syncs: - ispace = c.ispace.prefix(lambda d: d._defines.intersection(syncs)) + ispace = c.ispace.prefix( + lambda d: d._defines.intersection(syncs) # noqa: B023 + ) processed.append(c.rebuild(exprs=[], ispace=ispace, syncs=syncs)) if all(c1.ispace.is_subset(c.ispace) for c1 in found): @@ -228,9 +230,9 @@ def preprocess(clusters, options=None, **kwargs): # Sanity check! 
try: assert not queue - except AssertionError: + except AssertionError as e: if options['mpi']: - raise RuntimeError("Unsupported MPI for the given equations") + raise RuntimeError("Unsupported MPI for the given equations") from e return processed @@ -290,7 +292,7 @@ def reuse_section(candidate, section): # * Same set of iteration Dimensions key = lambda i: i.interval.promote(lambda d: d.is_Block).dim test00 = len(iters0) == len(iters1) - test01 = all(key(i) is key(j) for i, j in zip(iters0, iters1)) + test01 = all(key(i) is key(j) for i, j in zip(iters0, iters1, strict=False)) # * All subtrees use at least one local SubDimension (i.e., BCs) key = lambda iters: any(i.dim.is_Sub and i.dim.local for i in iters) diff --git a/devito/ir/stree/tree.py b/devito/ir/stree/tree.py index 6e4b48ec3e..e033c9fd15 100644 --- a/devito/ir/stree/tree.py +++ b/devito/ir/stree/tree.py @@ -29,8 +29,7 @@ def __repr__(self): return render(self) def visit(self): - for i in PostOrderIter(self): - yield i + yield from PostOrderIter(self) @property def last(self): @@ -79,7 +78,7 @@ def sub_iterators(self): @property def __repr_render__(self): - return "%s%s" % (self.dim, self.direction) + return f"{self.dim}{self.direction}" class NodeConditional(ScheduleTree): @@ -105,7 +104,7 @@ def __init__(self, sync_ops, parent=None): @property def __repr_render__(self): - return "Sync[%s]" % ",".join(i.__class__.__name__ for i in self.sync_ops) + return "Sync[{}]".format(",".join(i.__class__.__name__ for i in self.sync_ops)) @property def is_async(self): @@ -129,8 +128,8 @@ def __repr_render__(self): threshold = 2 n = len(self.exprs) ret = ",".join("Eq" for i in range(min(n, threshold))) - ret = ("%s,..." 
% ret) if n > threshold else ret - return "[%s]" % ret + ret = (f"{ret},...") if n > threshold else ret + return f"[{ret}]" class NodeHalo(ScheduleTree): diff --git a/devito/ir/support/basic.py b/devito/ir/support/basic.py index d3f7d00247..4405cb3b63 100644 --- a/devito/ir/support/basic.py +++ b/devito/ir/support/basic.py @@ -1,4 +1,5 @@ from collections.abc import Callable, Iterable +from contextlib import suppress from functools import cached_property from itertools import chain, product @@ -90,7 +91,7 @@ def __new__(cls, access): except AttributeError: # E.g., `access` is a FieldFromComposite rather than an Indexed indices = (S.Infinity,)*len(findices) - return super().__new__(cls, list(zip(findices, indices))) + return super().__new__(cls, list(zip(findices, indices, strict=False))) def __hash__(self): return super().__hash__() @@ -98,7 +99,7 @@ def __hash__(self): @cached_property def index_mode(self): retval = [] - for i, fi in zip(self, self.findices): + for i, fi in zip(self, self.findices, strict=True): dims = {j for j in i.free_symbols if isinstance(j, Dimension)} if len(dims) == 0 and q_constant(i): retval.append(AFFINE) @@ -108,11 +109,8 @@ def index_mode(self): # q_affine -- ultimately it should get quicker! sdims = {d for d in dims if d.is_Stencil} - if dims == sdims: - candidates = sdims - else: - # E.g. `x + i0 + i1` -> `candidates = {x}` - candidates = dims - sdims + # E.g. 
`x + i0 + i1` -> `candidates = {x}` + candidates = sdims if dims == sdims else dims - sdims if len(candidates) == 1: candidate = candidates.pop() @@ -130,7 +128,7 @@ def index_mode(self): @cached_property def aindices(self): retval = [] - for i, fi in zip(self, self.findices): + for i, _ in zip(self, self.findices, strict=True): dims = set(d.root if d.indirect else d for d in i.atoms(Dimension)) sdims = {d for d in dims if d.is_Stencil} candidates = dims - sdims @@ -149,12 +147,12 @@ def findices(self): @cached_property def index_map(self): - return dict(zip(self.aindices, self.findices)) + return dict(zip(self.aindices, self.findices, strict=True)) @cached_property def defined_findices_affine(self): ret = set() - for fi, im in zip(self.findices, self.index_mode): + for fi, im in zip(self.findices, self.index_mode, strict=True): if im is AFFINE: ret.update(fi._defines) return ret @@ -162,7 +160,7 @@ def defined_findices_affine(self): @cached_property def defined_findices_irregular(self): ret = set() - for fi, im in zip(self.findices, self.index_mode): + for fi, im in zip(self.findices, self.index_mode, strict=True): if im is IRREGULAR: ret.update(fi._defines) return ret @@ -338,7 +336,7 @@ def distance(self, other): return Vector(S.ImaginaryUnit) ret = [] - for sit, oit in zip(self.itintervals, other.itintervals): + for sit, oit in zip(self.itintervals, other.itintervals, strict=False): n = len(ret) try: @@ -385,9 +383,8 @@ def distance(self, other): # Case 3: `self` and `other` have some special form such that # it's provable that they never intersect - if sai and sit == oit: - if disjoint_test(self[n], other[n], sai, sit): - return Vector(S.ImaginaryUnit) + if sai and sit == oit and disjoint_test(self[n], other[n], sai, sit): + return Vector(S.ImaginaryUnit) # Compute the distance along the current IterationInterval if self.function._mem_shared: @@ -441,7 +438,7 @@ def distance(self, other): # It still could be an imaginary dependence, e.g. 
`a[3] -> a[4]` or, more # nasty, `a[i+1, 3] -> a[i, 4]` - for i, j in zip(self[n:], other[n:]): + for i, j in zip(self[n:], other[n:], strict=True): if i == j: ret.append(S.Zero) else: @@ -572,7 +569,7 @@ def _defined_findices(self): @cached_property def distance_mapper(self): retval = {} - for i, j in zip(self.findices, self.distance): + for i, j in zip(self.findices, self.distance, strict=False): for d in i._defines: retval[d] = j return retval @@ -646,7 +643,7 @@ def __repr__(self): @cached_property def cause(self): """Return the findex causing the dependence.""" - for i, j in zip(self.findices, self.distance): + for i, j in zip(self.findices, self.distance, strict=False): try: if j > 0: return i._defines @@ -778,7 +775,7 @@ def is_storage_related(self, dims=None): cause the access of the same memory location, False otherwise. """ for d in self.findices: - if d._defines & set(as_tuple(dims)): + if d._defines & set(as_tuple(dims)): # noqa: SIM102 if any(i.is_NonlinearDerived for i in d._defines) or \ self.is_const(d): return True @@ -863,16 +860,12 @@ def writes_gen(self): for i, e in enumerate(self.exprs): terminals = retrieve_accesses(e.lhs) if q_routine(e.rhs): - try: + with suppress(AttributeError): + # Everything except: foreign routines, such as `cos` or `sin` etc. 
terminals.update(e.rhs.writes) - except AttributeError: - # E.g., foreign routines, such as `cos` or `sin` - pass + for j in terminals: - if e.is_Reduction: - mode = 'WR' - else: - mode = 'W' + mode = 'WR' if e.is_Reduction else 'W' yield TimedAccess(j, mode, i, e.ispace) # Objects altering the control flow (e.g., synchronization barriers, @@ -910,15 +903,10 @@ def reads_explicit_gen(self): for i, e in enumerate(self.exprs): # Reads terminals = retrieve_accesses(e.rhs, deep=True) - try: + with suppress(AttributeError): terminals.update(retrieve_accesses(e.lhs.indices)) - except AttributeError: - pass for j in terminals: - if j.function is e.lhs.function and e.is_Reduction: - mode = 'RR' - else: - mode = 'R' + mode = 'RR' if j.function is e.lhs.function and e.is_Reduction else 'R' yield TimedAccess(j, mode, i, e.ispace) # If a reduction, we got one implicit read @@ -1066,7 +1054,7 @@ def __repr__(self): shifted = f"{chr(10) if shifted else ''}{shifted}" writes[i] = f'\033[1;37;31m{first + shifted}\033[0m' return "\n".join([out.format(i.name, w, '', r) - for i, r, w in zip(tracked, reads, writes)]) + for i, r, w in zip(tracked, reads, writes, strict=True)]) @cached_property def accesses(self): @@ -1254,7 +1242,7 @@ def __init__(self, expr, indexeds=None, bases=None, offsets=None): for ii in self.iinstances: base = [] offset = [] - for e, fi, ai in zip(ii, ii.findices, ii.aindices): + for e, fi, ai in zip(ii, ii.findices, ii.aindices, strict=True): if ai is None: base.append((fi, e)) else: @@ -1324,9 +1312,8 @@ def translated(self, other, dims=None): return {} v = distance.pop() - if not d._defines & dims: - if v != 0: - return {} + if not d._defines & dims and v != 0: + return {} distances[d] = v @@ -1351,7 +1338,7 @@ def dimensions(self): @cached_property def aindices(self): try: - return tuple(zip(*self.Toffsets))[0] + return tuple(zip(*self.Toffsets, strict=True))[0] except IndexError: return () @@ -1377,7 +1364,7 @@ def retrieve_accesses(exprs, **kwargs): if not 
compaccs: return retrieve_terminals(exprs, **kwargs) - subs = {i: Symbol('dummy%d' % n) for n, i in enumerate(compaccs)} + subs = {i: Symbol(f'dummy{n}') for n, i in enumerate(compaccs)} exprs1 = uxreplace(exprs, subs) return compaccs | retrieve_terminals(exprs1, **kwargs) - set(subs.values()) diff --git a/devito/ir/support/guards.py b/devito/ir/support/guards.py index 574fe490ec..deba5be148 100644 --- a/devito/ir/support/guards.py +++ b/devito/ir/support/guards.py @@ -454,7 +454,9 @@ def simplify_and(relation, v): covered = True try: - if type(a) in (Gt, Ge) and v.rhs > a.rhs or type(a) in (Lt, Le) and v.rhs < a.rhs: + if type(a) in (Gt, Ge) \ + and v.rhs > a.rhs or type(a) in (Lt, Le) \ + and v.rhs < a.rhs: new_args.append(v) else: new_args.append(a) diff --git a/devito/ir/support/space.py b/devito/ir/support/space.py index dda334d3d1..0fcaf8f423 100644 --- a/devito/ir/support/space.py +++ b/devito/ir/support/space.py @@ -97,7 +97,7 @@ class NullInterval(AbstractInterval): is_Null = True def __repr__(self): - return "%s[Null]%s" % (self.dim, self.stamp) + return f"{self.dim}[Null]{self.stamp}" def __hash__(self): return hash(self.dim) @@ -149,7 +149,7 @@ def __init__(self, dim, lower=0, upper=0, stamp=S0): self.upper = upper def __repr__(self): - return "%s[%s,%s]%s" % (self.dim, self.lower, self.upper, self.stamp) + return f"{self.dim}[{self.lower},{self.upper}]{self.stamp}" def __hash__(self): return hash((self.dim, self.offsets)) @@ -247,8 +247,9 @@ def union(self, o): ovl, ovu = Vector(o.lower, smart=True), Vector(o.upper, smart=True) return Interval(self.dim, vmin(svl, ovl)[0], vmax(svu, ovu)[0], self.stamp) else: - raise ValueError("Cannot compute union of non-compatible Intervals (%s, %s)" % - (self, o)) + raise ValueError( + f"Cannot compute union of non-compatible Intervals ({self}, {o})" + ) def add(self, o): if not self.is_compatible(o): @@ -312,8 +313,10 @@ class IntervalGroup(Ordering): @classmethod def reorder(cls, items, relations): if not 
all(isinstance(i, AbstractInterval) for i in items): - raise ValueError("Cannot create IntervalGroup from objs of type [%s]" % - ', '.join(str(type(i)) for i in items)) + raise ValueError( + 'Cannot create IntervalGroup from objs of type ' + f'[{", ".join(str(type(i)) for i in items)}]' + ) if len(relations) == 1: # Special case: avoid expensive topological sorting if possible @@ -332,7 +335,7 @@ def simplify_relations(cls, relations, items, mode): return super().simplify_relations(relations, items, mode) def __eq__(self, o): - return len(self) == len(o) and all(i == j for i, j in zip(self, o)) + return len(self) == len(o) and all(i == j for i, j in zip(self, o, strict=True)) def __contains__(self, d): return any(i.dim is d for i in self) @@ -341,7 +344,7 @@ def __hash__(self): return hash(tuple(self)) def __repr__(self): - return "IntervalGroup[%s]" % (', '.join([repr(i) for i in self])) + return "IntervalGroup[{}]".format(', '.join([repr(i) for i in self])) @cached_property def dimensions(self): @@ -426,7 +429,7 @@ def is_compatible(self, o): """ if set(self) != set(o): return False - if all(i == j for i, j in zip(self, o)): + if all(i == j for i, j in zip(self, o, strict=True)): # Same input ordering, definitely compatible return True try: @@ -550,7 +553,7 @@ def index(self, key): return super().index(key) elif isinstance(key, Dimension): return super().index(self[key]) - raise ValueError("Expected Interval or Dimension, got `%s`" % type(key)) + raise ValueError(f"Expected Interval or Dimension, got `{type(key)}`") def __getitem__(self, key): if is_integer(key): @@ -621,7 +624,7 @@ def __init__(self, interval, sub_iterators=(), direction=Forward): self.direction = direction def __repr__(self): - return "%s%s" % (super().__repr__(), self.direction) + return f"{super().__repr__()}{self.direction}" def __eq__(self, other): if not isinstance(other, IterationInterval): @@ -654,8 +657,10 @@ def __init__(self, intervals): self._intervals = 
IntervalGroup(as_tuple(intervals)) def __repr__(self): - return "%s[%s]" % (self.__class__.__name__, - ", ".join(repr(i) for i in self.intervals)) + return "{}[{}]".format( + self.__class__.__name__, + ", ".join(repr(i) for i in self.intervals) + ) def __eq__(self, other): return isinstance(other, Space) and self.intervals == other.intervals @@ -667,8 +672,7 @@ def __len__(self): return len(self.intervals) def __iter__(self): - for i in self.intervals: - yield i + yield from self.intervals @property def intervals(self): @@ -719,7 +723,7 @@ def __init__(self, intervals, parts=None): parts = {k: v.expand() for k, v in (parts or {}).items()} for k, v in list(parts.items()): dims = set().union(*[d._defines for d in k.dimensions]) - parts[k] = v.drop(lambda d: d not in dims) + parts[k] = v.drop(lambda d: d not in dims) # noqa: B023 self._parts = frozendict(parts) def __eq__(self, other): @@ -798,9 +802,9 @@ def __init__(self, intervals, sub_iterators=None, directions=None): self._directions = frozendict(directions) def __repr__(self): - ret = ', '.join(["%s%s" % (repr(i), repr(self.directions[i.dim])) + ret = ', '.join([f"{repr(i)}{repr(self.directions[i.dim])}" for i in self.intervals]) - return "IterationSpace[%s]" % ret + return f"IterationSpace[{ret}]" def __eq__(self, other): if self is other: @@ -853,8 +857,8 @@ def generate(self, op, *others, relations=None): directions[k] = v elif v is not Any: # Clash detected - raise ValueError("Cannot compute %s of `IterationSpace`s " - "with incompatible directions" % op) + raise ValueError(f"Cannot compute {op} of `IterationSpace`s " + "with incompatible directions") sub_iterators = {} for i in others: @@ -922,10 +926,7 @@ def project(self, cond, strict=True): * either `cond(d)` is true (`cond` is a callable), * or `d in cond` is true (`cond` is an iterable) """ - if callable(cond): - func = cond - else: - func = lambda i: i in cond + func = cond if callable(cond) else lambda i: i in cond dims = [i.dim for i in self if not 
func(i.dim)] intervals = self.intervals.drop(dims, strict=strict) diff --git a/devito/ir/support/symregistry.py b/devito/ir/support/symregistry.py index aca49757a6..d7fca20f7c 100644 --- a/devito/ir/support/symregistry.py +++ b/devito/ir/support/symregistry.py @@ -45,7 +45,7 @@ def make_name(self, prefix=None, increment_first=True): if not increment_first: return prefix - return "%s%d" % (prefix, counter()) + return f'{prefix}{counter()}' def make_npthreads(self, size): name = self.make_name(prefix='npthreads') diff --git a/devito/ir/support/syncs.py b/devito/ir/support/syncs.py index 753b8f28fb..67623e5fe0 100644 --- a/devito/ir/support/syncs.py +++ b/devito/ir/support/syncs.py @@ -57,7 +57,7 @@ def __hash__(self): self.function, self.findex, self.dim, self.size, self.origin)) def __repr__(self): - return "%s<%s>" % (self.__class__.__name__, self.handle.name) + return f"{self.__class__.__name__}<{self.handle.name}>" __str__ = __repr__ @@ -75,7 +75,7 @@ def lock(self): class SyncCopyOut(SyncOp): def __repr__(self): - return "%s<%s->%s>" % (self.__class__.__name__, self.target, self.function) + return f"{self.__class__.__name__}<{self.target}->{self.function}>" __str__ = __repr__ @@ -98,7 +98,7 @@ def imask(self): class SyncCopyIn(SyncOp): def __repr__(self): - return "%s<%s->%s>" % (self.__class__.__name__, self.function, self.target) + return f"{self.__class__.__name__}<{self.function}->{self.target}>" __str__ = __repr__ diff --git a/devito/ir/support/utils.py b/devito/ir/support/utils.py index 41805e0ba8..53f6a55bb0 100644 --- a/devito/ir/support/utils.py +++ b/devito/ir/support/utils.py @@ -1,4 +1,5 @@ from collections import defaultdict, namedtuple +from contextlib import suppress from itertools import product from devito.finite_differences import IndexDerivative @@ -139,7 +140,7 @@ def detect_accesses(exprs): for e in retrieve_indexed(exprs, deep=True): f = e.function - for a, d0 in zip(e.indices, f.dimensions): + for a, d0 in zip(e.indices, f.dimensions, 
strict=True): if isinstance(a, Indirection): a = a.mapped @@ -160,7 +161,7 @@ def detect_accesses(exprs): # accesses (e.g., a[b[x, y] + 1, y]) or 2) as a result of # skewing-based optimizations, such as time skewing (e.g., # `x - time + 1`) or CIRE rotation (e.g., `x + xx - 4`) - d, others = split(dims, lambda i: d0 in i._defines) + d, others = split(dims, lambda i: d0 in i._defines) # noqa: B023 if any(i.is_Indexed for i in a.args) or len(d) != 1: # Case 1) -- with indirect accesses there's not much we can infer @@ -197,11 +198,9 @@ def detect_accesses(exprs): other_dims = set() for e in as_tuple(exprs): other_dims.update(i for i in e.free_symbols if isinstance(i, Dimension)) - try: + with suppress(AttributeError): + # Unless not a types.Eq other_dims.update(e.implicit_dims or {}) - except AttributeError: - # Not a types.Eq - pass other_dims = filter_sorted(other_dims) mapper[None] = Stencil([(i, 0) for i in other_dims]) @@ -243,10 +242,8 @@ def detect_io(exprs, relax=False): terminals = flatten(retrieve_terminals(i, deep=True) for i in roots) for i in terminals: candidates = set(i.free_symbols) - try: + with suppress(AttributeError): candidates.update({i.function}) - except AttributeError: - pass for j in candidates: try: if rule(j): @@ -368,10 +365,8 @@ def minmax_index(expr, d): """ indices = set() for i in retrieve_indexed(expr): - try: + with suppress(KeyError): indices.add(i.indices[d]) - except KeyError: - pass return Extrema(min(minimum(i) for i in indices), max(maximum(i) for i in indices)) @@ -388,6 +383,6 @@ def erange(expr): sdims = [d for d in udims if d.is_Stencil] ranges = [i.range for i in sdims] - mappers = [dict(zip(sdims, i)) for i in product(*ranges)] + mappers = [dict(zip(sdims, i, strict=True)) for i in product(*ranges)] return tuple(expr.subs(m) for m in mappers) diff --git a/devito/ir/support/vector.py b/devito/ir/support/vector.py index 4aa74ba060..02e26e2a02 100644 --- a/devito/ir/support/vector.py +++ b/devito/ir/support/vector.py @@ 
-74,7 +74,9 @@ def __hash__(self): @_asvector() def __add__(self, other): - return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart) + return Vector( + *[i + j for i, j in zip(self, other, strict=True)], smart=self.smart + ) @_asvector() def __radd__(self, other): @@ -82,7 +84,9 @@ def __radd__(self, other): @_asvector() def __sub__(self, other): - return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart) + return Vector( + *[i - j for i, j in zip(self, other, strict=True)], smart=self.smart + ) @_asvector() def __rsub__(self, other): @@ -109,7 +113,7 @@ def __lt__(self, other): return True elif val > 0: return False - except TypeError: + except TypeError as e: if self.smart: if (i < 0) == true: return True @@ -124,7 +128,7 @@ def __lt__(self, other): return True elif q_positive(i): return False - raise TypeError("Non-comparable index functions") + raise TypeError("Non-comparable index functions") from e return False @@ -145,7 +149,7 @@ def __gt__(self, other): return True elif val < 0: return False - except TypeError: + except TypeError as e: if self.smart: if (i > 0) == true: return True @@ -160,7 +164,7 @@ def __gt__(self, other): return True elif q_negative(i): return False - raise TypeError("Non-comparable index functions") + raise TypeError("Non-comparable index functions") from e return False @@ -184,7 +188,7 @@ def __le__(self, other): return True elif val > 0: return False - except TypeError: + except TypeError as e: if self.smart: if (i < 0) == true: return True @@ -199,7 +203,7 @@ def __le__(self, other): return True elif q_positive(i): return False - raise TypeError("Non-comparable index functions") + raise TypeError("Non-comparable index functions") from e # Note: unlike `__lt__`, if we end up here, then *it is* <=. 
For example, # with `v0` and `v1` as above, we would get here @@ -214,7 +218,7 @@ def __getitem__(self, key): return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret def __repr__(self): - return "(%s)" % ','.join(str(i) for i in self) + return "({})".format(','.join(str(i) for i in self)) @property def rank(self): @@ -253,7 +257,9 @@ def distance(self, other): """ try: # Handle quickly the special (yet relevant) cases `other == 0` - if is_integer(other) and other == 0 or all(i == 0 for i in other) and self.rank == other.rank: + if is_integer(other) \ + and other == 0 or all(i == 0 for i in other) \ + and self.rank == other.rank: return self except TypeError: pass @@ -269,12 +275,14 @@ class LabeledVector(Vector): def __new__(cls, items=None): try: - labels, values = zip(*items) + labels, values = zip(*items, strict=True) except (ValueError, TypeError): labels, values = (), () if not all(isinstance(i, Dimension) for i in labels): - raise ValueError("All labels must be of type Dimension, got [%s]" - % ','.join(i.__class__.__name__ for i in labels)) + raise ValueError( + 'All labels must be of type Dimension, got ' + f'[{", ".join(i.__class__.__name__ for i in labels)}]' + ) obj = super().__new__(cls, *values) obj.labels = labels return obj @@ -287,16 +295,20 @@ def transpose(cls, *vectors): if len(vectors) == 0: return LabeledVector() if not all(isinstance(v, LabeledVector) for v in vectors): - raise ValueError("All items must be of type LabeledVector, got [%s]" - % ','.join(i.__class__.__name__ for i in vectors)) + raise ValueError( + 'All items must be of type LabeledVector, got ' + f'[{", ".join(i.__class__.__name__ for i in vectors)}]' + ) T = OrderedDict() for v in vectors: - for l, i in zip(v.labels, v): + for l, i in zip(v.labels, v, strict=True): T.setdefault(l, []).append(i) return tuple((l, Vector(*i)) for l, i in T.items()) def __repr__(self): - return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self)) + return 
"({})".format( + ','.join(f'{l}:{i}' for l, i in zip(self.labels, self, strict=True)) + ) def __hash__(self): return hash((tuple(self), self.labels)) @@ -333,14 +345,15 @@ def __getitem__(self, index): return super().__getitem__(i) return None else: - raise TypeError("Indices must be integers, slices, or Dimensions, not %s" - % type(index)) + raise TypeError( + f"Indices must be integers, slices, or Dimensions, not {type(index)}" + ) def fromlabel(self, label, v=None): return self[label] if label in self.labels else v def items(self): - return zip(self.labels, self) + return zip(self.labels, self, strict=True) @memoized_meth def distance(self, other): @@ -356,7 +369,7 @@ def distance(self, other): raise TypeError("Cannot compute distance from obj of type %s", type(other)) if self.labels != other.labels: raise TypeError("Cannot compute distance due to mismatching `labels`") - return LabeledVector(list(zip(self.labels, self - other))) + return LabeledVector(list(zip(self.labels, self - other, strict=True))) # Utility functions diff --git a/devito/mpatches/rationaltools.py b/devito/mpatches/rationaltools.py index 93b0d14ed6..7469cc34f4 100644 --- a/devito/mpatches/rationaltools.py +++ b/devito/mpatches/rationaltools.py @@ -74,10 +74,7 @@ def _together(expr): elif expr.is_Pow: base = _together(expr.base) - if deep: - exp = _together(expr.exp) - else: - exp = expr.exp + exp = _together(expr.exp) if deep else expr.exp return expr.func(base, exp) else: diff --git a/devito/mpi/distributed.py b/devito/mpi/distributed.py index 480609cd2f..01edaaaa69 100644 --- a/devito/mpi/distributed.py +++ b/devito/mpi/distributed.py @@ -80,8 +80,8 @@ def devito_mpi_init(): if not MPI.Is_initialized(): try: thread_level = mpi4py_thread_levels[mpi4py.rc.thread_level] - except KeyError: - assert False + except KeyError as e: + raise AssertionError('mpi4py thread levels not accessible') from e MPI.Init_thread(thread_level) @@ -117,7 +117,7 @@ def __init__(self, shape, dimensions): 
self._dimensions = as_tuple(dimensions) def __repr__(self): - return "%s(nprocs=%d)" % (self.__class__.__name__, self.nprocs) + return f'{self.__class__.__name__}(nprocs={self.nprocs})' @abstractmethod def comm(self): @@ -147,7 +147,7 @@ def is_parallel(self): def glb_numb(self): """The global indices owned by the calling MPI rank.""" assert len(self.mycoords) == len(self.decomposition) - glb_numb = [i[j] for i, j in zip(self.decomposition, self.mycoords)] + glb_numb = [i[j] for i, j in zip(self.decomposition, self.mycoords, strict=True)] return EnrichedTuple(*glb_numb, getters=self.dimensions) @cached_property @@ -157,7 +157,7 @@ def glb_slices(self): Dimensions to slices. """ return {d: slice(min(i), max(i) + 1) if len(i) > 0 else slice(0, -1) - for d, i in zip(self.dimensions, self.glb_numb)} + for d, i in zip(self.dimensions, self.glb_numb, strict=True)} @property def glb_shape(self): @@ -202,7 +202,7 @@ def glb_to_loc(self, dim, *args, strict=True): """ if dim not in self.dimensions: if strict: - raise ValueError("`%s` must be one of the Distributor dimensions" % dim) + raise ValueError(f"`{dim}` must be one of the Distributor dimensions") else: return args[0] return self.decomposition[dim].index_glb_to_loc(*args) @@ -284,7 +284,7 @@ def all_numb(self): """The global numbering of all MPI ranks.""" ret = [] for c in self.all_coords: - glb_numb = [i[j] for i, j in zip(self.decomposition, c)] + glb_numb = [i[j] for i, j in zip(self.decomposition, c, strict=True)] ret.append(EnrichedTuple(*glb_numb, getters=self.dimensions)) return tuple(ret) @@ -370,16 +370,20 @@ def __init__(self, shape, dimensions, input_comm=None, topology=None): self._topology = tuple(1 for _ in range(len(shape))) # The domain decomposition - self._decomposition = [Decomposition(np.array_split(range(i), j), c) - for i, j, c in zip(shape, self.topology, self.mycoords)] + self._decomposition = [ + Decomposition(np.array_split(range(i), j), c) + for i, j, c in zip(shape, self.topology, 
self.mycoords, strict=True) + ] @cached_property def is_boundary_rank(self): """ MPI rank interfaces with the boundary of the domain. """ - return any([i == 0 or i == j-1 for i, j in - zip(self.mycoords, self.topology)]) + return any([ + i == 0 or i == j-1 + for i, j in zip(self.mycoords, self.topology, strict=True) + ]) @cached_property def glb_pos_map(self): @@ -388,7 +392,7 @@ def glb_pos_map(self): MPI rank in the decomposed domain. """ ret = {} - for d, i, s in zip(self.dimensions, self.mycoords, self.topology): + for d, i, s in zip(self.dimensions, self.mycoords, self.topology, strict=True): v = [] if i == 0: v.append(LEFT) @@ -461,9 +465,9 @@ def neighborhood(self): # Set up diagonal neighbours for i in product([LEFT, CENTER, RIGHT], repeat=self.ndim): - neighbor = [c + s.val for c, s in zip(self.mycoords, i)] + neighbor = [c + s.val for c, s in zip(self.mycoords, i, strict=True)] - if any(c < 0 or c >= s for c, s in zip(neighbor, self.topology)): + if any(c < 0 or c >= s for c, s in zip(neighbor, self.topology, strict=True)): ret[i] = MPI.PROC_NULL else: ret[i] = self.comm.Get_cart_rank(neighbor) @@ -499,9 +503,11 @@ def __init__(self, subdomain): super().__init__(subdomain.shape, subdomain.dimensions) self._subdomain_name = subdomain.name - self._dimension_map = frozendict({pd: sd for pd, sd - in zip(subdomain.grid.dimensions, - subdomain.dimensions)}) + self._dimension_map = frozendict({ + pd: sd for pd, sd in zip( + subdomain.grid.dimensions, subdomain.dimensions, strict=True + ) + }) self._parent = subdomain.grid.distributor self._comm = self.parent.comm @@ -514,16 +520,21 @@ def __decomposition_setup__(self): Set up the decomposition, aligned with that of the parent Distributor. 
""" decompositions = [] - for dec, i in zip(self.parent._decomposition, self.subdomain_interval): + for dec, i in zip( + self.parent._decomposition, self.subdomain_interval, strict=True + ): if i is None: decompositions.append(dec) else: start, end = _interval_bounds(i) - decompositions.append([d[np.logical_and(d >= start, d <= end)] - for d in dec]) + decompositions.append( + [d[np.logical_and(d >= start, d <= end)] for d in dec] + ) - self._decomposition = [Decomposition(d, c) - for d, c in zip(decompositions, self.mycoords)] + self._decomposition = [ + Decomposition(d, c) + for d, c in zip(decompositions, self.mycoords, strict=True) + ] @property def parent(self): @@ -558,8 +569,10 @@ def subdomain_interval(self): """The interval spanned by the SubDomain.""" # Assumes no override of x_m and x_M supplied to operator bounds_map = {d.symbolic_min: 0 for d in self.p.dimensions} - bounds_map.update({d.symbolic_max: s-1 for d, s in zip(self.p.dimensions, - self.p.glb_shape)}) + bounds_map.update({ + d.symbolic_max: s - 1 + for d, s in zip(self.p.dimensions, self.p.glb_shape, strict=True) + }) sd_interval = [] # The Interval of SubDimension indices for d in self.dimensions: @@ -576,8 +589,10 @@ def subdomain_interval(self): @cached_property def intervals(self): """The interval spanned by the SubDomain in each dimension on this rank.""" - return tuple(d if s is None else d.intersect(s) - for d, s in zip(self.domain_interval, self.subdomain_interval)) + return tuple( + d if s is None else d.intersect(s) + for d, s in zip(self.domain_interval, self.subdomain_interval, strict=True) + ) @cached_property def crosses(self): @@ -608,18 +623,27 @@ def get_crosses(d, di, si): if di.issuperset(si) or di.isdisjoint(si): return {LEFT: False, RIGHT: False} elif d.local: - raise ValueError("SubDimension %s is local and cannot be" - " decomposed across MPI ranks" % d) + raise ValueError(f"SubDimension {d} is local and cannot be" + " decomposed across MPI ranks") return {LEFT: si.left 
< di.left, RIGHT: si.right > di.right} - crosses = {d: get_crosses(d, di, si) for d, di, si - in zip(self.dimensions, self.domain_interval, - self.subdomain_interval)} + crosses = { + d: get_crosses(d, di, si) + for d, di, si in zip( + self.dimensions, + self.domain_interval, + self.subdomain_interval, + strict=True + ) + } for i in product([LEFT, CENTER, RIGHT], repeat=len(self.dimensions)): - crosses[i] = all(crosses[d][s] for d, s in zip(self.dimensions, i) - if s in crosses[d]) # Skip over CENTER + crosses[i] = all( + crosses[d][s] + for d, s in zip(self.dimensions, i, strict=True) + if s in crosses[d] + ) # Skip over CENTER return frozendict(crosses) @@ -664,10 +688,11 @@ def neighborhood(self): # Set up diagonal neighbours for i in product([LEFT, CENTER, RIGHT], repeat=self.ndim): - neighbor = [c + s.val for c, s in zip(self.mycoords, i)] + neighbor = [c + s.val for c, s in zip(self.mycoords, i, strict=True)] - if any(c < 0 or c >= s for c, s in zip(neighbor, self.topology)) \ - or not self.crosses[i]: + if any( + c < 0 or c >= s for c, s in zip(neighbor, self.topology, strict=True) + ) or not self.crosses[i]: ret[i] = MPI.PROC_NULL else: ret[i] = self.comm.Get_cart_rank(neighbor) @@ -678,7 +703,7 @@ def neighborhood(self): def rank_populated(self): """Constant symbol for a switch indicating that data is allocated on this rank""" return Constant(name=f'rank_populated_{self._subdomain_name}', dtype=np.int8, - value=int(not(self.loc_empty))) + value=int(not self.loc_empty)) def _interval_bounds(interval): @@ -742,8 +767,10 @@ def decompose(cls, npoint, distributor): # The i-th entry in `npoint` tells how many sparse points the # i-th MPI rank has if len(npoint) != nprocs: - raise ValueError('The `npoint` tuple must have as many entries as ' - 'MPI ranks (got `%d`, need `%d`)' % (npoint, nprocs)) + raise ValueError( + 'The `npoint` tuple must have as many entries as ' + f'MPI ranks (got `{npoint}`, need `{nprocs}`)' + ) elif any(i < 0 for i in npoint): raise 
ValueError('All entries in `npoint` must be >= 0') glb_npoint = npoint @@ -850,13 +877,13 @@ def _C_typedecl(self): # # With this override, we generate the one on the right groups = [list(g) for k, g in groupby(self.pfields, key=lambda x: x[0][0])] - groups = [(j[0], i) for i, j in [zip(*g) for g in groups]] + groups = [(j[0], i) for i, j in [zip(*g, strict=True) for g in groups]] return Struct(self.pname, [Value(ctypes_to_cstr(i), ', '.join(j)) for i, j in groups]) def _arg_defaults(self): values = super()._arg_defaults() - for name, i in zip(self.fields, self.entries): + for name, i in zip(self.fields, self.entries, strict=True): setattr(values[self.name]._obj, name, self.neighborhood[i]) return values @@ -959,7 +986,7 @@ def __new__(cls, items, input_comm): star_vals = [int(np.prod(s)) for s in split] # Apply computed star values to the processed - for index, value in zip(star_pos, star_vals): + for index, value in zip(star_pos, star_vals, strict=True): processed[index] = value # Final check that topology matches the communicator size @@ -979,7 +1006,7 @@ def compute_dims(nprocs, ndim): if not v.is_integer(): # Since pow(64, 1/3) == 3.999..4 v = int(ceil(v)) - if not v**ndim == nprocs: + if v**ndim != nprocs: # Fallback return tuple(MPI.Compute_dims(nprocs, ndim)) else: diff --git a/devito/mpi/halo_scheme.py b/devito/mpi/halo_scheme.py index 4bc3c44b51..4d12299025 100644 --- a/devito/mpi/halo_scheme.py +++ b/devito/mpi/halo_scheme.py @@ -1,4 +1,5 @@ from collections import OrderedDict, defaultdict, namedtuple +from contextlib import suppress from functools import cached_property from itertools import product from operator import attrgetter @@ -38,7 +39,7 @@ def __new__(cls, loc_indices, loc_dirs, halos, dims, bundle=None, getters=None): getters = cls.__rargs__ + cls.__rkwargs__ items = [frozendict(loc_indices), frozendict(loc_dirs), frozenset(halos), frozenset(dims), bundle] - kwargs = dict(zip(getters, items)) + kwargs = dict(zip(getters, items, strict=True)) 
return super().__new__(cls, *items, getters=getters, **kwargs) def __hash__(self): @@ -151,7 +152,7 @@ def __init__(self, exprs, ispace): def __repr__(self): fnames = ",".join(i.name for i in set(self._mapper)) - return "HaloScheme<%s>" % fnames + return f"HaloScheme<{fnames}>" def __eq__(self, other): return (isinstance(other, HaloScheme) and @@ -401,7 +402,7 @@ def owned_size(self): mapper = {} for f, v in self.halos.items(): dimensions = filter_ordered(flatten(i.dim for i in v)) - for d, s in zip(f.dimensions, f._size_owned): + for d, s in zip(f.dimensions, f._size_owned, strict=True): if d in dimensions: maxl, maxr = mapper.get(d, (0, 0)) mapper[d] = (max(maxl, s.left), max(maxr, s.right)) @@ -532,8 +533,11 @@ def classify(exprs, ispace): # practically subjected to domain decomposition dist = f.grid.distributor try: - ignored = [d for i, d in zip(dist.topology_logical, dist.dimensions) - if i == 1] + ignored = [ + d + for i, d in zip(dist.topology_logical, dist.dimensions, strict=True) + if i == 1 + ] except TypeError: ignored = [] @@ -569,7 +573,10 @@ def classify(exprs, ispace): combs.remove((CENTER,)*len(f._dist_dimensions)) for c in combs: key = (f._dist_dimensions, c) - if all(v.get((d, s)) is STENCIL or s is CENTER for d, s in zip(*key)): + if all( + v.get((d, s)) is STENCIL or s is CENTER + for d, s in zip(*key, strict=True) + ): v[key] = STENCIL # Finally update the `halo_labels` @@ -597,16 +604,13 @@ def classify(exprs, ispace): # Separate halo-exchange Dimensions from `loc_indices` raw_loc_indices, halos = defaultdict(list), [] for (d, s), hl in halo_labels.items(): - try: + with suppress(KeyError): hl.remove(IDENTITY) - except KeyError: - pass if not hl: continue elif len(hl) > 1: raise HaloSchemeException("Inconsistency found while building a halo " - "scheme for `%s` along Dimension `%s`" - % (f, d)) + f"scheme for `{f}` along Dimension `{d}`") elif hl.pop() is STENCIL: halos.append(Halo(d, s)) else: @@ -683,7 +687,7 @@ def __new__(cls, *args, 
halo_scheme=None, **kwargs): return obj def __repr__(self): - return "HaloTouch(%s)" % ",".join(f.name for f in self.halo_scheme.fmapper) + return "HaloTouch({})".format(",".join(f.name for f in self.halo_scheme.fmapper)) __str__ = __repr__ diff --git a/devito/mpi/reduction_scheme.py b/devito/mpi/reduction_scheme.py index f3a412f07d..43dfe59a50 100644 --- a/devito/mpi/reduction_scheme.py +++ b/devito/mpi/reduction_scheme.py @@ -22,7 +22,7 @@ def __new__(cls, var, op=None, grid=None, ispace=None, **kwargs): return obj def __repr__(self): - return "DistReduce(%s,%s)" % (self.var, self.op) + return f"DistReduce({self.var},{self.op})" __str__ = __repr__ diff --git a/devito/mpi/routines.py b/devito/mpi/routines.py index 9fa639a7a1..74c788fa0e 100644 --- a/devito/mpi/routines.py +++ b/devito/mpi/routines.py @@ -119,7 +119,7 @@ def make(self, hs): # Callables haloupdates = [] halowaits = [] - for i, (f, hse) in enumerate(hs.fmapper.items()): + for f, hse in hs.fmapper.items(): msg = self._msgs[(f, hse)] haloupdate, halowait = mapper[(f, hse)] haloupdates.append(self._call_haloupdate(haloupdate.name, f, hse, msg)) @@ -315,12 +315,12 @@ def _make_bundles(self, hs): # We recast everything else as Bags for simplicity -- worst case # scenario all Bags only have one component. 
try: - name = "bag_%s" % "".join(f.name for f in components) + name = "bag_{}".format("".join(f.name for f in components)) bag = Bag(name=name, components=components) halo_scheme = halo_scheme.add(bag, hse) except ValueError: for i in components: - name = "bag_%s" % i.name + name = f"bag_{i.name}" bag = Bag(name=name, components=i) halo_scheme = halo_scheme.add(bag, hse) @@ -354,11 +354,13 @@ def _make_all(self, f, hse, msg): def _make_copy(self, f, hse, key, swap=False): dims = [d.root for d in f.dimensions if d not in hse.loc_indices] - ofs = [Symbol(name='o%s' % d.root, is_const=True) for d in f.dimensions] + ofs = [Symbol(name=f'o{d.root}', is_const=True) for d in f.dimensions] - bshape = [Symbol(name='b%s' % d.symbolic_size) for d in dims] - bdims = [CustomDimension(name=d.name, parent=d, symbolic_size=s) - for d, s in zip(dims, bshape)] + bshape = [Symbol(name=f'b{d.symbolic_size}') for d in dims] + bdims = [ + CustomDimension(name=d.name, parent=d, symbolic_size=s) + for d, s in zip(dims, bshape, strict=True) + ] eqns = [] eqns.extend([Eq(d.symbolic_min, 0) for d in bdims]) @@ -368,16 +370,18 @@ def _make_copy(self, f, hse, key, swap=False): buf = Array(name='buf', dimensions=[vd] + bdims, dtype=f.c0.dtype, padding=0) - mapper = dict(zip(dims, bdims)) - findices = [o - h + mapper.get(d.root, 0) - for d, o, h in zip(f.dimensions, ofs, f._size_nodomain.left)] + mapper = dict(zip(dims, bdims, strict=True)) + findices = [ + o - h + mapper.get(d.root, 0) + for d, o, h in zip(f.dimensions, ofs, f._size_nodomain.left, strict=True) + ] if swap is False: swap = lambda i, j: (i, j) - name = 'gather%s' % key + name = f'gather{key}' else: swap = lambda i, j: (j, i) - name = 'scatter%s' % key + name = f'scatter{key}' if isinstance(f, Bag): for i, c in enumerate(f.components): @@ -410,8 +414,8 @@ def _make_sendrecv(self, f, hse, key, **kwargs): bufs = Array(name='bufs', dimensions=bdims, dtype=f.c0.dtype, padding=0, liveness='eager') - ofsg = [Symbol(name='og%s' % d.root) 
for d in f.dimensions] - ofss = [Symbol(name='os%s' % d.root) for d in f.dimensions] + ofsg = [Symbol(name=f'og{d.root}') for d in f.dimensions] + ofss = [Symbol(name=f'os{d.root}') for d in f.dimensions] fromrank = Symbol(name='fromrank') torank = Symbol(name='torank') @@ -419,9 +423,9 @@ def _make_sendrecv(self, f, hse, key, **kwargs): shape = [d.symbolic_size for d in dims] arguments = [bufg] + shape + list(f.handles) + ofsg - gather = Gather('gather%s' % key, arguments) + gather = Gather(f'gather{key}', arguments) arguments = [bufs] + shape + list(f.handles) + ofss - scatter = Scatter('scatter%s' % key, arguments) + scatter = Scatter(f'scatter{key}', arguments) # The `gather` is unnecessary if sending to MPI.PROC_NULL gather = Conditional(CondNe(torank, Macro('MPI_PROC_NULL')), gather) @@ -445,7 +449,7 @@ def _make_sendrecv(self, f, hse, key, **kwargs): parameters = (list(f.handles) + shape + ofsg + ofss + [fromrank, torank, comm]) - return SendRecv('sendrecv%s' % key, iet, parameters, bufg, bufs) + return SendRecv(f'sendrecv{key}', iet, parameters, bufg, bufs) def _call_sendrecv(self, name, *args, **kwargs): args = list(args[0].handles) + flatten(args[1:]) @@ -456,7 +460,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): nb = distributor._obj_neighborhood comm = distributor._obj_comm - fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices} + fixed = {d: Symbol(name=f"o{d.root}") for d in hse.loc_indices} # Build a mapper `(dim, side, region) -> (size, ofs)` for `f`. `size` and # `ofs` are symbolic objects. 
This mapper tells what data values should be @@ -504,7 +508,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): parameters = list(f.handles) + [comm, nb] + list(fixed.values()) - return HaloUpdate('haloupdate%s' % key, iet, parameters) + return HaloUpdate(f'haloupdate{key}', iet, parameters) def _call_haloupdate(self, name, f, hse, *args): comm = f.grid.distributor._obj_comm @@ -566,7 +570,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): nb = distributor._obj_neighborhood comm = distributor._obj_comm - fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices} + fixed = {d: Symbol(name=f"o{d.root}") for d in hse.loc_indices} # Only retain the halos required by the Diag scheme # Note: `sorted` is only for deterministic code generation @@ -574,7 +578,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): body = [] for dims, tosides in halos: - mapper = OrderedDict(zip(dims, tosides)) + mapper = OrderedDict(zip(dims, tosides, strict=True)) sizes = [f._C_get_field(OWNED, d, s).size for d, s in mapper.items()] @@ -582,7 +586,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): ofsg = [fixed.get(d, f._C_get_field(OWNED, d, mapper.get(d)).offset) for d in f.dimensions] - mapper = OrderedDict(zip(dims, [i.flip() for i in tosides])) + mapper = OrderedDict(zip(dims, [i.flip() for i in tosides], strict=True)) fromrank = FieldFromPointer(''.join(i.name[0] for i in mapper.values()), nb) ofss = [fixed.get(d, f._C_get_field(HALO, d, mapper.get(d)).offset) for d in f.dimensions] @@ -596,7 +600,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): parameters = list(f.handles) + [comm, nb] + list(fixed.values()) - return HaloUpdate('haloupdate%s' % key, iet, parameters) + return HaloUpdate(f'haloupdate{key}', iet, parameters) class ComputeCall(ElementalCall): @@ -624,7 +628,7 @@ class OverlapHaloExchangeBuilder(DiagHaloExchangeBuilder): def _make_msg(self, f, hse, key): # Only retain the halos required by the 
Diag scheme halos = sorted(i for i in hse.halos if isinstance(i.dim, tuple)) - return MPIMsg('msg%d' % key, f, halos) + return MPIMsg(f'msg{key}', f, halos) def _make_sendrecv(self, f, hse, key, msg=None): fcast = cast(f.c0.dtype, '*') @@ -633,16 +637,18 @@ def _make_sendrecv(self, f, hse, key, msg=None): bufg = FieldFromPointer(msg._C_field_bufg, msg) bufs = FieldFromPointer(msg._C_field_bufs, msg) - ofsg = [Symbol(name='og%s' % d.root) for d in f.dimensions] + ofsg = [Symbol(name=f'og{d.root}') for d in f.dimensions] fromrank = Symbol(name='fromrank') torank = Symbol(name='torank') - sizes = [FieldFromPointer('%s[%d]' % (msg._C_field_sizes, i), msg) - for i in range(len(f._dist_dimensions))] + sizes = [ + FieldFromPointer(f'{msg._C_field_sizes}[{i}]', msg) + for i in range(len(f._dist_dimensions)) + ] arguments = [fcast(bufg)] + sizes + list(f.handles) + ofsg - gather = Gather('gather%s' % key, arguments) + gather = Gather(f'gather{key}', arguments) # The `gather` is unnecessary if sending to MPI.PROC_NULL gather = Conditional(CondNe(torank, Macro('MPI_PROC_NULL')), gather) @@ -658,7 +664,7 @@ def _make_sendrecv(self, f, hse, key, msg=None): parameters = list(f.handles) + ofsg + [fromrank, torank, comm, msg] - return SendRecv('sendrecv%s' % key, iet, parameters, bufg, bufs) + return SendRecv(f'sendrecv{key}', iet, parameters, bufg, bufs) def _call_sendrecv(self, name, *args, msg=None, haloid=None): # Drop `sizes` as this HaloExchangeBuilder conveys them through `msg` @@ -682,8 +688,12 @@ def _make_compute(self, hs, key, *args): if hs.body.is_Call: return None else: - return make_efunc('compute%d' % key, hs.body, hs.arguments, - efunc_type=ComputeFunction) + return make_efunc( + f'compute{key}', + hs.body, + hs.arguments, + efunc_type=ComputeFunction + ) def _call_compute(self, hs, compute, *args): if compute is None: @@ -697,14 +707,16 @@ def _make_wait(self, f, hse, key, msg=None): bufs = FieldFromPointer(msg._C_field_bufs, msg) - ofss = [Symbol(name='os%s' % 
d.root) for d in f.dimensions] + ofss = [Symbol(name=f'os{d.root}') for d in f.dimensions] fromrank = Symbol(name='fromrank') - sizes = [FieldFromPointer('%s[%d]' % (msg._C_field_sizes, i), msg) - for i in range(len(f._dist_dimensions))] + sizes = [ + FieldFromPointer(f'{msg._C_field_sizes}[{i}]', msg) + for i in range(len(f._dist_dimensions)) + ] arguments = [fcast(bufs)] + sizes + list(f.handles) + ofss - scatter = Scatter('scatter%s' % key, arguments) + scatter = Scatter(f'scatter{key}', arguments) # The `scatter` must be guarded as we must not alter the halo values along # the domain boundary, where the sender is actually MPI.PROC_NULL @@ -719,12 +731,12 @@ def _make_wait(self, f, hse, key, msg=None): parameters = (list(f.handles) + ofss + [fromrank, msg]) - return Callable('wait_%s' % key, iet, 'void', parameters, ('static',)) + return Callable(f'wait_{key}', iet, 'void', parameters, ('static',)) def _make_halowait(self, f, hse, key, wait, msg=None): nb = f.grid.distributor._obj_neighborhood - fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices} + fixed = {d: Symbol(name=f"o{d.root}") for d in hse.loc_indices} # Only retain the halos required by the Diag scheme # Note: `sorted` is only for deterministic code generation @@ -732,7 +744,7 @@ def _make_halowait(self, f, hse, key, wait, msg=None): body = [] for dims, tosides in halos: - mapper = OrderedDict(zip(dims, [i.flip() for i in tosides])) + mapper = OrderedDict(zip(dims, [i.flip() for i in tosides], strict=True)) fromrank = FieldFromPointer(''.join(i.name[0] for i in mapper.values()), nb) ofss = [fixed.get(d, f._C_get_field(HALO, d, mapper.get(d)).offset) for d in f.dimensions] @@ -756,7 +768,7 @@ def _call_halowait(self, name, f, hse, msg): def _make_remainder(self, hs, key, callcompute, *args): assert callcompute.is_Call body = [callcompute._rebuild(dynamic_args_mapper=i) for _, i in hs.omapper.owned] - return Remainder.make('remainder%d' % key, body) + return 
Remainder.make(f'remainder{key}', body) def _call_remainder(self, remainder): efunc = remainder.make_call() @@ -797,7 +809,7 @@ def _make_haloupdate(self, f, hse, key, *args, msg=None): fcast = cast(f.c0.dtype, '*') comm = f.grid.distributor._obj_comm - fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices} + fixed = {d: Symbol(name=f"o{d.root}") for d in hse.loc_indices} dim = Dimension(name='i') @@ -809,15 +821,19 @@ def _make_haloupdate(self, f, hse, key, *args, msg=None): fromrank = FieldFromComposite(msg._C_field_from, msgi) torank = FieldFromComposite(msg._C_field_to, msgi) - sizes = [FieldFromComposite('%s[%d]' % (msg._C_field_sizes, i), msgi) - for i in range(len(f._dist_dimensions))] - ofsg = [FieldFromComposite('%s[%d]' % (msg._C_field_ofsg, i), msgi) - for i in range(len(f._dist_dimensions))] + sizes = [ + FieldFromComposite(f'{msg._C_field_sizes}[{i}]', msgi) + for i in range(len(f._dist_dimensions)) + ] + ofsg = [ + FieldFromComposite(f'{msg._C_field_ofsg}[{i}]', msgi) + for i in range(len(f._dist_dimensions)) + ] ofsg = [fixed.get(d) or ofsg.pop(0) for d in f.dimensions] # The `gather` is unnecessary if sending to MPI.PROC_NULL arguments = [fcast(bufg)] + sizes + list(f.handles) + ofsg - gather = Gather('gather%s' % key, arguments) + gather = Gather(f'gather{key}', arguments) gather = Conditional(CondNe(torank, Macro('MPI_PROC_NULL')), gather) # Make Irecv/Isend @@ -833,7 +849,7 @@ def _make_haloupdate(self, f, hse, key, *args, msg=None): ncomms = Symbol(name='ncomms') iet = Iteration([recv, gather, send], dim, ncomms - 1) parameters = f.handles + (comm, msg, ncomms) + tuple(fixed.values()) - return HaloUpdate('haloupdate%s' % key, iet, parameters) + return HaloUpdate(f'haloupdate{key}', iet, parameters) def _call_haloupdate(self, name, f, hse, msg): comm = f.grid.distributor._obj_comm @@ -843,7 +859,7 @@ def _call_haloupdate(self, name, f, hse, msg): def _make_halowait(self, f, hse, key, *args, msg=None): fcast = cast(f.c0.dtype, '*') - 
fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices} + fixed = {d: Symbol(name=f"o{d.root}") for d in hse.loc_indices} dim = Dimension(name='i') @@ -853,16 +869,20 @@ def _make_halowait(self, f, hse, key, *args, msg=None): fromrank = FieldFromComposite(msg._C_field_from, msgi) - sizes = [FieldFromComposite('%s[%d]' % (msg._C_field_sizes, i), msgi) - for i in range(len(f._dist_dimensions))] - ofss = [FieldFromComposite('%s[%d]' % (msg._C_field_ofss, i), msgi) - for i in range(len(f._dist_dimensions))] + sizes = [ + FieldFromComposite(f'{msg._C_field_sizes}[{i}]', msgi) + for i in range(len(f._dist_dimensions)) + ] + ofss = [ + FieldFromComposite(f'{msg._C_field_ofss}[{i}]', msgi) + for i in range(len(f._dist_dimensions)) + ] ofss = [fixed.get(d) or ofss.pop(0) for d in f.dimensions] # The `scatter` must be guarded as we must not alter the halo values along # the domain boundary, where the sender is actually MPI.PROC_NULL arguments = [fcast(bufs)] + sizes + list(f.handles) + ofss - scatter = Scatter('scatter%s' % key, arguments) + scatter = Scatter(f'scatter{key}', arguments) scatter = Conditional(CondNe(fromrank, Macro('MPI_PROC_NULL')), scatter) rrecv = Byref(FieldFromComposite(msg._C_field_rrecv, msgi)) @@ -904,7 +924,7 @@ def _make_remainder(self, hs, key, callcompute, region): # The -1 below is because an Iteration, by default, generates <= iet = Iteration(iet, dim, region.nregions - 1) - return Remainder.make('remainder%d' % key, iet) + return Remainder.make(f'remainder{key}', iet) class Diag2HaloExchangeBuilder(Overlap2HaloExchangeBuilder): @@ -988,8 +1008,12 @@ def _make_compute(self, hs, key, msgs, callpoke): mapper = {i: List(body=[callpoke, i]) for i in FindNodes(ExpressionBundle).visit(hs.body)} iet = Transformer(mapper).visit(hs.body) - return make_efunc('compute%d' % key, iet, hs.arguments, - efunc_type=ComputeFunction) + return make_efunc( + f'compute{key}', + iet, + hs.arguments, + efunc_type=ComputeFunction + ) def _make_poke(self, hs, 
key, msgs): lflag = Symbol(name='lflag') @@ -1017,7 +1041,11 @@ def _make_poke(self, hs, key, msgs): body.append(Return(gflag)) - return make_efunc('pokempi%d' % key, List(body=body), retval='int') + return make_efunc( + f'pokempi{key}', + List(body=body), + retval='int' + ) def _call_poke(self, poke): return Prodder(poke.name, poke.parameters, single_thread=True, periodic=True) @@ -1219,7 +1247,7 @@ def _arg_defaults(self, alias, args=None): # Buffer shape for this peer shape = [] - for dim, side in zip(*halo): + for dim, side in zip(*halo, strict=True): try: shape.append(getattr(f._size_owned[dim], side.name)) except AttributeError: @@ -1292,7 +1320,7 @@ def _arg_defaults(self, alias=None, args=None): # `torank` peer + gather offsets entry.torank = neighborhood[halo.side] ofsg = [] - for dim, side in zip(*halo): + for dim, side in zip(*halo, strict=True): try: v = getattr(f._offset_owned[dim], side.name) ofsg.append(self._as_number(v, args)) @@ -1304,7 +1332,7 @@ def _arg_defaults(self, alias=None, args=None): # `fromrank` peer + scatter offsets entry.fromrank = neighborhood[tuple(i.flip() for i in halo.side)] ofss = [] - for dim, side in zip(*halo): + for dim, side in zip(*halo, strict=True): try: v = getattr(f._offset_halo[dim], side.flip().name) ofss.append(self._as_number(v, args)) @@ -1342,8 +1370,8 @@ def __init__(self, prefix, key, arguments, owned): # Sorting for deterministic codegen self._arguments = sorted(arguments, key=lambda i: i.name) - name = "%s%d" % (prefix, key) - pname = "region%d" % key + name = f'{prefix}{key}' + pname = f'region{key}' fields = [] for i in self.arguments: diff --git a/devito/operations/interpolators.py b/devito/operations/interpolators.py index 9104fb426c..1ac838c050 100644 --- a/devito/operations/interpolators.py +++ b/devito/operations/interpolators.py @@ -233,7 +233,7 @@ def _rdim(self, subdomain=None): rdims = [] pos = self.sfunction._position_map.values() - for (d, rd, p) in zip(gdims, self._cdim, pos): + for (d, rd, p) 
in zip(gdims, self._cdim, pos, strict=True): # Add conditional to avoid OOB lb = sympy.And(rd + p >= d.symbolic_min - self.r, evaluate=False) ub = sympy.And(rd + p <= d.symbolic_max + self.r, evaluate=False) @@ -301,21 +301,28 @@ def _interp_idx(self, variables, implicit_dims=None, pos_only=(), subdomain=None mapper = self._rdim(subdomain=subdomain).getters # Index substitution to make in variables - subs = {ki: c + p for ((k, c), p) - in zip(mapper.items(), pos) for ki in {k, k.root}} + subs = { + ki: c + p + for ((k, c), p) in zip(mapper.items(), pos, strict=True) + for ki in {k, k.root} + } idx_subs = {v: v.subs(subs) for v in variables} # Position only replacement, not radius dependent. # E.g src.inject(vp(x)*src) needs to use vp[posx] at all points # not vp[posx + rx] - idx_subs.update({v: v.subs({k: p for (k, p) in zip(mapper, pos)}) - for v in pos_only}) + idx_subs.update({ + v: v.subs({ + k: p + for (k, p) in zip(mapper, pos, strict=True) + }) for v in pos_only + }) return idx_subs, temps @check_radius - def interpolate(self, expr, increment=False, self_subs={}, implicit_dims=None): + def interpolate(self, expr, increment=False, self_subs=None, implicit_dims=None): """ Generate equations interpolating an arbitrary expression into ``self``. @@ -330,6 +337,8 @@ def interpolate(self, expr, increment=False, self_subs={}, implicit_dims=None): interpolation expression, but that should be honored when constructing the operator. """ + if self_subs is None: + self_subs = {} return Interpolation(expr, increment, implicit_dims, self_subs, self) @check_radius @@ -350,7 +359,7 @@ def inject(self, field, expr, implicit_dims=None): """ return Injection(field, expr, implicit_dims, self) - def _interpolate(self, expr, increment=False, self_subs={}, implicit_dims=None): + def _interpolate(self, expr, increment=False, self_subs=None, implicit_dims=None): """ Generate equations interpolating an arbitrary expression into ``self``. 
@@ -372,6 +381,9 @@ def _interpolate(self, expr, increment=False, self_subs={}, implicit_dims=None): # E.g., a generic SymPy expression or a number _expr = expr + if self_subs is None: + self_subs = {} + variables = list(retrieve_function_carriers(_expr)) subdomain = _extract_subdomain(variables) @@ -450,7 +462,7 @@ def _inject(self, field, expr, implicit_dims=None): eqns = [Inc(_field.xreplace(idx_subs), (self._weights(subdomain=subdomain) * _expr).xreplace(idx_subs), implicit_dims=implicit_dims) - for (_field, _expr) in zip(fields, _exprs)] + for (_field, _expr) in zip(fields, _exprs, strict=True)] return temps + eqns @@ -471,7 +483,7 @@ class LinearInterpolator(WeightedInterpolator): def _weights(self, subdomain=None): rdim = self._rdim(subdomain=subdomain) c = [(1 - p) * (1 - r) + p * r - for (p, d, r) in zip(self._point_symbols, self._gdims, rdim)] + for (p, d, r) in zip(self._point_symbols, self._gdims, rdim, strict=True)] return Mul(*c) @cached_property @@ -487,7 +499,7 @@ def _coeff_temps(self, implicit_dims): pmap = self.sfunction._position_map poseq = [Eq(self._point_symbols[d], pos - floor(pos), implicit_dims=implicit_dims) - for (d, pos) in zip(self._gdims, pmap.keys())] + for (d, pos) in zip(self._gdims, pmap.keys(), strict=True)] return poseq @@ -567,8 +579,10 @@ def interpolation_coeffs(self): @memoized_meth def _weights(self, subdomain=None): rdims = self._rdim(subdomain=subdomain) - return Mul(*[w._subs(rd, rd-rd.parent.symbolic_min) - for (rd, w) in zip(rdims, self.interpolation_coeffs)]) + return Mul(*[ + w._subs(rd, rd-rd.parent.symbolic_min) + for (rd, w) in zip(rdims, self.interpolation_coeffs, strict=True) + ]) def _arg_defaults(self, coords=None, sfunc=None): args = {} diff --git a/devito/operations/solve.py b/devito/operations/solve.py index 6545fc4ead..8e8541bf09 100644 --- a/devito/operations/solve.py +++ b/devito/operations/solve.py @@ -1,3 +1,4 @@ +from contextlib import suppress from functools import singledispatch import sympy @@ 
-32,10 +33,8 @@ def solve(eq, target, **kwargs): Symbolic optimizations applied while rearranging the equation. For more information. refer to ``sympy.solve.__doc__``. """ - try: + with suppress(AttributeError): eq = eq.lhs - eq.rhs if eq.rhs != 0 else eq.lhs - except AttributeError: - pass eqs, targets = as_tuple(eq), as_tuple(target) if len(eqs) == 0: @@ -43,7 +42,7 @@ def solve(eq, target, **kwargs): return None sols = [] - for e, t in zip(eqs, targets): + for e, t in zip(eqs, targets, strict=True): # Try first linear solver try: sols.append(linsolve(eval_time_derivatives(e), t)) diff --git a/devito/operator/operator.py b/devito/operator/operator.py index a0f3d06410..779106d75a 100644 --- a/devito/operator/operator.py +++ b/devito/operator/operator.py @@ -1,6 +1,7 @@ import ctypes import shutil from collections import OrderedDict, namedtuple +from contextlib import suppress from functools import cached_property from math import ceil from operator import attrgetter @@ -400,10 +401,8 @@ def _lower_clusters(cls, expressions, profiler=None, **kwargs): # Operation count after specialization final_ops = sum(estimate_cost(c.exprs) for c in clusters if c.is_dense) - try: + with suppress(AttributeError): profiler.record_ops_variation(init_ops, final_ops) - except AttributeError: - pass # Generate implicit Clusters from higher level abstractions clusters = generate_implicit(clusters) @@ -468,10 +467,8 @@ def _lower_uiet(cls, stree, profiler=None, **kwargs): uiet = iet_build(stree) # Analyze the IET Sections for C-level profiling - try: + with suppress(AttributeError): profiler.analyze(uiet) - except AttributeError: - pass return uiet @@ -626,11 +623,11 @@ def _prepare_arguments(self, autotune=None, estimate_memory=False, **kwargs): args.update(p._arg_values(estimate_memory=estimate_memory, **kwargs)) try: args.reduce_inplace() - except ValueError: + except ValueError as e: v = [i for i in overrides if i.name in args] raise InvalidArgument( f"Override `{p}` is incompatible 
with overrides `{v}`" - ) + ) from e # Process data-carrier defaults for p in defaults: @@ -756,10 +753,8 @@ def _known_arguments(self): ret = set() for i in self.input: ret.update(i._arg_names) - try: + with suppress(AttributeError): ret.update(i.grid._arg_names) - except AttributeError: - pass for d in self.dimensions: ret.update(d._arg_names) ret.update(p.name for p in self.parameters) @@ -1009,11 +1004,10 @@ def apply(self, **kwargs): except ctypes.ArgumentError as e: if e.args[0].startswith("argument "): argnum = int(e.args[0][9:].split(':')[0]) - 1 - newmsg = "error in argument '%s' with value '%s': %s" % ( - self.parameters[argnum].name, - arg_values[argnum], - e.args[0]) - raise ctypes.ArgumentError(newmsg) from e + raise ctypes.ArgumentError( + f"error in argument '{self.parameters[argnum].name}' with value" + f" '{arg_values[argnum]}': {e.args[0]}" + ) from e else: raise @@ -1064,7 +1058,7 @@ def _emit_timings(timings, indent=''): _emit_timings(timings, ' * ') if self._profiler._ops: - ops = ['%d --> %d' % i for i in self._profiler._ops] + ops = [f'{i[0]} --> {i[1]}' for i in self._profiler._ops] perf(f"Flops reduction after symbolic optimization: [{' ; '.join(ops)}]") def _emit_apply_profiling(self, args): @@ -1410,12 +1404,12 @@ def _physical_deviceid(self): else: try: return visible_devices[logical_deviceid] - except IndexError: + except IndexError as e: errmsg = (f"A deviceid value of {logical_deviceid} is not valid " f"with {visible_device_var}={visible_devices}. 
Note that " "deviceid corresponds to the logical index within the " "visible devices, not the physical device index.") - raise ValueError(errmsg) + raise ValueError(errmsg) from e else: return None @@ -1444,10 +1438,9 @@ def nbytes_avail_mapper(self): mapper[host_layer] = int(ANYCPU.memavail() / nproc) for layer in (host_layer, device_layer): - try: + with suppress(KeyError): + # Since might not have this layer in the mapper mapper[layer] -= self.nbytes_consumed_operator.get(layer, 0) - except KeyError: # Might not have this layer in the mapper - pass mapper = {k: int(v) for k, v in mapper.items()} @@ -1510,10 +1503,7 @@ def nbytes_consumed_arrays(self): or not i.is_regular: continue - if i.is_regular: - nbytes = i.nbytes - else: - nbytes = i.nbytes_max + nbytes = i.nbytes if i.is_regular else i.nbytes_max v = subs_op_args(nbytes, self) if not is_integer(v): # E.g. the Arrays used to store the MPI halo exchanges diff --git a/devito/operator/profiling.py b/devito/operator/profiling.py index b00c5cf04b..6a82928277 100644 --- a/devito/operator/profiling.py +++ b/devito/operator/profiling.py @@ -196,7 +196,7 @@ def summary(self, args, dtype, reduce_over=None): comm = args.comm summary = PerformanceSummary() - for name, data in self._sections.items(): + for name in self._sections: # Time to run the section time = max(getattr(args[self.name]._obj, name), 10e-7) @@ -275,7 +275,7 @@ def _allgather_from_comm(self, comm, time, ops, points, traffic, sops, itershape sops = [sops]*comm.size itershapess = comm.allgather(itershapes) - return list(zip(times, opss, pointss, traffics, sops, itershapess)) + return list(zip(times, opss, pointss, traffics, sops, itershapess, strict=True)) # Override basic summary so that arguments other than runtime are computed. 
def summary(self, args, dtype, reduce_over=None): @@ -318,7 +318,7 @@ def summary(self, args, dtype, reduce_over=None): # Same as above but without setup overheads (e.g., host-device # data transfers) mapper = defaultdict(list) - for (name, rank), v in summary.items(): + for (name, _), v in summary.items(): mapper[name].append(v.time) reduce_over_nosetup = sum(max(i) for i in mapper.values()) if reduce_over_nosetup == 0: @@ -460,10 +460,7 @@ def add_glb_vanilla(self, key, time): gflops = float(ops)/10**9 gflopss = gflops/time - if np.isnan(traffic) or traffic == 0: - oi = None - else: - oi = float(ops/traffic) + oi = None if np.isnan(traffic) or traffic == 0 else float(ops / traffic) self.globals[key] = PerfEntry(time, gflopss, None, oi, None, None) diff --git a/devito/operator/registry.py b/devito/operator/registry.py index c8aac315b7..35e5646204 100644 --- a/devito/operator/registry.py +++ b/devito/operator/registry.py @@ -45,15 +45,16 @@ def fetch(self, platform=None, mode=None, language='C', **kwargs): mode = 'custom' if language not in OperatorRegistry._languages: - raise ValueError("Unknown language `%s`" % language) + raise ValueError(f"Unknown language `{language}`") for cls in platform._mro(): for (p, m, l), kls in self.items(): if issubclass(p, cls) and m == mode and l == language: return kls - raise InvalidOperator("Cannot compile an Operator for `%s`" - % str((platform, mode, language))) + raise InvalidOperator( + f'Cannot compile an Operator for `{str((platform, mode, language))}`' + ) operator_registry = OperatorRegistry() diff --git a/devito/parameters.py b/devito/parameters.py index f545139649..0412380533 100644 --- a/devito/parameters.py +++ b/devito/parameters.py @@ -197,7 +197,7 @@ def init_configuration(configuration=configuration, env_vars_mapper=env_vars_map try: items = v.split(';') # Env variable format: 'var=k1:v1;k2:v2:k3:v3:...' 
- keys, values = zip(*[i.split(':') for i in items]) + keys, values = zip(*[i.split(':') for i in items], strict=True) # Casting values = [eval(i) for i in values] except AttributeError: @@ -215,7 +215,7 @@ def init_configuration(configuration=configuration, env_vars_mapper=env_vars_map except (TypeError, ValueError): keys[i] = j if len(keys) == len(values): - configuration.update(k, dict(zip(keys, values))) + configuration.update(k, dict(zip(keys, values, strict=True))) elif len(keys) == 1: configuration.update(k, keys[0]) else: @@ -269,7 +269,7 @@ def __enter__(self): configuration[k] = v def __exit__(self, exc_type, exc_val, traceback): - for k, v in self.params.items(): + for k in self.params: try: configuration[k] = self.previous[k] except ValueError: diff --git a/devito/passes/__init__.py b/devito/passes/__init__.py index f4ac2783c3..c92c64481e 100644 --- a/devito/passes/__init__.py +++ b/devito/passes/__init__.py @@ -30,7 +30,7 @@ def is_on_device(obj, gpu_fit): if isinstance(f, TimeFunction) and is_integer(f.save)] if 'all-fallback' in gpu_fit and fsave: - warning("TimeFunction %s assumed to fit the GPU memory" % fsave) + warning(f"TimeFunction {fsave} assumed to fit the GPU memory") return True return all(f in gpu_fit for f in fsave) diff --git a/devito/passes/clusters/aliases.py b/devito/passes/clusters/aliases.py index 8c33913326..f0c25a74be 100644 --- a/devito/passes/clusters/aliases.py +++ b/devito/passes/clusters/aliases.py @@ -92,10 +92,7 @@ def cire(clusters, mode, sregistry, options, platform): # NOTE: Handle prematurely expanded derivatives -- current default on # several backends, but soon to become legacy if mode == 'sops': - if options['expand']: - mode = 'eval-derivs' - else: - mode = 'index-derivs' + mode = 'eval-derivs' if options['expand'] else 'index-derivs' for cls in modes[mode]: transformer = cls(sregistry, options, platform) @@ -207,7 +204,7 @@ def _do_generate(self, exprs, exclude, cbk_search, cbk_compose=None): Carry out the bulk of 
the work of ``_generate``. """ counter = generator() - make = lambda: Symbol(name='dummy%d' % counter(), dtype=np.float32) + make = lambda: Symbol(name=f'dummy{counter()}', dtype=np.float32) if cbk_compose is None: cbk_compose = lambda *args: None @@ -376,7 +373,9 @@ def _generate(self, cgroup, exclude): candidates = sorted(grank, reverse=True)[:2] for i in candidates: lower_pri_elems = flatten([grank[j] for j in candidates if j != i]) - cbk_search = lambda e: self._cbk_search2(e, grank[i] + lower_pri_elems) + cbk_search = lambda e: self._cbk_search2( + e, grank[i] + lower_pri_elems # noqa: B023 + ) yield self._do_generate(exprs, exclude, cbk_search, self._cbk_compose) def _lookup_key(self, c): @@ -386,11 +385,11 @@ def _select(self, variants): if isinstance(self.opt_schedule_strategy, int): try: return variants[self.opt_schedule_strategy] - except IndexError: + except IndexError as e: raise CompilationError( f"Illegal schedule {self.opt_schedule_strategy}; " f"generated {len(variants)} schedules in total" - ) + ) from e return pick_best(variants) @@ -525,10 +524,8 @@ def collect(extracted, ispace, minstorage): unseen.remove(u) group = Group(group, ispace=ispace) - if minstorage: - k = group.dimensions_translated - else: - k = group.dimensions + k = group.dimensions_translated if minstorage else group.dimensions + k = frozenset(d for d in k if not d.is_NonlinearDerived) mapper.setdefault(k, []).append(group) @@ -571,7 +568,7 @@ def collect(extracted, ispace, minstorage): # Heuristic: first try retaining the larger ones smallest = len(min(groups, key=len)) fallback = groups - groups, remainder = split(groups, lambda g: len(g) > smallest) + groups, remainder = split(groups, lambda g: len(g) > smallest) # noqa: B023 if groups: queue.append(remainder) elif len(remainder) > 1: @@ -590,7 +587,7 @@ def collect(extracted, ispace, minstorage): offsets = [LabeledVector([(l, v[l] + distances[l]) for l in v.labels]) for v in c.offsets] subs = {i: i.function[[l + v.fromlabel(l, 
0) for l in b]] - for i, b, v in zip(c.indexeds, c.bases, offsets)} + for i, b, v in zip(c.indexeds, c.bases, offsets, strict=True)} pivot = uxreplace(c.expr, subs) # Distance of each aliased expression from the basis alias @@ -599,7 +596,9 @@ def collect(extracted, ispace, minstorage): for i in g._items: aliaseds.append(extracted[i.expr]) - distance = [o.distance(v) for o, v in zip(i.offsets, offsets)] + distance = [ + o.distance(v) for o, v in zip(i.offsets, offsets, strict=True) + ] distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)] distances.append(LabeledVector([(d, v.pop()) for d, v in distance])) @@ -726,14 +725,14 @@ def lower_aliases(aliases, meta, maxpar): m = i.dim.symbolic_min - i.dim.parent.symbolic_min else: m = 0 - d = dmapper[i.dim] = IncrDimension("%ss" % i.dim.name, i.dim, m, + d = dmapper[i.dim] = IncrDimension(f"{i.dim.name}s", i.dim, m, dd.symbolic_size, 1, dd.step) sub_iterators[i.dim] = d else: d = i.dim # Given the iteration `interval`, lower distances to indices - for distance, indices in zip(a.distances, indicess): + for distance, indices in zip(a.distances, indicess, strict=True): v = distance[interval.dim] or 0 try: indices.append(d - interval.lower + v) @@ -797,12 +796,12 @@ def optimize_schedule_rotations(schedule, sregistry): iis = candidate.lower iib = candidate.upper - name = sregistry.make_name(prefix='%sii' % d.root.name) + name = sregistry.make_name(prefix=f'{d.root.name}ii') ii = ModuloDimension(name, ds, iis, incr=iib) - cd = CustomDimension(name='%sc' % d.root.name, symbolic_min=ii, + cd = CustomDimension(name=f'{d.root.name}c', symbolic_min=ii, symbolic_max=iib, symbolic_size=n) - dsi = ModuloDimension('%si' % ds.root.name, cd, cd + ds - iis, n) + dsi = ModuloDimension(f'{ds.root.name}i', cd, cd + ds - iis, n) mapper = OrderedDict() for i in g: @@ -813,11 +812,13 @@ def optimize_schedule_rotations(schedule, sregistry): try: md = mapper[v] except KeyError: - name = sregistry.make_name(prefix='%sr' % 
d.root.name) + name = sregistry.make_name(prefix=f'{d.root.name}r') md = mapper.setdefault(v, ModuloDimension(name, ds, v, n)) mds.append(md) - indicess = [indices[:ridx] + [md] + indices[ridx + 1:] - for md, indices in zip(mds, i.indicess)] + indicess = [ + indices[:ridx] + [md] + indices[ridx + 1:] + for md, indices in zip(mds, i.indicess, strict=True) + ] # Update `writeto` by switching `d` to `dsi` intervals = k.intervals.switch(d, dsi).zero(dsi) @@ -851,11 +852,8 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, """ Turn a Schedule into a sequence of Clusters. """ - if opt_ftemps: - make = TempFunction - else: - # Typical case -- the user does *not* "see" the CIRE-created temporaries - make = TempArray + # Typical case -- the user does *not* "see" the CIRE-created temporaries + make = TempFunction if opt_ftemps else TempArray clusters = [] subs = {} @@ -887,10 +885,7 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, # Functions to minimize support variables such as strides etc min_halo = {i.dim: Size(abs(i.lower), abs(i.upper)) for i in writeto} - if opt_minmem: - functions = [] - else: - functions = retrieve_functions(pivot) + functions = [] if opt_minmem else retrieve_functions(pivot) halo = dict(min_halo) for f in functions: @@ -906,7 +901,7 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, # The indices used to write into the Array indices = [] - for i, s in zip(writeto, shift): + for i, s in zip(writeto, shift, strict=True): try: # E.g., `xs` sub_iterators = writeto.sub_iterators[i.dim] @@ -921,7 +916,9 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, shift=shift) expression = Eq(obj[indices], uxreplace(pivot, subs)) - callback = lambda idx: obj[[i + s for i, s in zip(idx, shift)]] + callback = lambda idx: obj[ # noqa: B023 + [i + s for i, s in zip(idx, shift, strict=True)] # noqa: B023 + ] else: # Degenerate case: scalar expression assert writeto.size 
== 0 @@ -930,11 +927,13 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, obj = Temp(name=name, dtype=dtype) expression = Eq(obj, uxreplace(pivot, subs)) - callback = lambda idx: obj + callback = lambda idx: obj # noqa: B023 # Create the substitution rules for the aliasing expressions - subs.update({aliased: callback(indices) - for aliased, indices in zip(aliaseds, indicess)}) + subs.update({ + aliased: callback(indices) + for aliased, indices in zip(aliaseds, indicess, strict=True) + }) properties = dict(meta.properties) @@ -1094,8 +1093,8 @@ def __new__(cls, items, ispace=None): processed.append(c) continue - f0 = lambda e: minimum(e, sdims) - f1 = lambda e: maximum(e, sdims) + f0 = lambda e: minimum(e, sdims) # noqa: B023 + f1 = lambda e: maximum(e, sdims) # noqa: B023 for f in (f0, f1): expr = f(c.expr) @@ -1112,7 +1111,7 @@ def __new__(cls, items, ispace=None): return obj def __repr__(self): - return "Group(%s)" % ", ".join([str(i) for i in self]) + return "Group({})".format(", ".join([str(i) for i in self])) def find_rotation_distance(self, d, interval): """ @@ -1137,7 +1136,10 @@ def find_rotation_distance(self, d, interval): @cached_property def Toffsets(self): - return [LabeledVector.transpose(*i) for i in zip(*[i.offsets for i in self])] + return [ + LabeledVector.transpose(*i) + for i in zip(*[i.offsets for i in self], strict=True) + ] @cached_property def diameter(self): @@ -1152,18 +1154,18 @@ def diameter(self): continue try: distance = int(max(v) - min(v)) - except TypeError: + except TypeError as e: # An entry in `v` has symbolic components, e.g. 
`x_m + 2` if len(set(v)) == 1: continue else: - # Worst-case scenario, we raraly end up here + # Worst-case scenario, we rarely end up here # Resort to the fast vector-based comparison machinery # (rather than the slower sympy.simplify) items = [Vector(i) for i in v] distance, = vmax(*items) - vmin(*items) if not is_integer(distance): - raise ValueError + raise ValueError('Distance is not an integer') from e ret[d] = max(ret[d], distance) return ret @@ -1213,7 +1215,7 @@ def _pivot_legal_rotations(self): assert distance == mini - rotation.upper distances.append(distance) - ret[d] = list(zip(m, distances)) + ret[d] = list(zip(m, distances, strict=True)) return ret @@ -1227,7 +1229,7 @@ def _pivot_min_intervals(self): ret = defaultdict(lambda: [np.inf, -np.inf]) for i in self: - distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets)] + distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets, strict=True)] distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)] for d, v in distance: @@ -1252,7 +1254,7 @@ def _pivot_legal_shifts(self): c = self.pivot ret = defaultdict(lambda: (-np.inf, np.inf)) - for i, ofs in zip(c.indexeds, c.offsets): + for i, ofs in zip(c.indexeds, c.offsets, strict=True): f = i.function for l in ofs.labels: @@ -1294,7 +1296,7 @@ def __init__(self, pivot, aliaseds, intervals, distances, score): self.score = score def __repr__(self): - return "Alias<<%s>>" % self.pivot + return f"Alias<<{self.pivot}>>" @property def free_symbols(self): @@ -1335,7 +1337,7 @@ def __init__(self, aliases=None): def __repr__(self): if self._list: - return "AliasList<\n %s\n>" % ",\n ".join(str(i) for i in self._list) + return "AliasList<\n {}\n>".format(",\n ".join(str(i) for i in self._list)) else: return "<>" @@ -1343,8 +1345,7 @@ def __len__(self): return self._list.__len__() def __iter__(self): - for i in self._list: - yield i + yield from self._list def add(self, pivot, aliaseds, intervals, distances, score): assert len(aliaseds) ==
len(distances) @@ -1396,7 +1397,7 @@ def cost(self): # Not just the sum for the individual items' cost! There might be # redundancies, which we factor out here... counter = generator() - make = lambda _: Symbol(name='dummy%d' % counter(), dtype=np.float32) + make = lambda _: Symbol(name=f'dummy{counter()}', dtype=np.float32) tot = 0 for v in as_mapper(self, lambda i: i.ispace).values(): @@ -1427,7 +1428,7 @@ def cit(ispace0, ispace1): The Common IterationIntervals of two IterationSpaces. """ found = [] - for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals): + for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals, strict=True): if it0 == it1: found.append(it0) else: diff --git a/devito/passes/clusters/asynchrony.py b/devito/passes/clusters/asynchrony.py index 8ee7792068..e32190ddef 100644 --- a/devito/passes/clusters/asynchrony.py +++ b/devito/passes/clusters/asynchrony.py @@ -249,10 +249,7 @@ def _actions_from_update_memcpy(c, d, clusters, actions, sregistry): else: assert tindex0.is_Modulo mapper = {(i.offset % i.modulo): i for i in c.sub_iterators[pd]} - if direction is Forward: - toffset = tindex0.offset + 1 - else: - toffset = tindex0.offset - 1 + toffset = tindex0.offset + 1 if direction is Forward else tindex0.offset - 1 try: tindex = mapper[toffset % tindex0.modulo] except KeyError: @@ -271,10 +268,7 @@ def _actions_from_update_memcpy(c, d, clusters, actions, sregistry): # Turn `c` into a prefetch Cluster `pc` expr = uxreplace(e, {tindex0: tindex, fetch: findex}) - if tindex is not tindex0: - ispace = c.ispace.augment({pd: tindex}) - else: - ispace = c.ispace + ispace = c.ispace.augment({pd: tindex}) if tindex is not tindex0 else c.ispace guard0 = c.guards.get(d, true)._subs(fetch, findex) guard1 = GuardBoundNext(function.indices[d], direction) diff --git a/devito/passes/clusters/blocking.py b/devito/passes/clusters/blocking.py index b279f5779c..d04962a52c 100644 --- a/devito/passes/clusters/blocking.py +++ 
b/devito/passes/clusters/blocking.py @@ -115,7 +115,7 @@ def _has_data_reuse(self, cluster): # If we are going to skew, then we might exploit reuse along an # otherwise SEQUENTIAL Dimension - if self.skewing: + if self.skewing: # noqa: SIM103 return True return False @@ -335,10 +335,7 @@ def __init__(self, sregistry, options): def process(self, clusters): # A tool to unroll the explicit integer block shapes, should there be any - if self.par_tile: - blk_size_gen = BlockSizeGenerator(self.par_tile) - else: - blk_size_gen = None + blk_size_gen = BlockSizeGenerator(self.par_tile) if self.par_tile else None return self._process_fdta(clusters, 1, blk_size_gen=blk_size_gen) @@ -558,12 +555,9 @@ def next(self, prefix, d, clusters): self.umt_small.iter() return self.umt_small.next() - if x: - item = self.umt.curitem() - else: - # We can't `self.umt.iter()` because we might still want to - # fallback to `self.umt_small` - item = self.umt.nextitem() + # We can't `self.umt.iter()` because we might still want to + # fallback to `self.umt_small` + item = self.umt.curitem() if x else self.umt.nextitem() # Handle user-provided rules # TODO: This is also rudimentary diff --git a/devito/passes/clusters/buffering.py b/devito/passes/clusters/buffering.py index 6d96de77bc..22f9004ee7 100644 --- a/devito/passes/clusters/buffering.py +++ b/devito/passes/clusters/buffering.py @@ -97,10 +97,7 @@ def key(f): assert callable(key) v1 = kwargs.get('opt_init_onwrite', False) - if callable(v1): - init_onwrite = v1 - else: - init_onwrite = lambda f: v1 + init_onwrite = v1 if callable(v1) else lambda f: v1 options = dict(options) options.update({ @@ -182,7 +179,7 @@ def callback(self, clusters, prefix): # If a buffer is read but never written, then we need to add # an Eq to step through the next slot # E.g., `ub[0, x] = usave[time+2, x]` - for b, v in descriptors.items(): + for _, v in descriptors.items(): if not v.is_readonly: continue if c is not v.firstread: @@ -225,7 +222,7 @@ def 
callback(self, clusters, prefix): # Append the copy-back if `c` is the last-write of some buffers # E.g., `usave[time+1, x] = ub[t1, x]` - for b, v in descriptors.items(): + for _, v in descriptors.items(): if v.is_readonly: continue if c is not v.lastwrite: @@ -278,7 +275,7 @@ def _optimize(self, clusters, descriptors): # "buffer-wise" splitting of the IterationSpaces (i.e., only # relevant if there are at least two read-only buffers) stamp = Stamp() - key0 = lambda: stamp + key0 = lambda: stamp # noqa: B023 else: continue @@ -288,7 +285,7 @@ def _optimize(self, clusters, descriptors): processed.append(c) continue - key1 = lambda d: not d._defines & v.dim._defines + key1 = lambda d: not d._defines & v.dim._defines # noqa: B023 dims = c.ispace.project(key1).itdims ispace = c.ispace.lift(dims, key0()) processed.append(c.rebuild(ispace=ispace)) @@ -384,10 +381,11 @@ def generate_buffers(clusters, key, sregistry, options, **kwargs): if async_degree is not None: if async_degree < size: - warning("Ignoring provided asynchronous degree as it'd be " - "too small for the required buffer (provided %d, " - "but need at least %d for `%s`)" - % (async_degree, size, f.name)) + warning( + 'Ignoring provided asynchronous degree as it would be ' + f'too small for the required buffer (provided {async_degree}, ' + f'but need at least {size} for `{f.name}`)' + ) else: size = async_degree @@ -405,7 +403,7 @@ def generate_buffers(clusters, key, sregistry, options, **kwargs): # Finally create the actual buffer cls = callback or Array - name = sregistry.make_name(prefix='%sb' % f.name) + name = sregistry.make_name(prefix=f'{f.name}b') # We specify the padding to match the input Function's one, so that # the array can be used in place of the Function with valid strides # Plain Array do not track mapped so we default to no padding @@ -445,7 +443,7 @@ def __init__(self, f, b, clusters): self.indices = extract_indices(f, self.dim, clusters) def __repr__(self): - return "Descriptor[%s -> %s]" 
% (self.f, self.b) + return f"Descriptor[{self.f} -> {self.b}]" @property def size(self): @@ -564,7 +562,7 @@ def write_to(self): # Analogous to the above, we need to include the halo region as well ihalo = IntervalGroup([ Interval(i.dim, -h.left, h.right, i.stamp) - for i, h in zip(ispace, self.b._size_halo) + for i, h in zip(ispace, self.b._size_halo, strict=True) ]) ispace = IterationSpace.union(ispace, IterationSpace(ihalo)) @@ -580,10 +578,7 @@ def step_to(self): # May be `db0` (e.g., for double buffering) or `time` dim = self.ispace[self.dim].dim - if self.is_forward_buffering: - direction = Forward - else: - direction = Backward + direction = Forward if self.is_forward_buffering else Backward return self.write_to.switch(self.xd, dim, direction) @@ -669,7 +664,7 @@ def make_mds(descriptors, prefix, sregistry): # follows SymPy's index ordering (time, time-1, time+1) after modulo # replacement, so that associativity errors are consistent. This very # same strategy is also applied in clusters/algorithms/Stepper - key = lambda i: -np.inf if i - p == 0 else (i - p) + key = lambda i: -np.inf if i - p == 0 else (i - p) # noqa: B023 indices = sorted(v.indices, key=key) for i in indices: @@ -804,8 +799,9 @@ def offset_from_centre(d, indices): if not ((p - v).is_Integer or (p - v).is_Symbol): raise ValueError except (IndexError, ValueError): - raise NotImplementedError("Cannot apply buffering with nonlinear " - "index functions (found `%s`)" % v) + raise NotImplementedError( + f'Cannot apply buffering with nonlinear index functions (found `{v}`)' + ) from None try: # Start assuming e.g. 
`indices = [time - 1, time + 2]` diff --git a/devito/passes/clusters/cse.py b/devito/passes/clusters/cse.py index 025acdfdee..d4d7f0a8b8 100644 --- a/devito/passes/clusters/cse.py +++ b/devito/passes/clusters/cse.py @@ -353,10 +353,7 @@ def catch(exprs, mode): candidates = [] for k, v in mapper.items(): - if mode in ('basic', 'smartsort'): - sources = [i for i in v if i == k.expr] - else: - sources = v + sources = [i for i in v if i == k.expr] if mode in ('basic', 'smartsort') else v if len(sources) > 1: candidates.append(Candidate(k.expr, k.conditionals, sources)) diff --git a/devito/passes/clusters/derivatives.py b/devito/passes/clusters/derivatives.py index 47607ae306..940d241343 100644 --- a/devito/passes/clusters/derivatives.py +++ b/devito/passes/clusters/derivatives.py @@ -48,10 +48,7 @@ def dump(exprs, c): for e in c.exprs: # Optimization 1: if the LHS is already a Symbol, then surely it's # usable as a temporary for one of the IndexDerivatives inside `e` - if e.lhs.is_Symbol and e.operation is None: - reusable = {e.lhs} - else: - reusable = set() + reusable = {e.lhs} if e.lhs.is_Symbol and e.operation is None else set() expr, v = _core(e, c, c.ispace, weights, reusable, mapper, **kwargs) @@ -110,7 +107,7 @@ def _(expr, c, ispace, weights, reusables, mapper, **kwargs): cbk0 = deriv_schedule_registry[options['deriv-schedule']] cbk1 = deriv_unroll_registry[options['deriv-unroll']] except KeyError: - raise ValueError("Unknown derivative lowering mode") + raise ValueError("Unknown derivative lowering mode") from None # Lower the IndexDerivative init, ideriv = cbk0(expr) diff --git a/devito/passes/clusters/factorization.py b/devito/passes/clusters/factorization.py index 812dc9c180..3d157048c3 100644 --- a/devito/passes/clusters/factorization.py +++ b/devito/passes/clusters/factorization.py @@ -56,7 +56,9 @@ def collect_special(expr, strategy): Factorize elemental functions, pows, and other special symbolic objects, prioritizing the most expensive entities. 
""" - args, candidates = zip(*[_collect_nested(a, strategy) for a in expr.args]) + args, candidates = zip( + *[_collect_nested(a, strategy) for a in expr.args], strict=True + ) candidates = ReducerMap.fromdicts(*candidates) funcs = candidates.getall('funcs', []) @@ -168,11 +170,9 @@ def collect_const(expr): # Back to the running example # -> (a + c) add = Add(*v) - if add == 0: - mul = S.Zero - else: - # -> 3.*(a + c) - mul = Mul(k, add, evaluate=False) + + # -> 3.*(a + c) + mul = S.Zero if add == 0 else Mul(k, add, evaluate=False) terms.append(mul) @@ -200,7 +200,9 @@ def _collect_nested(expr, strategy): return expr, {'coeffs': expr} elif q_routine(expr): # E.g., a DefFunction - args, candidates = zip(*[_collect_nested(a, strategy) for a in expr.args]) + args, candidates = zip( + *[_collect_nested(a, strategy) for a in expr.args], strict=True + ) return expr.func(*args, evaluate=False), {} elif expr.is_Function: return expr, {'funcs': expr} @@ -212,7 +214,9 @@ def _collect_nested(expr, strategy): elif expr.is_Add: return strategies[strategy](expr, strategy), {} elif expr.is_Mul: - args, candidates = zip(*[_collect_nested(a, strategy) for a in expr.args]) + args, candidates = zip( + *[_collect_nested(a, strategy) for a in expr.args], strict=True + ) expr = reuse_if_untouched(expr, args, evaluate=True) return expr, ReducerMap.fromdicts(*candidates) elif expr.is_Equality: @@ -220,7 +224,9 @@ def _collect_nested(expr, strategy): expr = reuse_if_untouched(expr, (expr.lhs, rhs)) return expr, {} else: - args, candidates = zip(*[_collect_nested(a, strategy) for a in expr.args]) + args, candidates = zip( + *[_collect_nested(a, strategy) for a in expr.args], strict=True + ) return expr.func(*args), ReducerMap.fromdicts(*candidates) diff --git a/devito/passes/clusters/implicit.py b/devito/passes/clusters/implicit.py index af19e1d3c5..36058f6393 100644 --- a/devito/passes/clusters/implicit.py +++ b/devito/passes/clusters/implicit.py @@ -225,8 +225,10 @@ def _lower_msd(dim, 
cluster): @_lower_msd.register(MultiSubDimension) def _(dim, cluster): i_dim = dim.implicit_dimension - mapper = {tkn: dim.functions[i_dim, mM] - for tkn, mM in zip(dim.tkns, dim.bounds_indices)} + mapper = { + tkn: dim.functions[i_dim, mM] + for tkn, mM in zip(dim.tkns, dim.bounds_indices, strict=True) + } return mapper, i_dim @@ -258,10 +260,7 @@ def reduce(m0, m1, edims, prefix): raise NotImplementedError d, = edims - if prefix[d].direction is Forward: - func = max - else: - func = min + func = max if prefix[d].direction is Forward else min def key(i): try: diff --git a/devito/passes/clusters/misc.py b/devito/passes/clusters/misc.py index d2b4a2c9f2..0324f03eae 100644 --- a/devito/passes/clusters/misc.py +++ b/devito/passes/clusters/misc.py @@ -100,10 +100,7 @@ def callback(self, clusters, prefix): # Lifted scalar clusters cannot be guarded # as they would not be in the scope of the guarded clusters # unless the guard is for an outer dimension - if c.is_scalar and not (prefix[:-1] and c.guards): - guards = {} - else: - guards = c.guards + guards = {} if c.is_scalar and not (prefix[:-1] and c.guards) else c.guards lifted.append(c.rebuild(ispace=ispace, properties=properties, guards=guards)) @@ -144,7 +141,7 @@ def callback(self, cgroups, prefix): # Fusion processed = [] - for k, group in groupby(clusters, key=self._key): + for _, group in groupby(clusters, key=self._key): g = list(group) for maybe_fusible in self._apply_heuristics(g): @@ -348,7 +345,7 @@ def is_cross(source, sink): # True if a cross-ClusterGroup dependence, False otherwise t0 = source.timestamp t1 = sink.timestamp - v = len(cg0.exprs) + v = len(cg0.exprs) # noqa: B023 return t0 < v <= t1 or t1 < v <= t0 for n1, cg1 in enumerate(cgroups[n+1:], start=n+1): @@ -369,10 +366,12 @@ def is_cross(source, sink): # Any anti- and iaw-dependences impose that `cg1` follows `cg0` # and forbid any sort of fusion. 
Fences have the same effect - elif (any(scope.d_anti_gen()) or - any(i.is_iaw for i in scope.d_output_gen()) or - any(c.is_fence for c in flatten(cgroups[n:n1+1]))) or any(not (i.cause and i.cause & prefix) - for i in scope.d_flow_gen()) or any(scope.d_output_gen()): + elif ( + any(scope.d_anti_gen()) or + any(i.is_iaw for i in scope.d_output_gen()) or + any(c.is_fence for c in flatten(cgroups[n:n1+1])) + ) or any(not (i.cause and i.cause & prefix) for i in scope.d_flow_gen()) \ + or any(scope.d_output_gen()): dag.add_edge(cg0, cg1) return dag @@ -397,7 +396,7 @@ def fuse(clusters, toposort=False, options=None): nxt = clusters while True: nxt = fuse(clusters, toposort='nofuse', options=options) - if all(c0 is c1 for c0, c1 in zip(clusters, nxt)): + if all(c0 is c1 for c0, c1 in zip(clusters, nxt, strict=True)): break clusters = nxt clusters = fuse(clusters, toposort=False, options=options) diff --git a/devito/passes/equations/linearity.py b/devito/passes/equations/linearity.py index 9c5bfc1d28..d2914769be 100644 --- a/devito/passes/equations/linearity.py +++ b/devito/passes/equations/linearity.py @@ -1,4 +1,5 @@ from collections import Counter +from contextlib import suppress from functools import singledispatch from itertools import product @@ -58,10 +59,8 @@ def inspect(expr): m = inspect(a) mapper.update(m) - try: + with suppress(KeyError): counter.update(m[a]) - except KeyError: - pass mapper[expr] = counter diff --git a/devito/passes/iet/asynchrony.py b/devito/passes/iet/asynchrony.py index 9d0c3387c8..aa205e818c 100644 --- a/devito/passes/iet/asynchrony.py +++ b/devito/passes/iet/asynchrony.py @@ -160,7 +160,7 @@ def _(iet, key=None, tracker=None, sregistry=None, **kwargs): wrap = While(CondNe(FieldFromPointer(sdata.symbolic_flag, sbase), 0), wrap) # pthread functions expect exactly one argument of type void* - tparameter = Pointer(name='_%s' % sdata.name) + tparameter = Pointer(name=f'_{sdata.name}') # Unpack `sdata` unpacks = [PointerCast(sdata, 
tparameter), BlankLine] @@ -184,7 +184,7 @@ def _(iet, key=None, tracker=None, sregistry=None, **kwargs): callback = lambda body: Iteration(list(body) + footer, d, threads.size - 1) # Create an efunc to initialize `sdata` and tear up the pthreads - name = 'init_%s' % sdata.name + name = f'init_{sdata.name}' body = [] for i in sdata.cfields: if i.is_AbstractFunction: @@ -230,7 +230,7 @@ def inject_async_tear_updown(iet, tracker=None, **kwargs): tearup = [] teardown = [] - for sdata, threads, init, shutdown in tracker.values(): + for _, threads, init, shutdown in tracker.values(): # Tear-up arguments = list(init.parameters) for n, a in enumerate(list(arguments)): diff --git a/devito/passes/iet/definitions.py b/devito/passes/iet/definitions.py index 0a5416d6a2..29cc8c9787 100644 --- a/devito/passes/iet/definitions.py +++ b/devito/passes/iet/definitions.py @@ -98,10 +98,7 @@ def _alloc_object_on_low_lat_mem(self, site, obj, storage): """ decl = Definition(obj) - if obj._C_init: - definition = (decl, obj._C_init) - else: - definition = (decl) + definition = (decl, obj._C_init) if obj._C_init else (decl) frees = obj._C_free @@ -130,7 +127,7 @@ def _alloc_array_on_global_mem(self, site, obj, storage): return # Create input array - name = '%s_init' % obj.name + name = f'{obj.name}_init' initvalue = np.array([unevaluate(pow_to_mul(i)) for i in obj.initvalue]) src = Array(name=name, dtype=obj.dtype, dimensions=obj.dimensions, space='host', scope='stack', initvalue=initvalue) @@ -693,7 +690,9 @@ def process(self, graph): def make_zero_init(obj, rcompile, sregistry): cdims = [] - for d, (h0, h1), s in zip(obj.dimensions, obj._size_halo, obj.symbolic_shape): + for d, (h0, h1), s in zip( + obj.dimensions, obj._size_halo, obj.symbolic_shape, strict=True + ): if d.is_NonlinearDerived: assert h0 == h1 == 0 m = 0 diff --git a/devito/passes/iet/engine.py b/devito/passes/iet/engine.py index d119fbb664..5a383332e3 100644 --- a/devito/passes/iet/engine.py +++ 
b/devito/passes/iet/engine.py @@ -1,4 +1,5 @@ from collections import defaultdict +from contextlib import suppress from functools import partial, singledispatch, wraps import numpy as np @@ -118,11 +119,9 @@ def sync_mapper(self): continue for j in dag.all_predecessors(i.name): - try: + with suppress(KeyError): + # In the case where `j` is a foreign Callable v.extend(FindNodes(Iteration).visit(self.efuncs[j])) - except KeyError: - # `j` is a foreign Callable - pass return found @@ -217,10 +216,7 @@ def iet_pass(func): @wraps(func) def wrapper(*args, **kwargs): - if timed_pass.is_enabled(): - maybe_timed = timed_pass - else: - maybe_timed = lambda func, name: func + maybe_timed = timed_pass if timed_pass.is_enabled() else lambda func, name: func try: # If the pass has been disabled, skip it if not kwargs['options'][func.__name__]: @@ -316,7 +312,7 @@ def reuse_compounds(efuncs, sregistry=None): mapper.update({i0: i1, b0: b1}) - for f0, f1 in zip(i0.fields, i1.fields): + for f0, f1 in zip(i0.fields, i1.fields, strict=True): for cls in (FieldFromComposite, FieldFromPointer): if f0.is_AbstractFunction: mapper[cls(f0._C_symbol, b0)] = cls(f1._C_symbol, b1) @@ -395,7 +391,7 @@ def abstract_component_accesses(efuncs): f_flatten = f.func(name='flat_data', components=f.c0) subs = {} - for ca, o in zip(compaccs, compoff_params): + for ca, o in zip(compaccs, compoff_params, strict=True): indices = [Mul(arity_param, i, evaluate=False) for i in ca.indices] indices[-1] += o subs[ca] = f_flatten.indexed[indices] @@ -626,7 +622,7 @@ def _(i, mapper, sregistry): name0 = pp.name base = sregistry.make_name(prefix=name0) - name1 = sregistry.make_name(prefix='%s_blk' % base) + name1 = sregistry.make_name(prefix=f'{base}_blk') bd = i.parent._rebuild(name1, pp) d = i._rebuild(name0, bd, i._min.subs(p, bd), i._max.subs(p, bd)) diff --git a/devito/passes/iet/langbase.py b/devito/passes/iet/langbase.py index da28115d91..d4d00f6e8a 100644 --- a/devito/passes/iet/langbase.py +++ 
b/devito/passes/iet/langbase.py @@ -1,4 +1,3 @@ -from abc import ABC from functools import singledispatch from itertools import takewhile @@ -30,7 +29,7 @@ class LangMeta(type): def __getitem__(self, k): if k not in self.mapper: - raise NotImplementedError("Missing required mapping for `%s`" % k) + raise NotImplementedError(f"Missing required mapping for `{k}`") return self.mapper[k] def get(self, k, v=None): @@ -149,7 +148,7 @@ def _map_delete(cls, f, imask=None, devicerm=None): raise NotImplementedError -class LangTransformer(ABC): +class LangTransformer: """ Abstract base class defining a series of methods capable of specializing @@ -473,13 +472,13 @@ def _(iet): if objcomm is not None: body = _make_setdevice_mpi(iet, objcomm, nodes=lang_init) - header = c.Comment('Beginning of %s+MPI setup' % self.langbb['name']) - footer = c.Comment('End of %s+MPI setup' % self.langbb['name']) + header = c.Comment(f'Beginning of {self.langbb["name"]}+MPI setup') + footer = c.Comment(f'End of {self.langbb["name"]}+MPI setup') else: body = _make_setdevice_seq(iet, nodes=lang_init) - header = c.Comment('Beginning of %s setup' % self.langbb['name']) - footer = c.Comment('End of %s setup' % self.langbb['name']) + header = c.Comment(f'Beginning of {self.langbb["name"]} setup') + footer = c.Comment(f'End of {self.langbb["name"]} setup') init = List(header=header, body=body, footer=footer) iet = iet._rebuild(body=iet.body._rebuild(init=init)) @@ -543,7 +542,7 @@ def make_sections_from_imask(f, imask=None): datashape = infer_transfer_datashape(f, imask) sections = [] - for i, j in zip(imask, datashape): + for i, j in zip(imask, datashape, strict=False): if i is FULL: start, size = 0, j else: diff --git a/devito/passes/iet/languages/CXX.py b/devito/passes/iet/languages/CXX.py index 5453ea58d6..c05ac37822 100644 --- a/devito/passes/iet/languages/CXX.py +++ b/devito/passes/iet/languages/CXX.py @@ -12,13 +12,11 @@ __all__ = ['CXXBB', 'CXXDataManager', 'CXXOrchestrator'] -def 
std_arith(prefix=None): +def std_arith(prefix=''): if prefix: # Method definition prefix, e.g. "__host__" # Make sure there is a space between the prefix and the method name - prefix = prefix if prefix.endswith(" ") else f"{prefix} " - else: - prefix = "" + prefix = prefix if prefix.endswith(' ') else f'{prefix} ' return f""" #include diff --git a/devito/passes/iet/languages/openacc.py b/devito/passes/iet/languages/openacc.py index cb924845de..0d860a6dc9 100644 --- a/devito/passes/iet/languages/openacc.py +++ b/devito/passes/iet/languages/openacc.py @@ -33,9 +33,9 @@ def _make_clauses(cls, ncollapsed=0, reduction=None, tile=None, **kwargs): if tile: stile = [str(tile[i]) for i in range(ncollapsed)] - clauses.append('tile(%s)' % ','.join(stile)) + clauses.append('tile({})'.format(','.join(stile))) elif ncollapsed > 1: - clauses.append('collapse(%d)' % ncollapsed) + clauses.append(f'collapse({ncollapsed})') if reduction: clauses.append(cls._make_clause_reduction_from_imask(reduction)) @@ -49,10 +49,10 @@ def _make_clauses(cls, ncollapsed=0, reduction=None, tile=None, **kwargs): # The NVC 20.7 and 20.9 compilers have a bug which triggers data movement for # indirectly indexed arrays (e.g., a[b[i]]) unless a present clause is used if presents: - clauses.append("present(%s)" % ",".join(presents)) + clauses.append("present({})".format(",".join(presents))) if deviceptrs: - clauses.append("deviceptr(%s)" % ",".join(deviceptrs)) + clauses.append("deviceptr({})".format(",".join(deviceptrs))) return clauses diff --git a/devito/passes/iet/languages/openmp.py b/devito/passes/iet/languages/openmp.py index e7027ac272..32ace9d473 100644 --- a/devito/passes/iet/languages/openmp.py +++ b/devito/passes/iet/languages/openmp.py @@ -45,8 +45,8 @@ class OmpRegion(ParallelBlock): @classmethod def _make_header(cls, nthreads, private=None): - private = ('private(%s)' % ','.join(private)) if private else '' - return c.Pragma('omp parallel num_threads(%s) %s' % (nthreads.name, private)) + 
private = ('private({})'.format(','.join(private))) if private else ''
+        return c.Pragma(f'omp parallel num_threads({nthreads.name}) {private}')
 
 
 class OmpIteration(PragmaIteration):
@@ -64,14 +64,16 @@ def _make_clauses(cls, ncollapsed=0, chunk_size=None, nthreads=None,
         clauses = []
 
         if ncollapsed > 1:
-            clauses.append('collapse(%d)' % ncollapsed)
+            clauses.append(f'collapse({ncollapsed})')
 
         if chunk_size is not False:
-            clauses.append('schedule(%s,%s)' % (schedule or 'dynamic',
-                                                chunk_size or 1))
+            clauses.append('schedule({},{})'.format(
+                schedule or 'dynamic',
+                chunk_size or 1
+            ))
 
         if nthreads:
-            clauses.append('num_threads(%s)' % nthreads)
+            clauses.append(f'num_threads({nthreads})')
 
         if reduction:
             clauses.append(cls._make_clause_reduction_from_imask(reduction))
@@ -93,7 +95,7 @@ def _make_clauses(cls, **kwargs):
         indexeds = FindSymbols('indexeds').visit(kwargs['nodes'])
         deviceptrs = filter_ordered(i.name for i in indexeds if i.function._mem_local)
         if deviceptrs:
-            clauses.append("is_device_ptr(%s)" % ",".join(deviceptrs))
+            clauses.append("is_device_ptr({})".format(",".join(deviceptrs)))
 
         return clauses
 
@@ -261,21 +263,17 @@ def _support_array_reduction(cls, compiler):
         if isinstance(compiler, GNUCompiler) and \
            compiler.version < Version("6.0"):
             return False
-        elif isinstance(compiler, NvidiaCompiler):
-            # NVC++ does not support array reduction and leads to segfault
-            return False
         else:
-            return True
+            # NVC++ does not support array reduction and leads to segfault
+            return not isinstance(compiler, NvidiaCompiler)
 
     @classmethod
     def _support_complex_reduction(cls, compiler):
         # In case we have a CustomCompiler
         if isinstance(compiler, CustomCompiler):
             compiler = compiler._base()
-        if isinstance(compiler, GNUCompiler):
-            # Gcc doesn't supports complex reduction
-            return False
-        return True
+        # Gcc doesn't support complex reduction
+        return not isinstance(compiler, GNUCompiler)
 
 
 class Ompizer(AbstractOmpizer):
diff --git a/devito/passes/iet/linearization.py b/devito/passes/iet/linearization.py
index 
0d371ccb8e..aca2485444 100644 --- a/devito/passes/iet/linearization.py +++ b/devito/passes/iet/linearization.py @@ -34,10 +34,7 @@ def linearize(graph, **kwargs): else: key = lambda f: f.is_AbstractFunction and f.ndim > 1 and not f._mem_stack - if options['index-mode'] == 'int32': - dtype = np.int32 - else: - dtype = np.int64 + dtype = np.int32 if options['index-mode'] == 'int32' else np.int64 # NOTE: Even if `mode=False`, `key` may still want to enforce linearization # of some Functions, so it takes precedence and we then attempt to linearize @@ -157,7 +154,7 @@ def add(self, f): k = key1(f, d) if not k or k in self.sizes: continue - name = self.sregistry.make_name(prefix='%s_fsz' % d.name) + name = self.sregistry.make_name(prefix=f'{d.name}_fsz') self.sizes[k] = Size(name=name, dtype=dtype, is_const=True) # Update unique strides table @@ -168,7 +165,7 @@ def add(self, f): continue if k in self.strides: continue - name = self.sregistry.make_name(prefix='%s_stride' % d.name) + name = self.sregistry.make_name(prefix=f'{d.name}_stride') self.strides[k] = Stride(name=name, dtype=dtype, is_const=True) def update(self, functions): @@ -192,7 +189,7 @@ def map_strides(self, f): sizes = self.get_sizes(f) return {d: self.strides[sizes[n:]] for n, d in enumerate(dims)} elif f in self.strides_dynamic: - return {d: i for d, i in zip(dims, self.strides_dynamic[f])} + return {d: i for d, i in zip(dims, self.strides_dynamic[f], strict=True)} else: return {} @@ -270,9 +267,8 @@ def linearize_accesses(iet, key0, tracker=None): # 4) What `strides` can indeed be constructed? 
mapper = {} for sizes, stride in tracker.strides.items(): - if stride in candidates: - if set(sizes).issubset(instances): - mapper[stride] = sizes + if stride in candidates and set(sizes).issubset(instances): + mapper[stride] = sizes # 5) Construct what needs to *and* can be constructed stmts, stmts1 = [], [] @@ -316,7 +312,7 @@ def _(f, d): @singledispatch def _generate_linearization_basic(f, i, tracker): - assert False + raise AssertionError('This is not allowed') @_generate_linearization_basic.register(DiscreteFunction) @@ -397,7 +393,7 @@ def linearize_transfers(iet, sregistry=None, **kwargs): start, size = imask[0], 1 if start != 0: # Spare the ugly generated code if unnecessary (occurs often) - name = sregistry.make_name(prefix='%s_ofs' % n.function.name) + name = sregistry.make_name(prefix=f'{n.function.name}_ofs') wildcard = Wildcard(name=name, dtype=np.int32, is_const=True) symsect = n._rebuild(imask=imask).sections diff --git a/devito/passes/iet/misc.py b/devito/passes/iet/misc.py index dbfcbd8394..1dd2a7ad52 100644 --- a/devito/passes/iet/misc.py +++ b/devito/passes/iet/misc.py @@ -119,7 +119,7 @@ def relax_incr_dimensions(iet, options=None, **kwargs): roots_max = {i.dim.root: i.symbolic_max for i in outer} # Process inner iterations and adjust their bounds - for n, i in enumerate(inner): + for _, i in enumerate(inner): # If definitely in-bounds, as ensured by a prior compiler pass, then # we can skip this step if i.is_Inbound: @@ -193,7 +193,7 @@ def _generate_macros_findexeds(iet, sregistry=None, tracker=None, **kwargs): except KeyError: pass - pname = sregistry.make_name(prefix='%sL' % i.name) + pname = sregistry.make_name(prefix=f'{i.name}L') header, v = i.bind(pname) subs[i] = v @@ -284,7 +284,7 @@ def remove_redundant_moddims(iet): subs = {d: sympy.S.Zero for d in degenerates} redundants = as_mapper(others, key=lambda d: d.offset % d.modulo) - for k, v in redundants.items(): + for _, v in redundants.items(): chosen = v.pop(0) subs.update({d: chosen 
for d in v}) diff --git a/devito/passes/iet/mpi.py b/devito/passes/iet/mpi.py index 9dcad26008..3a3d354905 100644 --- a/devito/passes/iet/mpi.py +++ b/devito/passes/iet/mpi.py @@ -243,10 +243,7 @@ def _drop_if_unwritten(iet, options=None, **kwargs): which would call the generated library directly. """ drop_unwritten = options['dist-drop-unwritten'] - if not callable(drop_unwritten): - key = lambda f: drop_unwritten - else: - key = drop_unwritten + key = (lambda f: drop_unwritten) if not callable(drop_unwritten) else drop_unwritten # Analysis writes = {i.write for i in FindNodes(Expression).visit(iet)} @@ -525,7 +522,7 @@ def _semantical_eq_loc_indices(hsf0, hsf1): if hsf0.loc_indices != hsf1.loc_indices: return False - for v0, v1 in zip(hsf0.loc_values, hsf1.loc_values): + for v0, v1 in zip(hsf0.loc_values, hsf1.loc_values, strict=False): if v0 is v1: continue diff --git a/devito/passes/iet/orchestration.py b/devito/passes/iet/orchestration.py index b2bc5b0caf..3bea70fb0f 100644 --- a/devito/passes/iet/orchestration.py +++ b/devito/passes/iet/orchestration.py @@ -1,4 +1,5 @@ from collections import OrderedDict +from contextlib import suppress from functools import singledispatch from sympy import Or @@ -95,11 +96,11 @@ def _make_syncarray(self, iet, sync_ops, layer): qid = None body = list(iet.body) - try: - body.extend([self.langbb._map_update_device(s.target, s.imask, qid=qid) - for s in sync_ops]) - except NotImplementedError: - pass + with suppress(NotImplementedError): + body.extend([ + self.langbb._map_update_device(s.target, s.imask, qid=qid) + for s in sync_ops + ]) iet = List(body=body) return iet, [] @@ -212,11 +213,11 @@ def _(layer, iet, sync_ops, lang, sregistry): body.extend([DummyExpr(s.handle, 1) for s in sync_ops]) body.append(BlankLine) - name = 'copy_to_%s' % layer.suffix + name = f'copy_to_{layer.suffix}' except NotImplementedError: # A non-device backend body = [] - name = 'copy_from_%s' % layer.suffix + name = f'copy_from_{layer.suffix}' 
body.extend(list(iet.body)) @@ -243,10 +244,10 @@ def _(layer, iet, sync_ops, lang, sregistry): body.append(lang._map_wait(qid)) body.append(BlankLine) - name = 'prefetch_from_%s' % layer.suffix + name = f'prefetch_from_{layer.suffix}' except NotImplementedError: body = [] - name = 'prefetch_to_%s' % layer.suffix + name = f'prefetch_to_{layer.suffix}' body.extend([DummyExpr(s.handle, 2) for s in sync_ops]) diff --git a/devito/passes/iet/parpragma.py b/devito/passes/iet/parpragma.py index f03b8a3305..ec02a5e3cb 100644 --- a/devito/passes/iet/parpragma.py +++ b/devito/passes/iet/parpragma.py @@ -1,4 +1,5 @@ from collections import defaultdict +from contextlib import suppress from functools import cached_property import cgen as c @@ -190,21 +191,21 @@ def _make_clause_reduction_from_imask(cls, reductions): if i.is_Indexed: f = i.function bounds = [] - for k, d in zip(imask, f.dimensions): + for k, d in zip(imask, f.dimensions, strict=False): if is_integer(k): - bounds.append('[%s]' % k) + bounds.append(f'[{k}]') elif k is FULL: # Lower FULL Dimensions into a range spanning the entire # Dimension space, e.g. 
`reduction(+:f[0:f_vec->size[1]])` - bounds.append('[0:%s]' % f._C_get_field(FULL, d).size) + bounds.append(f'[0:{f._C_get_field(FULL, d).size}]') else: assert isinstance(k, tuple) and len(k) == 2 - bounds.append('[%s:%s]' % k) - mapper[r.name].append('%s%s' % (i.name, ''.join(bounds))) + bounds.append('[{}:{}]'.format(*k)) + mapper[r.name].append('{}{}'.format(i.name, ''.join(bounds))) else: mapper[r.name].append(str(i)) - args = ['reduction(%s:%s)' % (k, ','.join(v)) for k, v in mapper.items()] + args = ['reduction({}:{})'.format(k, ','.join(v)) for k, v in mapper.items()] return ' '.join(args) @@ -275,10 +276,7 @@ def _make_partree(self, candidates, nthreads=None): if all(i.is_Affine for i in candidates): bundles = FindNodes(ExpressionBundle).visit(root) sops = sum(i.ops for i in bundles) - if sops >= self.dynamic_work: - schedule = 'dynamic' - else: - schedule = 'static' + schedule = 'dynamic' if sops >= self.dynamic_work else 'static' if nthreads is None: # pragma ... for ... schedule(..., 1) nthreads = self.nthreads @@ -473,16 +471,14 @@ def functions(self): def expr_symbols(self): retval = [self.function.indexed] for i in self.arguments + tuple(flatten(self.sections)): - try: + with suppress(AttributeError): retval.extend(i.free_symbols) - except AttributeError: - pass return tuple(retval) @cached_property def _generate(self): # Stringify sections - sections = ''.join(['[%s:%s]' % (ccode(i), ccode(j)) + sections = ''.join([f'[{ccode(i)}:{ccode(j)}]' for i, j in self.sections]) arguments = [ccode(i) for i in self.arguments] return self.pragma % (self.function.name, sections, *arguments) @@ -503,7 +499,7 @@ def __init__(self, sregistry, options, platform, compiler): self.par_tile = options['par-tile'].reset() self.par_disabled = options['par-disabled'] - def _score_candidate(self, n0, root, collapsable=()): + def _score_candidate(self, n0, root, collapsible=()): # `ndptrs`, the number of device pointers, part of the score too to # ensure the outermost loop is 
offloaded ndptrs = len(self._device_pointers(root)) diff --git a/devito/symbolics/extended_dtypes.py b/devito/symbolics/extended_dtypes.py index 1bb5fbb91b..29bab821ca 100644 --- a/devito/symbolics/extended_dtypes.py +++ b/devito/symbolics/extended_dtypes.py @@ -97,6 +97,6 @@ class VOID(BaseCast): name = base_name.upper() globals()[name] = type(name, (BaseCast,), {'_dtype': dtype}) for i in ['2', '3', '4']: - v = '%s%s' % (base_name, i) + v = f'{base_name}{i}' globals()[v.upper()] = cast(v) globals()[f'{v.upper()}P'] = cast(v, '*') diff --git a/devito/symbolics/extended_sympy.py b/devito/symbolics/extended_sympy.py index d47a063bac..2c352d50d4 100644 --- a/devito/symbolics/extended_sympy.py +++ b/devito/symbolics/extended_sympy.py @@ -2,6 +2,7 @@ Extended SymPy hierarchy. """ import re +from contextlib import suppress import numpy as np import sympy @@ -214,8 +215,8 @@ def __new__(cls, call, pointer, params=None, **kwargs): else: try: _params.append(Number(p)) - except TypeError: - raise ValueError("`params` must be Expr, numbers or str") + except TypeError as e: + raise ValueError("`params` must be Expr, numbers or str") from e params = Tuple(*_params) obj = sympy.Expr.__new__(cls, call, pointer, params) @@ -330,8 +331,8 @@ def __new__(cls, params, dtype=None): for p in as_tuple(params): try: args.append(sympify(p)) - except sympy.SympifyError: - raise ValueError(f"Illegal param `{p}`") + except sympy.SympifyError as e: + raise ValueError(f"Illegal param `{p}`") from e obj = sympy.Expr.__new__(cls, *args) obj.params = tuple(args) @@ -367,11 +368,8 @@ def __new__(cls, base, **kwargs): # If an AbstractFunction, pull the underlying Symbol base = base.indexed.label except AttributeError: - if isinstance(base, str): - base = Symbol(base) - else: - # Fallback: go plain sympy - base = sympify(base) + # Fallback: go plain sympy + base = Symbol(base) if isinstance(base, str) else sympify(base) obj = sympy.Expr.__new__(cls, base) obj._base = base @@ -476,10 +474,8 @@ def 
reinterpret(self): @property def _C_ctype(self): ctype = ctypes_vector_mapper.get(self.dtype, self.dtype) - try: + with suppress(TypeError): ctype = dtype_to_ctype(ctype) - except TypeError: - pass return ctype @property @@ -507,7 +503,7 @@ def __new__(cls, base, index, **kwargs): base = base.indexed.label except AttributeError: if not isinstance(base, sympy.Basic): - raise ValueError("`base` must be of type sympy.Basic") + raise ValueError("`base` must be of type sympy.Basic") from None index = Tuple(*[sympify(i) for i in as_tuple(index)]) @@ -674,10 +670,7 @@ def template(self): return self._template def __str__(self): - if self.template: - template = f"<{','.join(str(i) for i in self.template)}>" - else: - template = '' + template = f"<{','.join(str(i) for i in self.template)}>" if self.template else '' arguments = ', '.join(str(i) for i in self.arguments) return f"{self.name}{template}({arguments})" diff --git a/devito/symbolics/inspection.py b/devito/symbolics/inspection.py index d9902e8364..3147118de1 100644 --- a/devito/symbolics/inspection.py +++ b/devito/symbolics/inspection.py @@ -1,3 +1,4 @@ +from contextlib import suppress from functools import singledispatch import numpy as np @@ -49,19 +50,18 @@ def compare_ops(e1, e2): """ if type(e1) is type(e2) and len(e1.args) == len(e2.args): if e1.is_Atom: - return True if e1 == e2 else False + return e1 == e2 elif isinstance(e1, IndexDerivative) and isinstance(e2, IndexDerivative): if e1.mapper == e2.mapper: return compare_ops(e1.expr, e2.expr) else: return False elif e1.is_Indexed and e2.is_Indexed: - return True if e1.base == e2.base else False + return e1.base == e2.base else: - for a1, a2 in zip(e1.args, e2.args): - if not compare_ops(a1, a2): - return False - return True + return all( + compare_ops(a1, a2) for a1, a2 in zip(e1.args, e2.args, strict=True) + ) else: return False @@ -110,7 +110,7 @@ def estimate_cost(exprs, estimate=False): return flops except: - warning("Cannot estimate cost of `%s`" % 
str(exprs)) + warning(f"Cannot estimate cost of `{str(exprs)}`") return 0 @@ -147,7 +147,9 @@ def _estimate_cost(expr, estimate, seen): # The flag tells whether it's an integer expression (implying flops==0) or not if not expr.args: return 0, False - flops, flags = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args]) + flops, flags = zip( + *[_estimate_cost(a, estimate, seen) for a in expr.args], strict=True + ) flops = sum(flops) if all(flags): # `expr` is an operation involving integer operands only @@ -162,7 +164,9 @@ def _estimate_cost(expr, estimate, seen): @_estimate_cost.register(CallFromPointer) def _(expr, estimate, seen): try: - flops, flags = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args]) + flops, flags = zip( + *[_estimate_cost(a, estimate, seen) for a in expr.args], strict=True + ) except ValueError: flops, flags = [], [] return sum(flops), all(flags) @@ -215,7 +219,9 @@ def _(expr, estimate, seen): @_estimate_cost.register(Application) def _(expr, estimate, seen): if q_routine(expr): - flops, _ = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args]) + flops, _ = zip( + *[_estimate_cost(a, estimate, seen) for a in expr.args], strict=True + ) flops = sum(flops) if isinstance(expr, DefFunction): # Bypass user-defined or language-specific functions @@ -235,7 +241,7 @@ def _(expr, estimate, seen): @_estimate_cost.register(Pow) def _(expr, estimate, seen): - flops, _ = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args]) + flops, _ = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args], strict=True) flops = sum(flops) if estimate: if expr.exp.is_Number: @@ -314,10 +320,8 @@ def sympy_dtype(expr, base=None, default=None, smin=None): dtypes = {base} - {None} for i in expr.free_symbols: - try: + with suppress(AttributeError): dtypes.add(i.dtype) - except AttributeError: - pass dtype = infer_dtype(dtypes) diff --git a/devito/symbolics/manipulation.py b/devito/symbolics/manipulation.py index 6574b9f096..d90c366bb2 
100644 --- a/devito/symbolics/manipulation.py +++ b/devito/symbolics/manipulation.py @@ -301,7 +301,7 @@ def xreplace_indices(exprs, mapper, key=None): handle = [i for i in handle if i.base.label in key] elif callable(key): handle = [i for i in handle if key(i)] - mapper = dict(zip(handle, [i.xreplace(mapper) for i in handle])) + mapper = dict(zip(handle, [i.xreplace(mapper) for i in handle], strict=True)) replaced = [uxreplace(i, mapper) for i in as_tuple(exprs)] return replaced if isinstance(exprs, Iterable) else replaced[0] @@ -312,10 +312,7 @@ def _eval_numbers(expr, args): """ numbers, others = split(args, lambda i: i.is_Number) if len(numbers) > 1: - if isinstance(expr, UnevaluableMixin): - cls = expr.func.__base__ - else: - cls = expr.func + cls = expr.func.__base__ if isinstance(expr, UnevaluableMixin) else expr.func args[:] = [cls(*numbers)] + others @@ -427,7 +424,7 @@ def reuse_if_untouched(expr, args, evaluate=False): Reconstruct `expr` iff any of the provided `args` is different than the corresponding arg in `expr.args`. 
""" - if all(a is b for a, b in zip(expr.args, args)): + if all(a is b for a, b in zip(expr.args, args, strict=False)): return expr else: return expr.func(*args, evaluate=evaluate) diff --git a/devito/symbolics/queries.py b/devito/symbolics/queries.py index 76cb737294..c8624c3578 100644 --- a/devito/symbolics/queries.py +++ b/devito/symbolics/queries.py @@ -95,10 +95,7 @@ def q_terminalop(expr, depth=0): return True elif expr.is_Add or expr.is_Mul: for a in expr.args: - if a.is_Pow: - elems = a.args - else: - elems = [a] + elems = a.args if a.is_Pow else [a] if any(not q_leaf(i) for i in elems): return False return True @@ -306,14 +303,13 @@ def case1(*args): if x0 is not x1: return False - if not isinstance(p1, Constant): - # TODO: Same considerations above about Constant apply - return False # At this point we are in the form `X {+,-} X / p0 + p1`, where # `X`, `p0`, and `p1` are definitely positive; since `X > X / p0`, # definitely the answer is True - return True + # OR we are a constant and return False + # TODO: Same considerations above about Constant apply + return isinstance(p1, Constant) if len(expr.args) == 2: return case0(*expr.args) or case1(S.Zero, *expr.args) diff --git a/devito/symbolics/search.py b/devito/symbolics/search.py index 500ffd25ed..cfb9d504be 100644 --- a/devito/symbolics/search.py +++ b/devito/symbolics/search.py @@ -104,10 +104,7 @@ def search(exprs: Expression | Iterable[Expression], assert mode in ('all', 'unique'), "Unknown mode" - if isinstance(query, type): - Q = lambda obj: isinstance(obj, query) - else: - Q = query + Q = (lambda obj: isinstance(obj, query)) if isinstance(query, type) else query # Search doesn't actually use a BFS (rather, a preorder DFS), but the terminology # is retained in this function's parameters for backwards compatibility @@ -164,7 +161,7 @@ def retrieve_function_carriers(exprs, mode='all'): # Filter off Indexeds not carrying a DiscreteFunction for i in list(retval): try: - i.function + _ = i.function 
except AttributeError: retval.remove(i) return retval diff --git a/devito/tools/abc.py b/devito/tools/abc.py index f325778239..1b92533f5c 100644 --- a/devito/tools/abc.py +++ b/devito/tools/abc.py @@ -1,10 +1,9 @@ -import abc from hashlib import sha1 __all__ = ['Pickable', 'Reconstructable', 'Signer', 'Singleton', 'Stamp', 'Tag'] -class Tag(abc.ABC): +class Tag: """ An abstract class to define categories of object decorators. @@ -39,10 +38,7 @@ def __hash__(self): return hash((self.name, self.val)) def __str__(self): - if self.val is None: - ret = self.name - else: - ret = "%s[%s]" % (self.name, str(self.val)) + ret = self.name if self.val is None else f"{self.name}[{str(self.val)}]" return ret __repr__ = __str__ @@ -277,6 +273,6 @@ class Stamp: """ def __repr__(self): - return "<%s>" % str(id(self))[-3:] + return f"<{str(id(self))[-3:]}>" __str__ = __repr__ diff --git a/devito/tools/algorithms.py b/devito/tools/algorithms.py index 13d349149e..0543dd05bd 100644 --- a/devito/tools/algorithms.py +++ b/devito/tools/algorithms.py @@ -75,6 +75,6 @@ def toposort(data): if item not in ordered]) if len(processed) != len(set(flatten(data) + flatten(data.values()))): - raise ValueError("A cyclic dependency exists amongst %r" % data) + raise ValueError(f"A cyclic dependency exists amongst {data!r}") return processed diff --git a/devito/tools/data_structures.py b/devito/tools/data_structures.py index 48a7a342a7..4d313b00ac 100644 --- a/devito/tools/data_structures.py +++ b/devito/tools/data_structures.py @@ -1,6 +1,7 @@ import json from collections import OrderedDict, deque from collections.abc import Callable, Iterable, Mapping, MutableSet, Set +from contextlib import suppress from functools import cached_property, reduce import numpy as np @@ -41,11 +42,12 @@ def __init__(self, **kwargs): self.__dict__.update(kwargs) def __repr__(self): - return "Bunch(%s)" % ", ".join(["%s=%s" % i for i in self.__dict__.items()]) + return "Bunch({})".format( + ", ".join(["{}={}".format(*i) 
for i in self.__dict__.items()]) + ) def __iter__(self): - for i in self.__dict__.values(): - yield i + yield from self.__dict__.values() class EnrichedTuple(tuple, Pickable): @@ -61,7 +63,7 @@ def __new__(cls, *items, getters=None, **kwargs): obj = super().__new__(cls, items) obj.__dict__.update(kwargs) # Convert to list if we're getting an OrderedDict from rebuild - obj.getters = OrderedDict(zip(list(getters or []), items)) + obj.getters = OrderedDict(zip(list(getters or []), items, strict=False)) return obj def _rebuild(self, *args, **kwargs): @@ -118,7 +120,7 @@ def fromdicts(cls, *dicts): ret = ReducerMap() for i in dicts: if not isinstance(i, Mapping): - raise ValueError("Expected Mapping, got `%s`" % type(i)) + raise ValueError(f"Expected Mapping, got `{type(i)}`") ret.update(i) return ret @@ -179,8 +181,9 @@ def compare_to_first(v): return c return candidates[0] else: - raise ValueError("Unable to find unique value for key %s, candidates: %s" - % (key, candidates)) + raise ValueError( + f'Unable to find unique value for key {key}, candidates: {candidates}' + ) def reduce(self, key, op=None): """ @@ -240,10 +243,7 @@ def __missing__(self, key): return value def __reduce__(self): - if self.default_factory is None: - args = tuple() - else: - args = self.default_factory, + args = tuple() if self.default_factory is None else (self.default_factory,) return type(self), args, None, None, self() def copy(self): @@ -303,10 +303,10 @@ def __gt__(self, other): return self >= other and self != other def __repr__(self): - return 'OrderedSet([%s])' % (', '.join(map(repr, self.keys()))) + return 'OrderedSet([{}])'.format(', '.join(map(repr, self.keys()))) def __str__(self): - return '{%s}' % (', '.join(map(repr, self.keys()))) + return '{{{}}}'.format(', '.join(map(repr, self.keys()))) difference = property(lambda self: self.__sub__) difference_update = property(lambda self: self.__isub__) @@ -448,15 +448,15 @@ def add_node(self, node_name, ignore_existing=False): if 
node_name in self.graph: if ignore_existing is True: return - raise KeyError('node %s already exists' % node_name) + raise KeyError(f'node {node_name} already exists') self.graph[node_name] = OrderedSet() def delete_node(self, node_name): """Delete a node and all edges referencing it.""" if node_name not in self.graph: - raise KeyError('node %s does not exist' % node_name) + raise KeyError(f'node {node_name} does not exist') self.graph.pop(node_name) - for node, edges in self.graph.items(): + for _, edges in self.graph.items(): if node_name in edges: edges.remove(node_name) @@ -476,10 +476,8 @@ def delete_edge(self, ind_node, dep_node): if dep_node not in self.graph.get(ind_node, []): raise KeyError('this edge does not exist in graph') self.graph[ind_node].remove(dep_node) - try: + with suppress(KeyError): del self.labels[ind_node][dep_node] - except KeyError: - pass def get_label(self, ind_node, dep_node, default=None): try: @@ -512,7 +510,7 @@ def _all_predecessors(n): def downstream(self, node): """Return a list of all nodes this node has edges towards.""" if node not in self.graph: - raise KeyError('node %s is not in graph' % node) + raise KeyError(f'node {node} is not in graph') return list(self.graph[node]) def all_downstreams(self, node): @@ -606,7 +604,7 @@ def connected_components(self, enumerated=False): def find_paths(self, node): if node not in self.graph: - raise KeyError('node %s is not in graph' % node) + raise KeyError(f'node {node} is not in graph') paths = [] @@ -660,7 +658,7 @@ def __len__(self): return len(self._dict) def __repr__(self): - return '<%s %r>' % (self.__class__.__name__, self._dict) + return f'<{self.__class__.__name__} {self._dict!r}>' def __hash__(self): if self._hash is None: @@ -782,7 +780,7 @@ def __len__(self): def __repr__(self): sitems = [s.__repr__() for s in self] - return "%s(%s)" % (self.__class__.__name__, ", ".join(sitems)) + return "{}({})".format(self.__class__.__name__, ", ".join(sitems)) def __getitem__(self, idx): 
if not self: diff --git a/devito/tools/dtypes_lowering.py b/devito/tools/dtypes_lowering.py index 7f0ba56911..bba63c6040 100644 --- a/devito/tools/dtypes_lowering.py +++ b/devito/tools/dtypes_lowering.py @@ -35,7 +35,7 @@ def build_dtypes_vector(field_names, counts, mapper=None): mapper = mapper or dtype_mapper for base_name, base_dtype in mapper.items(): for count in counts: - name = "%s%d" % (base_name, count) + name = f'{base_name}{count}' titles = field_names[:count] @@ -43,9 +43,9 @@ def build_dtypes_vector(field_names, counts, mapper=None): if count == 3: padded_count = 4 - names = ["s%d" % i for i in range(count)] + names = [f's{i}' for i in range(count)] while len(names) < padded_count: - names.append("padding%d" % (len(names) - count)) + names.append(f'padding{len(names) - count}') if len(titles) < len(names): titles.extend((len(names) - len(titles)) * [None]) @@ -82,7 +82,7 @@ def add_dtype(self, field_name, count): self.update(build_dtypes_vector([field_name], [count])) def get_base_dtype(self, v, default=None): - for (base_dtype, count), dtype in self.items(): + for (base_dtype, _), dtype in self.items(): if dtype is v: return base_dtype @@ -119,10 +119,12 @@ def __hash__(self): return hash((self.name, self.template, self.modifier)) def __repr__(self): - template = '<%s>' % ','.join([str(i) for i in self.template]) - return "%s%s%s" % (self.name, - template if self.template else '', - self.modifier) + template = '<{}>'.format(','.join([str(i) for i in self.template])) + return "{}{}{}".format( + self.name, + template if self.template else '', + self.modifier + ) __str__ = __repr__ @@ -241,7 +243,7 @@ class c_restrict_void_p(ctypes.c_void_p): for count in counts: dtype = dtypes_vector_mapper[(base_dtype, count)] - name = "%s%d" % (base_name, count) + name = f'{base_name}{count}' ctype = type(name, (ctypes.Structure,), {'_fields_': [(i, base_ctype) for i in field_names[:count]], '_base_dtype': True}) @@ -262,21 +264,21 @@ def ctypes_to_cstr(ctype, 
toarray=None): elif isinstance(ctype, CustomDtype): retval = str(ctype) elif issubclass(ctype, ctypes.Structure): - retval = 'struct %s' % ctype.__name__ + retval = f'struct {ctype.__name__}' elif issubclass(ctype, ctypes.Union): - retval = 'union %s' % ctype.__name__ + retval = f'union {ctype.__name__}' elif issubclass(ctype, ctypes._Pointer): if toarray: - retval = ctypes_to_cstr(ctype._type_, '(* %s)' % toarray) + retval = ctypes_to_cstr(ctype._type_, f'(* {toarray})') else: retval = ctypes_to_cstr(ctype._type_) if issubclass(ctype._type_, ctypes._Pointer): # Look-ahead to avoid extra ugly spaces - retval = '%s*' % retval + retval = f'{retval}*' else: - retval = '%s *' % retval + retval = f'{retval} *' elif issubclass(ctype, ctypes.Array): - retval = '%s[%d]' % (ctypes_to_cstr(ctype._type_, toarray), ctype._length_) + retval = f'{ctypes_to_cstr(ctype._type_, toarray)}[{ctype._length_}]' elif ctype.__name__.startswith('c_'): name = ctype.__name__[2:] # A primitive datatype @@ -304,9 +306,9 @@ def ctypes_to_cstr(ctype, toarray=None): retval = name if prefix: - retval = '%s %s' % (prefix, retval) + retval = f'{prefix} {retval}' if suffix: - retval = '%s %s' % (retval, suffix) + retval = f'{retval} {suffix}' else: # A custom datatype (e.g., a typedef-ed pointer to struct) retval = ctype.__name__ @@ -326,10 +328,7 @@ def is_external_ctype(ctype, includes): if issubclass(ctype, ctypes._SimpleCData): return False - if ctype in ctypes_vector_mapper.values(): - return True - - return False + return ctype in ctypes_vector_mapper.values() def is_numpy_dtype(dtype): diff --git a/devito/tools/os_helper.py b/devito/tools/os_helper.py index 8bc5a3eaf5..a7e45d442a 100644 --- a/devito/tools/os_helper.py +++ b/devito/tools/os_helper.py @@ -28,9 +28,9 @@ def make_tempdir(prefix=None): """Create a temporary directory having a deterministic name. 
The directory is created within the default OS temporary directory.""" if prefix is None: - name = 'devito-uid%s' % os.getuid() + name = f'devito-uid{os.getuid()}' else: - name = 'devito-%s-uid%s' % (str(prefix), os.getuid()) + name = f'devito-{str(prefix)}-uid{os.getuid()}' tmpdir = Path(gettempdir()).joinpath(name) tmpdir.mkdir(parents=True, exist_ok=True) return tmpdir diff --git a/devito/tools/timing.py b/devito/tools/timing.py index f3c562ac56..068197a786 100644 --- a/devito/tools/timing.py +++ b/devito/tools/timing.py @@ -1,4 +1,5 @@ from collections import OrderedDict, defaultdict +from contextlib import suppress from functools import partial from threading import get_ident from time import time @@ -36,7 +37,7 @@ def __new__(cls, *args, name=None): assert name is None func, name = args else: - assert False + raise AssertionError('Incorrect number of args') obj = object.__new__(cls) obj.__init__(func, name) return obj @@ -64,10 +65,7 @@ def __call__(self, *args, **kwargs): if not isinstance(timings, dict): raise ValueError("Attempting to use `timed_pass` outside a `timed_region`") - if self.name is not None: - frame = self.name - else: - frame = self.func.__name__ + frame = self.name if self.name is not None else self.func.__name__ stack = timed_pass.stack[tid] stack.append(frame) @@ -116,10 +114,8 @@ def __enter__(self): def __exit__(self, *args): self.timings[self.name] = time() - self.tic del timed_pass.timings[get_ident()] - try: + with suppress(KeyError): # Necessary clean up should one be constructing an Operator within # a try-except, with the Operator construction failing + # Typically we suppress del timed_pass.stack[get_ident()] - except KeyError: - # Typically we end up here - pass diff --git a/devito/tools/utils.py b/devito/tools/utils.py index 54c8a760cb..91b5bcdbf7 100644 --- a/devito/tools/utils.py +++ b/devito/tools/utils.py @@ -81,9 +81,9 @@ def as_tuple(item, type=None, length=None): t = (item,) * (length or 1) if length and not len(t) == 
length: - raise ValueError("Tuple needs to be of length %d" % length) + raise ValueError(f'Tuple needs to be of length {length}') if type and not all(isinstance(i, type) for i in t): - raise TypeError("Items need to be of type %s" % type) + raise TypeError(f'Items need to be of type {type}') return t @@ -213,7 +213,7 @@ def filter_ordered(elements, key=None): if key is None: return list(dict.fromkeys(elements)) else: - return list(dict(zip([key(i) for i in elements], elements)).values()) + return list(dict(zip([key(i) for i in elements], elements, strict=True)).values()) def filter_sorted(elements, key=None): @@ -245,7 +245,7 @@ def sweep(parameters, keys=None): sweep_values = [[v] if isinstance(v, str) or not isinstance(v, Iterable) else v for v in sweep_values] for vals in product(*sweep_values): - yield dict(zip(keys, vals)) + yield dict(zip(keys, vals, strict=True)) def indices_to_slices(inputlist): @@ -263,7 +263,7 @@ def indices_to_slices(inputlist): """ inputlist.sort() pointers = np.where(np.diff(inputlist) > 1)[0] - pointers = zip(np.r_[0, pointers+1], np.r_[pointers, len(inputlist)-1]) + pointers = zip(np.r_[0, pointers+1], np.r_[pointers, len(inputlist)-1], strict=True) slices = [(inputlist[i], inputlist[j]+1) for i, j in pointers] return slices @@ -310,7 +310,7 @@ def transitive_closure(R): {a:d, b:d, c:d} ''' ans = dict() - for k in R.keys(): + for k in R: visited = [] ans[k] = reachable_items(R, k, visited) return ans @@ -331,15 +331,15 @@ def humanbytes(B): TB = float(KB ** 4) # 1,099,511,627,776 if B < KB: - return '%d %s' % (int(B), 'B') + return f'{int(B)} B' elif KB <= B < MB: - return '%d KB' % round(B / KB) + return f'{round(B / KB)} KB' elif MB <= B < GB: - return '%d MB' % round(B / MB) + return f'{round(B / MB)} MB' elif GB <= B < TB: - return '%.1f GB' % round(B / GB, 1) + return f'{round(B / GB, 1):.1f} GB' elif TB <= B: - return '%.2f TB' % round(B / TB, 1) + return f'{round(B / TB, 1):.2f} TB' def sorted_priority(items, priority): diff 
--git a/devito/types/args.py b/devito/types/args.py index 2110f58e84..9e57339b63 100644 --- a/devito/types/args.py +++ b/devito/types/args.py @@ -13,16 +13,18 @@ class ArgProvider: @property @abc.abstractmethod def _arg_names(self): - raise NotImplementedError('%s does not provide any default argument names' % - self.__class__) + raise NotImplementedError( + f'{self.__class__} does not provide any default argument names' + ) @abc.abstractmethod def _arg_defaults(self): """ A map of default argument values defined by this type. """ - raise NotImplementedError('%s does not provide any default arguments' % - self.__class__) + raise NotImplementedError( + f'{self.__class__} does not provide any default arguments' + ) @abc.abstractmethod def _arg_values(self, **kwargs): @@ -34,8 +36,9 @@ def _arg_values(self, **kwargs): **kwargs User-provided argument overrides. """ - raise NotImplementedError('%s does not provide argument value derivation' % - self.__class__) + raise NotImplementedError( + f'{self.__class__} does not provide argument value derivation' + ) def _arg_check(self, *args, **kwargs): """ diff --git a/devito/types/array.py b/devito/types/array.py index 6f2914756b..c48425e33d 100644 --- a/devito/types/array.py +++ b/devito/types/array.py @@ -41,10 +41,7 @@ def __init_finalize__(self, *args, **kwargs): def __indices_setup__(cls, *args, **kwargs): dimensions = kwargs['dimensions'] - if args: - indices = args - else: - indices = dimensions + indices = args or dimensions return as_tuple(dimensions), as_tuple(indices) @@ -174,7 +171,7 @@ def __padding_setup__(self, **kwargs): elif isinstance(padding, tuple) and len(padding) == self.ndim: padding = tuple((0, i) if is_integer(i) else i for i in padding) else: - raise TypeError("`padding` must be int or %d-tuple of ints" % self.ndim) + raise TypeError(f'`padding` must be int or {self.ndim}-tuple of ints') return DimensionTuple(*padding, getters=self.dimensions) @property @@ -222,7 +219,7 @@ def free_symbols(self): 
return super().free_symbols - {d for d in self.dimensions if d.is_Default} def _make_pointer(self, dim): - return PointerArray(name='p%s' % self.name, dimensions=dim, array=self) + return PointerArray(name=f'p{self.name}', dimensions=dim, array=self) class MappedArrayMixin: @@ -282,13 +279,13 @@ def __init_finalize__(self, *args, **kwargs): fields = tuple(kwargs.pop('fields', ())) self._fields = fields - self._pname = kwargs.pop('pname', 't%s' % name) + self._pname = kwargs.pop('pname', f't{name}') super().__init_finalize__(*args, **kwargs) @classmethod def __dtype_setup__(cls, **kwargs): - pname = kwargs.get('pname', 't%s' % kwargs['name']) + pname = kwargs.get('pname', 't{}'.format(kwargs['name'])) pfields = cls.__pfields_setup__(**kwargs) return CtypesFactory.generate(pname, pfields) @@ -536,8 +533,10 @@ def __getitem__(self, index): component_index, indices = index[0], index[1:] return ComponentAccess(self.indexed[indices], component_index) else: - raise ValueError("Expected %d or %d indices, got %d instead" - % (self.ndim, self.ndim + 1, len(index))) + raise ValueError( + f'Expected {self.ndim} or {self.ndim + 1} indices, ' + f'got {len(index)} instead' + ) @property def _C_ctype(self): @@ -613,7 +612,7 @@ def _hashable_content(self): return super()._hashable_content() + (self._index,) def __str__(self): - return "%s.%s" % (self.base, self.sindex) + return f"{self.base}.{self.sindex}" __repr__ = __str__ diff --git a/devito/types/basic.py b/devito/types/basic.py index 7f5702a7d2..75aed9d32f 100644 --- a/devito/types/basic.py +++ b/devito/types/basic.py @@ -1,5 +1,6 @@ import abc import inspect +from contextlib import suppress from ctypes import POINTER, Structure, _Pointer, c_char, c_char_p from functools import cached_property, reduce from operator import mul @@ -101,7 +102,7 @@ def _C_typedata(self): try: # We have internal types such as c_complex that are # Structure too but should be treated as plain c_type - _type._base_dtype + _ = _type._base_dtype except 
AttributeError: if issubclass(_type, Structure): _type = f'struct {_type.__name__}' @@ -852,8 +853,10 @@ def __init_finalize__(self, *args, **kwargs): # Averaging mode for off the grid evaluation self._avg_mode = kwargs.get('avg_mode', 'arithmetic') if self._avg_mode not in ['arithmetic', 'harmonic', 'safe_harmonic']: - raise ValueError("Invalid averaging mode_mode %s, accepted values are" - " arithmetic or harmonic" % self._avg_mode) + raise ValueError( + f"Invalid averaging mode_mode {self._avg_mode}, accepted values are" + " arithmetic or harmonic" + ) @classmethod def __args_setup__(cls, *args, **kwargs): @@ -956,10 +959,14 @@ def origin(self): f(x) : origin = 0 f(x + hx/2) : origin = hx/2 """ - return DimensionTuple(*(r - d + o for d, r, o - in zip(self.dimensions, self.indices_ref, - self._offset_subdomain)), - getters=self.dimensions) + return DimensionTuple(*( + r - d + o + for d, r, o in zip( + self.dimensions, + self.indices_ref, + self._offset_subdomain, strict=True + ) + ), getters=self.dimensions) @property def dimensions(self): @@ -998,7 +1005,7 @@ def _grid_map(self): """ mapper = {} subs = {} - for i, j, d in zip(self.indices, self.indices_ref, self.dimensions): + for i, j, d in zip(self.indices, self.indices_ref, self.dimensions, strict=True): # Two indices are aligned if they differ by an Integer*spacing. 
if not i.has(d): # Maybe a SubDimension @@ -1114,7 +1121,7 @@ def symbolic_shape(self): padding = [sympy.Add(*i, evaluate=False) for i in self._size_padding] domain = [i.symbolic_size for i in self.dimensions] ret = tuple(sympy.Add(i, j, k) - for i, j, k in zip(domain, halo, padding)) + for i, j, k in zip(domain, halo, padding, strict=True)) return DimensionTuple(*ret, getters=self.dimensions) @property @@ -1263,8 +1270,8 @@ def _size_domain(self): @cached_property def _size_halo(self): """Number of points in the halo region.""" - left = tuple(zip(*self._halo))[0] - right = tuple(zip(*self._halo))[1] + left = tuple(zip(*self._halo, strict=True))[0] + right = tuple(zip(*self._halo, strict=True))[1] sizes = tuple(Size(i, j) for i, j in self._halo) @@ -1283,8 +1290,8 @@ def _size_owned(self): @cached_property def _size_padding(self): """Number of points in the padding region.""" - left = tuple(zip(*self._padding))[0] - right = tuple(zip(*self._padding))[1] + left = tuple(zip(*self._padding, strict=True))[0] + right = tuple(zip(*self._padding, strict=True))[1] sizes = tuple(Size(i, j) for i, j in self._padding) @@ -1293,7 +1300,10 @@ def _size_padding(self): @cached_property def _size_nopad(self): """Number of points in the domain+halo region.""" - sizes = tuple(i+sum(j) for i, j in zip(self._size_domain, self._size_halo)) + sizes = tuple( + i+sum(j) + for i, j in zip(self._size_domain, self._size_halo, strict=True) + ) return DimensionTuple(*sizes, getters=self.dimensions) @cached_property @@ -1326,7 +1336,7 @@ def _offset_halo(self): left = tuple(self._size_padding.left) right = tuple(np.add(np.add(left, self._size_halo.left), self._size_domain)) - offsets = tuple(Offset(i, j) for i, j in zip(left, right)) + offsets = tuple(Offset(i, j) for i, j in zip(left, right, strict=True)) return DimensionTuple(*offsets, getters=self.dimensions, left=left, right=right) @@ -1336,7 +1346,7 @@ def _offset_owned(self): left = tuple(self._offset_domain) right = 
tuple(np.add(self._offset_halo.left, self._size_domain)) - offsets = tuple(Offset(i, j) for i, j in zip(left, right)) + offsets = tuple(Offset(i, j) for i, j in zip(left, right, strict=True)) return DimensionTuple(*offsets, getters=self.dimensions, left=left, right=right) @@ -1383,7 +1393,7 @@ def indexify(self, indices=None, subs=None): # Indices after substitutions indices = [] - for a, d, o, s in zip(self.args, self.dimensions, self.origin, subs): + for a, d, o, s in zip(self.args, self.dimensions, self.origin, subs, strict=True): if a.is_Function and len(a.args) == 1: # E.g. Abs(expr) arg = a.args[0] @@ -1503,16 +1513,14 @@ def _fromrep(cls, rep): """ newobj = super()._fromrep(rep) grid, dimensions = newobj._infer_dims() - try: - # This is needed when `_fromrep` is called directly in 1.9 - # for example with mul. - newobj.__init_finalize__(newobj.rows, newobj.cols, newobj.flat(), - grid=grid, dimensions=dimensions) - except TypeError: + with suppress(TypeError): # We can end up here when `_fromrep` is called through the default _new # when input `comps` don't have grid or dimensions. For example # `test_non_devito_tens` in `test_tensor.py`. - pass + # This is suppressed when `_fromrep` is called directly in 1.9 + # for example with mul. 
+ newobj.__init_finalize__(newobj.rows, newobj.cols, newobj.flat(), + grid=grid, dimensions=dimensions) return newobj @classmethod @@ -1659,7 +1667,10 @@ def _eval_matrix_mul(self, other): row, col = i // other.cols, i % other.cols row_indices = range(self_cols*row, self_cols*(row+1)) col_indices = range(col, other_len, other.cols) - vec = [mat[a]*other_mat[b] for a, b in zip(row_indices, col_indices)] + vec = [ + mat[a]*other_mat[b] + for a, b in zip(row_indices, col_indices, strict=True) + ] new_mat[i] = sum(vec) # Get new class and return product @@ -1739,10 +1750,8 @@ def dtype(self): def free_symbols(self): ret = {self} for i in self.indices: - try: + with suppress(AttributeError): ret.update(i.free_symbols) - except AttributeError: - pass return ret # Pickling support @@ -1865,7 +1874,7 @@ def compare(self, other): """ if (self.__class__ != other.__class__) or (self.function is not other.function): return super().compare(other) - for l, r in zip(self.indices, other.indices): + for l, r in zip(self.indices, other.indices, strict=True): try: c = int(sympy.sign(l - r)) except TypeError: diff --git a/devito/types/constant.py b/devito/types/constant.py index bea67674a2..dbd3781e7d 100644 --- a/devito/types/constant.py +++ b/devito/types/constant.py @@ -100,12 +100,14 @@ def _arg_check(self, args, intervals, **kwargs): Check that `args` contains legal runtime values bound to `self`. 
""" if self.name not in args: - raise InvalidArgument("No runtime value for %s" % self.name) + raise InvalidArgument(f"No runtime value for {self.name}") key = args[self.name] try: # Might be a plain number, w/o a dtype field if key.dtype != self.dtype: - warning("Data type %s of runtime value `%s` does not match the " - "Constant data type %s" % (key.dtype, self.name, self.dtype)) + warning( + f'Data type {key.dtype} of runtime value `{self.name}` ' + f'does not match the Constant data type {self.dtype}' + ) except AttributeError: pass diff --git a/devito/types/dense.py b/devito/types/dense.py index 8527cb089b..551dcad14f 100644 --- a/devito/types/dense.py +++ b/devito/types/dense.py @@ -2,6 +2,7 @@ from ctypes import POINTER, Structure, byref, c_int, c_ulong, c_void_p, cast from functools import cached_property, reduce, wraps from operator import mul +from textwrap import dedent, wrap import numpy as np import sympy @@ -110,10 +111,11 @@ def __init_finalize__(self, *args, function=None, **kwargs): # running with MPI and some processes get 0-size arrays after # domain decomposition. 
We touch the data anyway to avoid the # case `self._data is None` - self.data + _ = self.data else: - raise ValueError("`initializer` must be callable or buffer, not %s" - % type(initializer)) + raise ValueError( + f'`initializer` must be callable or buffer, not {type(initializer)}' + ) _subs = Differentiable._subs @@ -179,10 +181,12 @@ def __coefficients_setup__(self, **kwargs): coeffs = kwargs.get('coefficients', self._default_fd) if coeffs not in fd_weights_registry: if coeffs == 'symbolic': - deprecations.symbolic_warn + _ = deprecations.symbolic_warn else: - raise ValueError(f"coefficients must be one of {str(fd_weights_registry)}" - f" not {coeffs}") + raise ValueError( + f'coefficients must be one of {str(fd_weights_registry)}' + f' not {coeffs}' + ) return coeffs @cached_property @@ -248,7 +252,10 @@ def shape_with_halo(self): the outhalo of boundary ranks contains a number of elements depending on the rank position in the decomposed grid (corner, side, ...). """ - return tuple(j + i + k for i, (j, k) in zip(self.shape, self._size_outhalo)) + return tuple( + j + i + k + for i, (j, k) in zip(self.shape, self._size_outhalo, strict=True) + ) @cached_property def _shape_with_inhalo(self): @@ -263,7 +270,10 @@ def _shape_with_inhalo(self): Typically, this property won't be used in user code, but it may come in handy for testing or debugging """ - return tuple(j + i + k for i, (j, k) in zip(self.shape, self._halo)) + return tuple( + j + i + k + for i, (j, k) in zip(self.shape, self._halo, strict=True) + ) @cached_property def shape_allocated(self): @@ -275,9 +285,13 @@ def shape_allocated(self): ----- In an MPI context, this is the *local* with_halo region shape. 
""" - return DimensionTuple(*[j + i + k for i, (j, k) in zip(self._shape_with_inhalo, - self._padding)], - getters=self.dimensions) + return DimensionTuple( + *[ + j + i + k + for i, (j, k) in zip(self._shape_with_inhalo, self._padding, strict=True) + ], + getters=self.dimensions + ) @cached_property def shape_global(self): @@ -297,15 +311,19 @@ def shape_global(self): if self.grid is None: return self.shape retval = [] - for d, s in zip(self.dimensions, self.shape): + for d, s in zip(self.dimensions, self.shape, strict=True): size = self.grid.size_map.get(d) retval.append(size.glb if size is not None else s) return tuple(retval) @property def symbolic_shape(self): - return DimensionTuple(*[self._C_get_field(FULL, d).size for d in self.dimensions], - getters=self.dimensions) + return DimensionTuple( + *[ + self._C_get_field(FULL, d).size for d in self.dimensions + ], + getters=self.dimensions + ) @property def size_global(self): @@ -333,30 +351,49 @@ def _size_outhalo(self): # and inhalo correspond return self._size_inhalo - left = [abs(min(i.loc_abs_min-i.glb_min-j, 0)) if i and not i.loc_empty else 0 - for i, j in zip(self._decomposition, self._size_inhalo.left)] - right = [max(i.loc_abs_max+j-i.glb_max, 0) if i and not i.loc_empty else 0 - for i, j in zip(self._decomposition, self._size_inhalo.right)] + left = [ + abs(min(i.loc_abs_min-i.glb_min-j, 0)) + if i and not i.loc_empty else 0 + for i, j in zip(self._decomposition, self._size_inhalo.left, strict=True) + ] + right = [ + max(i.loc_abs_max+j-i.glb_max, 0) + if i and not i.loc_empty else 0 + for i, j in zip(self._decomposition, self._size_inhalo.right, strict=True) + ] - sizes = tuple(Size(i, j) for i, j in zip(left, right)) + sizes = tuple(Size(i, j) for i, j in zip(left, right, strict=True)) if self._distributor.is_parallel and (any(left) or any(right)): try: - warning_msg = f"""A space order of {self._space_order} and a halo size of {max(self._size_inhalo)} has been - set but the current rank 
({self._distributor.myrank}) has a domain size of - only {min(self.grid.shape_local)}""" + warning_msg = dedent(f""" + A space order of {self._space_order} and a halo size of + {max(self._size_inhalo)} has been set but the current rank + ({self._distributor.myrank}) has a domain size of only + {min(self.grid.shape_local)} + """)[1:] if not self._distributor.is_boundary_rank: - warning(warning_msg) + warning(' '.join(wrap(warning_msg))) else: - left_dist = [i for i, d in zip(left, self.dimensions) if d - in self._distributor.dimensions] - right_dist = [i for i, d in zip(right, self.dimensions) if d - in self._distributor.dimensions] - for i, j, k, l in zip(left_dist, right_dist, - self._distributor.mycoords, - self._distributor.topology): + left_dist = [ + i + for i, d in zip(left, self.dimensions, strict=True) + if d in self._distributor.dimensions + ] + right_dist = [ + i + for i, d in zip(right, self.dimensions, strict=True) + if d in self._distributor.dimensions + ] + for i, j, k, l in zip( + left_dist, + right_dist, + self._distributor.mycoords, + self._distributor.topology, + strict=False + ): if l > 1 and ((j > 0 and k == 0) or (i > 0 and k == l-1)): - warning(warning_msg) + warning(' '.join(wrap(warning_msg))) break except AttributeError: pass @@ -375,25 +412,31 @@ def size_allocated(self): @cached_property def _mask_modulo(self): """Boolean mask telling which Dimensions support modulo-indexing.""" - return tuple(True if i.is_Stepping else False for i in self.dimensions) + return tuple(bool(i.is_Stepping) for i in self.dimensions) @cached_property def _mask_domain(self): """Slice-based mask to access the domain region of the allocated data.""" - return tuple(slice(i, j) for i, j in - zip(self._offset_domain, self._offset_halo.right)) + return tuple( + slice(i, j) + for i, j in zip(self._offset_domain, self._offset_halo.right, strict=True) + ) @cached_property def _mask_inhalo(self): """Slice-based mask to access the domain+inhalo region of the allocated 
data.""" - return tuple(slice(i.left, i.right + j.right) for i, j in - zip(self._offset_inhalo, self._size_inhalo)) + return tuple( + slice(i.left, i.right + j.right) + for i, j in zip(self._offset_inhalo, self._size_inhalo, strict=True) + ) @cached_property def _mask_outhalo(self): """Slice-based mask to access the domain+outhalo region of the allocated data.""" - return tuple(slice(i.start - j.left, i.stop and i.stop + j.right or None) - for i, j in zip(self._mask_domain, self._size_outhalo)) + return tuple( + slice(i.start - j.left, i.stop and i.stop + j.right or None) + for i, j in zip(self._mask_domain, self._size_outhalo, strict=True) + ) @cached_property def _decomposition(self): @@ -414,8 +457,11 @@ def _decomposition_outhalo(self): """ if self._distributor is None: return (None,)*self.ndim - return tuple(v.reshape(*self._size_inhalo[d]) if v is not None else v - for d, v in zip(self.dimensions, self._decomposition)) + return tuple( + v.reshape(*self._size_inhalo[d]) + if v is not None else v + for d, v in zip(self.dimensions, self._decomposition, strict=True) + ) @property def data(self): @@ -579,7 +625,7 @@ def _data_in_region(self, region, dim, side): index_array = [ slice(offset, offset+size) if d is dim else slice(pl, s - pr) for d, s, (pl, pr) - in zip(self.dimensions, self.shape_allocated, self._padding) + in zip(self.dimensions, self.shape_allocated, self._padding, strict=True) ] return np.asarray(self._data[index_array]) @@ -645,8 +691,10 @@ def local_indices(self): if self._distributor is None: return tuple(slice(0, s) for s in self.shape) else: - return tuple(self._distributor.glb_slices.get(d, slice(0, s)) - for s, d in zip(self.shape, self.dimensions)) + return tuple( + self._distributor.glb_slices.get(d, slice(0, s)) + for s, d in zip(self.shape, self.dimensions, strict=True) + ) @property def initializer(self): @@ -691,8 +739,10 @@ def _C_make_dataobj(self, alias=None, **args): dataobj._obj.nbytes = data.nbytes # MPI-related fields - 
dataobj._obj.npsize = (c_ulong*self.ndim)(*[i - sum(j) for i, j in - zip(data.shape, self._size_padding)]) + dataobj._obj.npsize = (c_ulong*self.ndim)(*[ + i - sum(j) + for i, j in zip(data.shape, self._size_padding, strict=True) + ]) dataobj._obj.dsize = (c_ulong*self.ndim)(*self._size_domain) dataobj._obj.hsize = (c_int*(self.ndim*2))(*flatten(self._size_halo)) dataobj._obj.hofs = (c_int*(self.ndim*2))(*flatten(self._offset_halo)) @@ -823,7 +873,7 @@ def _arg_defaults(self, alias=None, metadata=None, estimate_memory=False): args = ReducerMap({key.name: self._data_buffer(metadata=metadata)}) # Collect default dimension arguments from all indices - for a, i, s in zip(key.dimensions, self.dimensions, self.shape): + for a, i, s in zip(key.dimensions, self.dimensions, self.shape, strict=True): args.update(i._arg_defaults(_min=0, size=s, alias=a)) return args @@ -851,7 +901,7 @@ def _arg_values(self, metadata=None, estimate_memory=False, **kwargs): # We've been provided a pure-data replacement (array) values = {self.name: new} # Add value overrides for all associated dimensions - for i, s in zip(self.dimensions, new.shape): + for i, s in zip(self.dimensions, new.shape, strict=True): size = s - sum(self._size_nodomain[i]) values.update(i._arg_defaults(size=size)) else: @@ -883,7 +933,7 @@ def _arg_check(self, args, intervals, **kwargs): f"does not match the Function data type {self.dtype}") # Check each Dimension for potential OOB accesses - for i, s in zip(self.dimensions, data.shape): + for i, s in zip(self.dimensions, data.shape, strict=True): i._arg_check(args, s, intervals[i]) if args.options['index-mode'] == 'int32' and \ @@ -1133,8 +1183,10 @@ def __indices_setup__(cls, *args, **kwargs): if not staggered: staggered_indices = dimensions else: - staggered_indices = (d + i * d.spacing / 2 - for d, i in zip(dimensions, staggered)) + staggered_indices = ( + d + i * d.spacing / 2 + for d, i in zip(dimensions, staggered, strict=True) + ) return tuple(dimensions), 
tuple(staggered_indices) @property @@ -1169,7 +1221,7 @@ def __shape_setup__(cls, **kwargs): raise ValueError("`shape` and `dimensions` must have the " "same number of entries") loc_shape = [] - for d, s in zip(dimensions, shape): + for d, s in zip(dimensions, shape, strict=True): if d in grid.dimensions: size = grid.size_map[d] if size.glb != s and s is not None: @@ -1690,7 +1742,7 @@ def shape(self): def shape_with_halo(self): domain = self.shape halo = [sympy.Add(*i, evaluate=False) for i in self._size_halo] - ret = tuple(sum(i) for i in zip(domain, halo)) + ret = tuple(sum(i) for i in zip(domain, halo, strict=True)) return DimensionTuple(*ret, getters=self.dimensions) shape_allocated = AbstractFunction.symbolic_shape diff --git a/devito/types/dimension.py b/devito/types/dimension.py index 3482f8da2c..fa02ebb32d 100644 --- a/devito/types/dimension.py +++ b/devito/types/dimension.py @@ -1,5 +1,6 @@ import math from collections import namedtuple +from contextlib import suppress from functools import cached_property import numpy as np @@ -306,16 +307,12 @@ def _arg_values(self, interval, grid=None, args=None, **kwargs): defaults = self._arg_defaults() if glb_minv is None: loc_minv = args.get(self.min_name, defaults[self.min_name]) - try: + with suppress(AttributeError, TypeError): loc_minv -= min(interval.lower, 0) - except (AttributeError, TypeError): - pass if glb_maxv is None: loc_maxv = args.get(self.max_name, defaults[self.max_name]) - try: + with suppress(AttributeError, TypeError): loc_maxv -= max(interval.upper, 0) - except (AttributeError, TypeError): - pass # Some `args` may still be DerivedDimensions' defaults. These, in turn, # may represent sets of legal values. 
If that's the case, here we just @@ -323,17 +320,13 @@ def _arg_values(self, interval, grid=None, args=None, **kwargs): try: loc_minv = loc_minv.stop except AttributeError: - try: + with suppress(TypeError): loc_minv = sorted(loc_minv).pop(0) - except TypeError: - pass try: loc_maxv = loc_maxv.stop except AttributeError: - try: + with suppress(TypeError): loc_maxv = sorted(loc_maxv).pop(0) - except TypeError: - pass return {self.min_name: loc_minv, self.max_name: loc_maxv} @@ -366,9 +359,10 @@ def _arg_check(self, args, size, interval): # Allow the specific case of max=min-1, which disables the loop if args[self.max_name] < args[self.min_name]-1: - raise InvalidArgument("Illegal %s=%d < %s=%d" - % (self.max_name, args[self.max_name], - self.min_name, args[self.min_name])) + raise InvalidArgument( + f'Illegal {self.max_name}={args[self.max_name]} < ' + f'{self.min_name}={args[self.min_name]}' + ) elif args[self.max_name] == args[self.min_name]-1: debug("%s=%d and %s=%d might cause no iterations along Dimension %s", self.min_name, args[self.min_name], @@ -645,7 +639,7 @@ def _interval(self): def _symbolic_thickness(self, **kwargs): kwargs = {'dtype': np.int32, 'is_const': True, 'nonnegative': True} - names = ["%s_%stkn" % (self.parent.name, s) for s in ('l', 'r')] + names = [f"{self.parent.name}_{s}tkn" for s in ('l', 'r')] return SubDimensionThickness(*[Thickness(name=n, **kwargs) for n in names]) @cached_property @@ -757,10 +751,12 @@ def _symbolic_thickness(self, thickness=None): kwargs = {'dtype': np.int32, 'is_const': True, 'nonnegative': True, 'root': self.root, 'local': self.local} - names = ["%s_%stkn" % (self.parent.name, s) for s in ('l', 'r')] + names = [f"{self.parent.name}_{s}tkn" for s in ('l', 'r')] sides = [LEFT, RIGHT] - return SubDimensionThickness(*[Thickness(name=n, side=s, value=t, **kwargs) - for n, s, t in zip(names, sides, thickness)]) + return SubDimensionThickness(*[ + Thickness(name=n, side=s, value=t, **kwargs) + for n, s, t in zip(names, 
sides, thickness, strict=True) + ]) @cached_property def _interval(self): @@ -934,7 +930,7 @@ def __init_finalize__(self, name, parent=None, factor=None, condition=None, elif is_number(factor): self._factor = int(factor) elif factor.is_Constant: - deprecations.constant_factor_warn + _ = deprecations.constant_factor_warn self._factor = factor else: raise ValueError("factor must be an integer") @@ -987,10 +983,8 @@ def free_symbols(self): retval = set(super().free_symbols) if self.condition is not None: retval |= self.condition.free_symbols - try: + with suppress(AttributeError): retval |= self.factor.free_symbols - except AttributeError: - pass return retval def _arg_values(self, interval, grid=None, args=None, **kwargs): @@ -1005,15 +999,11 @@ def _arg_values(self, interval, grid=None, args=None, **kwargs): toint = lambda x: math.ceil(x / fact) vals = {} - try: + with suppress(KeyError, TypeError): vals[self.min_name] = toint(kwargs.get(self.parent.min_name)) - except (KeyError, TypeError): - pass - try: + with suppress(KeyError, TypeError): vals[self.max_name] = toint(kwargs.get(self.parent.max_name)) - except (KeyError, TypeError): - pass vals[self.symbolic_factor.name] = fact @@ -1147,10 +1137,7 @@ def symbolic_min(self): @cached_property def symbolic_incr(self): - if self._incr is not None: - incr = self._incr - else: - incr = self.offset + incr = self._incr if self._incr is not None else self.offset if self.modulo is not None: incr = incr % self.modulo # Make sure we return a symbolic object as this point `incr` may well @@ -1370,19 +1357,22 @@ def _arg_check(self, args, *_args): # sub-BlockDimensions must be perfect divisors of their parent parent_value = args[self.parent.step.name] if parent_value % value > 0: - raise InvalidArgument("Illegal block size `%s=%d`: sub-block sizes " - "must divide the parent block size evenly (`%s=%d`)" - % (name, value, self.parent.step.name, - parent_value)) + raise InvalidArgument( + f'Illegal block size `{name}={value}`: 
sub-block sizes ' + 'must divide the parent block size evenly ' + f'(`{self.parent.step.name}={parent_value}`)' + ) else: if value < 0: - raise InvalidArgument("Illegal block size `%s=%d`: it should be > 0" - % (name, value)) + raise InvalidArgument( + f'Illegal block size `{name}={value}`: it should be > 0' + ) if value > args[self.root.max_name] - args[self.root.min_name] + 1: # Avoid OOB - raise InvalidArgument("Illegal block size `%s=%d`: it's greater than the " - "iteration range and it will cause an OOB access" - % (name, value)) + raise InvalidArgument( + f'Illegal block size `{name}={value}`: it is greater than the ' + 'iteration range and it will cause an OOB access' + ) class CustomDimension(BasicDimension): @@ -1540,8 +1530,8 @@ class DynamicSubDimension(DynamicDimensionMixin, SubDimension): @classmethod def _symbolic_thickness(cls, name): - return (Scalar(name="%s_ltkn" % name, dtype=np.int32, nonnegative=True), - Scalar(name="%s_rtkn" % name, dtype=np.int32, nonnegative=True)) + return (Scalar(name=f"{name}_ltkn", dtype=np.int32, nonnegative=True), + Scalar(name=f"{name}_rtkn", dtype=np.int32, nonnegative=True)) class StencilDimension(BasicDimension): @@ -1571,13 +1561,13 @@ def __init_finalize__(self, name, _min, _max, spacing=1, step=1, self._spacing = sympy.sympify(spacing) if not is_integer(_min): - raise ValueError("Expected integer `min` (got %s)" % _min) + raise ValueError(f"Expected integer `min` (got {_min})") if not is_integer(_max): - raise ValueError("Expected integer `max` (got %s)" % _max) + raise ValueError(f"Expected integer `max` (got {_max})") if not is_integer(self._spacing): - raise ValueError("Expected integer `spacing` (got %s)" % self._spacing) + raise ValueError(f"Expected integer `spacing` (got {self._spacing})") if not is_integer(step): - raise ValueError("Expected integer `step` (got %s)" % step) + raise ValueError(f"Expected integer `step` (got {step})") self._min = int(_min) self._max = int(_max) @@ -1586,7 +1576,7 @@ def 
__init_finalize__(self, name, _min, _max, spacing=1, step=1, self._size = _max - _min + 1 if self._size < 1: - raise ValueError("Expected size greater than 0 (got %s)" % self._size) + raise ValueError(f"Expected size greater than 0 (got {self._size})") @property def step(self): @@ -1860,7 +1850,7 @@ def _separate_dims(cls, d0, d1, ofs_items): def dimensions(names, n=1): if n > 1: - return tuple(Dimension('%s%s' % (names, i)) for i in range(n)) + return tuple(Dimension(f'{names}{i}') for i in range(n)) else: assert type(names) is str return tuple(Dimension(i) for i in names.split()) diff --git a/devito/types/equation.py b/devito/types/equation.py index b1c918978d..3b6625c471 100644 --- a/devito/types/equation.py +++ b/devito/types/equation.py @@ -65,7 +65,7 @@ class Eq(sympy.Eq, Evaluable, Pickable): def __new__(cls, lhs, rhs=0, subdomain=None, coefficients=None, implicit_dims=None, **kwargs): if coefficients is not None: - deprecations.coeff_warn + _ = deprecations.coeff_warn kwargs['evaluate'] = False # Backward compatibility rhs = cls._apply_coeffs(rhs, coefficients) @@ -127,7 +127,7 @@ def _flatten(self): if self.lhs.is_Matrix: # Maps the Equations to retrieve the rhs from relevant lhs try: - eqs = dict(zip(self.lhs, self.rhs)) + eqs = dict(zip(self.lhs, self.rhs, strict=True)) except TypeError: # Same rhs for all lhs assert not self.rhs.is_Matrix @@ -183,7 +183,7 @@ def xreplace(self, rules): return self.func(self.lhs.xreplace(rules), self.rhs.xreplace(rules)) def __str__(self): - return "%s(%s, %s)" % (self.__class__.__name__, self.lhs, self.rhs) + return f"{self.__class__.__name__}({self.lhs}, {self.rhs})" __repr__ = __str__ @@ -198,7 +198,7 @@ class Reduction(Eq): is_Reduction = True def __str__(self): - return "%s(%s, %s)" % (self.__class__.__name__, self.lhs, self.rhs) + return f"{self.__class__.__name__}({self.lhs}, {self.rhs})" __repr__ = __str__ diff --git a/devito/types/grid.py b/devito/types/grid.py index f3e6c6b90b..e166dd1262 100644 --- 
a/devito/types/grid.py +++ b/devito/types/grid.py @@ -1,4 +1,3 @@ -from abc import ABC from collections import namedtuple from functools import cached_property from itertools import product @@ -27,7 +26,7 @@ GlobalLocal = namedtuple('GlobalLocal', 'glb loc') -class CartesianDiscretization(ABC): +class CartesianDiscretization: """ Abstract base class for objects representing discretizations of n-dimensional @@ -160,18 +159,22 @@ def __init__(self, shape, extent=None, origin=None, dimensions=None, ndim = len(shape) assert ndim <= 3 dim_names = self._default_dimensions[:ndim] - dim_spacing = tuple(Spacing(name='h_%s' % n, dtype=dtype, is_const=True) - for n in dim_names) - dimensions = tuple(SpaceDimension(name=n, spacing=s) - for n, s in zip(dim_names, dim_spacing)) + dim_spacing = tuple( + Spacing(name=f'h_{n}', dtype=dtype, is_const=True) + for n in dim_names + ) + dimensions = tuple( + SpaceDimension(name=n, spacing=s) + for n, s in zip(dim_names, dim_spacing, strict=True) + ) else: for d in dimensions: if not d.is_Space: - raise ValueError("Cannot create Grid with Dimension `%s` " - "since it's not a SpaceDimension" % d) + raise ValueError(f"Cannot create Grid with Dimension `{d}` " + "since it's not a SpaceDimension") if d.is_Derived and not d.is_Conditional: - raise ValueError("Cannot create Grid with derived Dimension `%s` " - "of type `%s`" % (d, type(d))) + raise ValueError(f"Cannot create Grid with derived Dimension `{d}` " + f"of type `{type(d)}`") dimensions = dimensions super().__init__(shape, dimensions, dtype) @@ -183,9 +186,8 @@ def __init__(self, shape, extent=None, origin=None, dimensions=None, if len(topology) == len(self.shape): self._topology = topology else: - warning("Ignoring the provided topology `%s` as it " - "is incompatible with the grid shape `%s`" % - (topology, self.shape)) + warning(f"Ignoring the provided topology `{topology}` as it " + f"is incompatible with the grid shape `{self.shape}`") self._topology = None else: self._topology 
= None @@ -198,7 +200,7 @@ def __init__(self, shape, extent=None, origin=None, dimensions=None, # The origin of the grid origin = as_tuple(origin or tuple(0. for _ in self.shape)) self._origin = tuple(dtype(o) for o in origin) - self._origin_symbols = tuple(Scalar(name='o_%s' % d.name, dtype=dtype, + self._origin_symbols = tuple(Scalar(name=f'o_{d.name}', dtype=dtype, is_const=True) for d in self.dimensions) @@ -212,22 +214,21 @@ def __init__(self, shape, extent=None, origin=None, dimensions=None, self._stepping_dim = SteppingDimension(name='t', parent=self.time_dim) elif isinstance(time_dimension, TimeDimension): self._time_dim = time_dimension - self._stepping_dim = SteppingDimension(name='%s_s' % self.time_dim.name, + self._stepping_dim = SteppingDimension(name=f'{self.time_dim.name}_s', parent=self.time_dim) else: raise ValueError("`time_dimension` must be None or of type TimeDimension") # Initialize SubDomains for legacy interface if subdomains is not None: - deprecations.subdomain_warn + _ = deprecations.subdomain_warn self._subdomains = tuple(i for i in (Domain(), Interior(), *as_tuple(subdomains))) for i in self._subdomains: i.__subdomain_finalize_legacy__(self) def __repr__(self): - return "Grid[extent=%s, shape=%s, dimensions=%s]" % ( - self.extent, self.shape, self.dimensions - ) + return 'Grid' + \ + f'[extent={self.extent}, shape={self.shape}, dimensions={self.dimensions}]' @property def extent(self): @@ -247,7 +248,7 @@ def origin_symbols(self): @property def origin_map(self): """Map between origin symbols and their values.""" - return dict(zip(self.origin_symbols, self.origin)) + return dict(zip(self.origin_symbols, self.origin, strict=True)) @property def origin_ioffset(self): @@ -259,8 +260,13 @@ def origin_ioffset(self): @property def origin_offset(self): """Physical offset of the local (per-process) origin from the domain origin.""" - return DimensionTuple(*[i*h for i, h in zip(self.origin_ioffset, self.spacing)], - getters=self.dimensions) + 
return DimensionTuple( + *[ + i*h + for i, h in zip(self.origin_ioffset, self.spacing, strict=True) + ], + getters=self.dimensions + ) @property def time_dim(self): @@ -302,7 +308,7 @@ def spacing_symbols(self): def spacing_map(self): """Map between spacing symbols and their values for each SpaceDimension.""" mapper = {} - for d, s in zip(self.dimensions, self.spacing): + for d, s in zip(self.dimensions, self.spacing, strict=True): if d.is_Conditional: # Special case subsampling: `Grid.dimensions` -> (xb, yb, zb)` # where `xb, yb, zb` are ConditionalDimensions whose parents @@ -313,7 +319,9 @@ def spacing_map(self): # the SpaceDimensions mapper[d.spacing] = s else: - assert False + raise AssertionError( + 'Cannot map between spacing symbol for SpaceDimension' + ) return mapper @@ -325,8 +333,10 @@ def shape_local(self): @property def size_map(self): """Map between SpaceDimensions and their global/local size.""" - return {d: GlobalLocal(g, l) - for d, g, l in zip(self.dimensions, self.shape, self.shape_local)} + return { + d: GlobalLocal(g, l) + for d, g, l in zip(self.dimensions, self.shape, self.shape_local, strict=True) + } @property def topology(self): @@ -463,7 +473,7 @@ def __hash__(self): return hash((self.name, self.dimensions, self.shape, self.dtype)) def __str__(self): - return "%s[%s%s]" % (self.__class__.__name__, self.name, self.dimensions) + return f"{self.__class__.__name__}[{self.name}{self.dimensions}]" __repr__ = __str__ @@ -528,15 +538,17 @@ def comm(self): """The MPI communicator inherited from the distributor.""" if self.grid: return self.grid.comm - raise ValueError("`SubDomain` %s has no `Grid` attached and thus no `comm`" - % self.name) + raise ValueError( + f'`SubDomain` {self.name} has no `Grid` attached and thus no `comm`' + ) def _arg_values(self, **kwargs): try: return self.grid._arg_values(**kwargs) - except AttributeError: - raise AttributeError("%s is not attached to a Grid and has no _arg_values" - % self) + except AttributeError as 
e: + raise AttributeError( + f'{self} is not attached to a Grid and has no _arg_values' + ) from e class SubDomain(AbstractSubDomain): @@ -604,8 +616,12 @@ def __subdomain_finalize_legacy__(self, grid): # Create the SubDomain's SubDimensions sub_dimensions = [] sdshape = [] - for k, v, s in zip(self.define(grid.dimensions).keys(), - self.define(grid.dimensions).values(), grid.shape): + for k, v, s in zip( + self.define(grid.dimensions).keys(), + self.define(grid.dimensions).values(), + grid.shape, + strict=True + ): if isinstance(v, Dimension): sub_dimensions.append(v) sdshape.append(s) @@ -624,11 +640,15 @@ def __subdomain_finalize_legacy__(self, grid): constructor = {'left': SubDimension.left, 'right': SubDimension.right}.get(side) if constructor is None: - raise ValueError(f"Expected sides 'left|right', not `{side}`") + raise ValueError( + f"Expected sides 'left|right', not `{side}`" + ) from None if s - thickness < 0: - raise ValueError(f"Maximum thickness of dimension {k.name} " - f"is {s}, not {thickness}") + raise ValueError( + f"Maximum thickness of dimension {k.name} " + f"is {s}, not {thickness}" + ) from None sub_dimensions.append(constructor(f'i{k.name}', k, thickness)) sdshape.append(thickness) @@ -643,8 +663,10 @@ def shape_local(self): @property def size_map(self): """Map between SpaceDimensions and their global/local size.""" - return {d: GlobalLocal(g, l) - for d, g, l in zip(self.dimensions, self.shape, self.shape_local)} + return { + d: GlobalLocal(g, l) + for d, g, l in zip(self.dimensions, self.shape, self.shape_local, strict=True) + } def define(self, dimensions): """ @@ -661,9 +683,10 @@ def define(self, dimensions): def _arg_names(self): try: ret = self.grid._arg_names - except AttributeError: - msg = f"{self} is not attached to a Grid and has no _arg_names" - raise AttributeError(msg) + except AttributeError as e: + raise AttributeError( + f'{self} is not attached to a Grid and has no _arg_names' + ) from e # Names for SubDomain thicknesses 
thickness_names = tuple([k.name for k in d._thickness_map] @@ -825,7 +848,7 @@ def __init__(self, **kwargs): super().__init__(**kwargs) try: - self.implicit_dimension + _ = self.implicit_dimension warning("`implicit_dimension` is deprecated. You may safely remove it " "from the class definition") except AttributeError: @@ -846,7 +869,7 @@ def __subdomain_finalize_core__(self, grid): shapes = [] for i in range(self._n_domains): dshape = [] - for s, m, M in zip(grid.shape, d_m, d_M): + for s, m, M in zip(grid.shape, d_m, d_M, strict=True): assert(m.size == M.size) dshape.append(s-m[i]-M[i]) shapes.append(as_tuple(dshape)) @@ -855,7 +878,7 @@ def __subdomain_finalize_core__(self, grid): if grid.distributor and grid.distributor.is_parallel: # Now create local bounds based on distributor processed = [] - for dec, m, M in zip(grid.distributor.decomposition, d_m, d_M): + for dec, m, M in zip(grid.distributor.decomposition, d_m, d_M, strict=True): processed.extend(self._bounds_glb_to_loc(dec, m, M)) self._local_bounds = as_tuple(processed) else: @@ -1086,7 +1109,7 @@ def _parse_border(border: BorderSpec, grid: Grid, raise ValueError(f"Length of {mode} specification should " "match number of dimensions") retval = [] - for b, d in zip(border, grid.dimensions): + for b, d in zip(border, grid.dimensions, strict=True): if isinstance(b, tuple): if not len(b) == 2: raise ValueError(f"{b}: more than two thicknesses supplied " @@ -1142,7 +1165,13 @@ def _build_domains_nooverlap(self, grid: Grid) -> tuple[int, tuple[np.ndarray]]: # Unpack the user-provided specification into a set of sides (on which # a cartesian product is taken) and a mapper from those sides to a set of # bounds for each dimension. 
- for d, s, b, i in zip(grid.dimensions, grid.shape, self.border, self.inset): + for d, s, b, i in zip( + grid.dimensions, + grid.shape, + self.border, + self.inset, + strict=True + ): if d in self.border_dims: side = self.border_dims[d] @@ -1173,12 +1202,14 @@ def _build_domains_nooverlap(self, grid: Grid) -> tuple[int, tuple[np.ndarray]]: maybe_domains = list(product(*domain_map.values())) domains = [] for d in maybe_domains: - if not all(i is CENTER for i in d): + if not all(i is CENTER for i in d): # noqa: SIM102 # Don't add any domains that are completely centered if self.corners != 'nocorners' or any(i is CENTER for i in d): # Don't add corners if 'no corners' option selected - domains.append([interval_map[dim][dom] for (dim, dom) - in zip(grid.dimensions, d)]) + domains.append([ + interval_map[dim][dom] + for (dim, dom) in zip(grid.dimensions, d, strict=True) + ]) domains = np.array(domains) @@ -1201,7 +1232,7 @@ class Domain(SubDomain): name = 'domain' def define(self, dimensions): - return dict(zip(dimensions, dimensions)) + return dict(zip(dimensions, dimensions, strict=True)) class Interior(SubDomain): diff --git a/devito/types/lazy.py b/devito/types/lazy.py index f4279327d9..3ac1b0540a 100644 --- a/devito/types/lazy.py +++ b/devito/types/lazy.py @@ -23,7 +23,10 @@ def _evaluate_maybe_nested(cls, maybe_evaluable, **kwargs): if maybe_evaluable.args: args = [Evaluable._evaluate_maybe_nested(i, **kwargs) for i in maybe_evaluable.args] - evaluate = not all(i is j for i, j in zip(args, maybe_evaluable.args)) + evaluate = not all( + i is j + for i, j in zip(args, maybe_evaluable.args, strict=True) + ) try: return maybe_evaluable.func(*args, evaluate=evaluate) except TypeError: @@ -52,7 +55,7 @@ def _evaluate(self, **kwargs): property `evaluate`. 
""" args = self._evaluate_args(**kwargs) - evaluate = not all(i is j for i, j in zip(args, self.args)) + evaluate = not all(i is j for i, j in zip(args, self.args, strict=True)) return self.func(*args, evaluate=evaluate) @cached_property diff --git a/devito/types/misc.py b/devito/types/misc.py index 0067d8b318..571197b717 100644 --- a/devito/types/misc.py +++ b/devito/types/misc.py @@ -109,7 +109,7 @@ def __new__(cls, base, *args, strides_map=None, accessor=None): return obj def __repr__(self): - return "%s(%s)" % (self.name, ", ".join(str(i) for i in self.indices)) + return "{}({})".format(self.name, ", ".join(str(i) for i in self.indices)) __str__ = __repr__ @@ -156,7 +156,10 @@ def bind(self, pname): macroargnames = [d.name for d in f.dimensions] macroargs = [MacroArgument(i) for i in macroargnames] - items = [m*strides_map[d] for m, d in zip(macroargs, f.dimensions[1:])] + items = [ + m*strides_map[d] + for m, d in zip(macroargs, f.dimensions[1:], strict=False) + ] items.append(MacroArgument(f.dimensions[-1].name)) define = DefFunction(pname, macroargnames) @@ -356,8 +359,10 @@ def __init__(self, opening, **kwargs): self.opening = opening def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, - 'OPEN' if self.opening else 'CLOSE') + return "{}({})".format( + self.__class__.__name__, + 'OPEN' if self.opening else 'CLOSE' + ) __str__ = __repr__ diff --git a/devito/types/object.py b/devito/types/object.py index 51790b35bc..4c30e6f8ff 100644 --- a/devito/types/object.py +++ b/devito/types/object.py @@ -1,3 +1,4 @@ +from contextlib import suppress from ctypes import byref import sympy @@ -207,11 +208,9 @@ def free_symbols(self): ret = set() ret.update(super().free_symbols) for i in self.cargs: - try: + with suppress(AttributeError): + # AttributeError with pure integers ret.update(i.free_symbols) - except AttributeError: - # E.g., pure integers - pass return ret @property diff --git a/devito/types/parallel.py b/devito/types/parallel.py index 
89ebaab520..891eaad176 100644 --- a/devito/types/parallel.py +++ b/devito/types/parallel.py @@ -72,7 +72,7 @@ def _arg_defaults(self, **kwargs): try: npthreads = kwargs['metadata']['npthreads'] except KeyError: - raise InvalidArgument("Cannot determine `npthreads`") + raise InvalidArgument("Cannot determine `npthreads`") from None # If a symbolic object, it must be resolved if isinstance(npthreads, NPThreads): @@ -124,8 +124,9 @@ def _arg_values(self, **kwargs): if v < self.size: return {self.name: v} else: - raise InvalidArgument("Illegal `%s=%d`. It must be `%s<%d`" - % (self.name, v, self.name, self.size)) + raise InvalidArgument( + f'Illegal `{self.name}={v}`. It must be `{self.name}<{self.size}`' + ) else: return self._arg_defaults() @@ -251,10 +252,12 @@ def __init_finalize__(self, *args, **kwargs): dimensions = as_tuple(kwargs.get('dimensions')) if len(dimensions) != 1: - raise ValueError("Expected exactly one Dimension, got `%d`" % len(dimensions)) + raise ValueError( + f'Expected exactly one Dimension, got `{len(dimensions)}`' + ) d, = dimensions if not is_integer(d.symbolic_size): - raise ValueError("`%s` must have fixed size" % d) + raise ValueError(f"`{d}` must have fixed size") kwargs.setdefault('initvalue', np.full(d.symbolic_size, 2, dtype=np.int32)) super().__init_finalize__(*args, **kwargs) diff --git a/devito/types/sparse.py b/devito/types/sparse.py index cea1f65664..c319395419 100644 --- a/devito/types/sparse.py +++ b/devito/types/sparse.py @@ -1,4 +1,5 @@ from collections import OrderedDict +from contextlib import suppress from functools import cached_property from itertools import product @@ -85,7 +86,7 @@ def __indices_setup__(cls, *args, **kwargs): except (KeyError, AttributeError): continue else: - sparse_dim = Dimension(name='p_%s' % kwargs["name"]) + sparse_dim = Dimension(name='p_{}'.format(kwargs["name"])) dimensions = as_tuple(kwargs.get('dimensions')) if not dimensions: @@ -119,7 +120,7 @@ def __shape_setup__(cls, **kwargs): else: 
loc_shape = []
             assert len(dimensions) == len(shape)
-            for i, (d, s) in enumerate(zip(dimensions, shape)):
+            for i, (d, s) in enumerate(zip(dimensions, shape, strict=True)):
                 if i == cls._sparse_position or \
                         (cls._sparse_position == -1 and i == len(dimensions)-1):
                     loc_shape.append(glb_npoint[grid.distributor.myrank])
@@ -170,7 +171,7 @@ def __subfunc_setup__(self, suffix, keys, dtype=None, inkwargs=False, **kwargs):
             return None
 
         # Shape and dimensions from args
-        name = '%s_%s' % (self.name, suffix)
+        name = f'{self.name}_{suffix}'
 
         if key is not None and not isinstance(key, SubFunction):
             key = np.array(key)
@@ -209,8 +210,10 @@ def __subfunc_setup__(self, suffix, keys, dtype=None, inkwargs=False, **kwargs):
             if shape != key.shape and \
                     key.shape != (shape[1],) and \
                     self._distributor.nprocs == 1:
-                raise ValueError("Incompatible shape for %s, `%s`; expected `%s`" %
-                                 (suffix, key.shape[:2], shape))
+                raise ValueError(
+                    f'Incompatible shape for {suffix}, `{key.shape[:2]}`; '
+                    f'expected `{shape}`'
+                )
 
             # Infer dtype
             if np.issubdtype(key.dtype.type, np.integer):
@@ -221,10 +224,7 @@ def __subfunc_setup__(self, suffix, keys, dtype=None, inkwargs=False, **kwargs):
             # Whether to initialize the subfunction with the provided data
             # Useful when rebuilding with a placeholder array only used to
             # infer shape and dtype and set the actual data later
-            if kwargs.get('init_subfunc', True):
-                init = {'initializer': key}
-            else:
-                init = {}
+            init = {'initializer': key} if kwargs.get('init_subfunc', True) else {}
 
             # Complex coordinates are not valid, so fall back to corresponding
             # real floating point type if dtype is complex.
@@ -240,7 +240,7 @@ def __subfunc_setup__(self, suffix, keys, dtype=None, inkwargs=False, **kwargs):
             # running with MPI and some processes get 0-size arrays after
             # domain decomposition. 
We "touch" the data anyway to avoid the # case ``self._data is None`` - sf.data + _ = sf.data return sf @@ -353,7 +353,7 @@ def coordinates_data(self): @cached_property def _pos_symbols(self): - return [Symbol(name='pos%s' % d, dtype=np.int32) + return [Symbol(name=f'pos{d}', dtype=np.int32) for d in self.grid.dimensions] @cached_property @@ -371,11 +371,16 @@ def _position_map(self): Symbols map for the physical position of the sparse points relative to the grid origin. """ - return OrderedDict([((c - o)/d.spacing, p) - for p, c, d, o in zip(self._pos_symbols, - self._coordinate_symbols, - self.grid.dimensions, - self.grid.origin_symbols)]) + return OrderedDict([ + ((c - o)/d.spacing, p) + for p, c, d, o in zip( + self._pos_symbols, + self._coordinate_symbols, + self.grid.dimensions, + self.grid.origin_symbols, + strict=True + ) + ]) @cached_property def dist_origin(self): @@ -415,7 +420,11 @@ def guard(self, expr=None): temps = self.interpolator._positions(self.dimensions) # Create positions and indices temporaries/indirections - for ((di, d), pos) in zip(enumerate(self.grid.dimensions), pmap.values()): + for d, pos in zip( + self.grid.dimensions, + pmap.values(), + strict=True + ): # Add conditional to avoid OOB lb = sympy.And(pos >= d.symbolic_min, evaluate=False) ub = sympy.And(pos <= d.symbolic_max, evaluate=False) @@ -482,7 +491,7 @@ def _dist_alltoall(self, dmap=None): # Per-rank shape of send/recv data sshape = [] rshape = [] - for s, r in zip(ssparse, rsparse): + for s, r in zip(ssparse, rsparse, strict=True): handle = list(self.shape) handle[self._sparse_position] = s sshape.append(tuple(handle)) @@ -586,7 +595,8 @@ def _dist_subfunc_scatter(self, subfunc): dmap = self._dist_datamap mask = self._dist_scatter_mask(dmap=dmap) - # Pack (reordered) SubFunction values so that they can be sent out via an Alltoallv + # Pack (reordered) SubFunction values so that they can be sent out + # via an Alltoallv sfuncd = subfunc.data._local[mask[self._sparse_position]] 
# Send out the sparse point SubFunction @@ -608,10 +618,8 @@ def _dist_data_gather(self, data): return # Compute dist map only once - try: + with suppress(AttributeError): data = self._C_as_ndarray(data) - except AttributeError: - pass dmap = self._dist_datamap mask = self._dist_scatter_mask(dmap=dmap) @@ -630,10 +638,8 @@ def _dist_data_gather(self, data): self._data[mask] = gathered[:] def _dist_subfunc_gather(self, sfuncd, subfunc): - try: + with suppress(AttributeError): sfuncd = subfunc._C_as_ndarray(sfuncd) - except AttributeError: - pass # If not using MPI, don't waste time if self._distributor.nprocs == 1 or self.is_local: return @@ -642,7 +648,8 @@ def _dist_subfunc_gather(self, sfuncd, subfunc): dmap = self._dist_datamap mask = self._dist_scatter_mask(dmap=dmap) - # Pack (reordered) SubFunction values so that they can be sent out via an Alltoallv + # Pack (reordered) SubFunction values so that they can be sent out + # via an Alltoallv if self.dist_origin[subfunc] is not None: sfuncd = sfuncd + np.array(self.dist_origin[subfunc], dtype=subfunc.dtype) @@ -695,7 +702,7 @@ def _arg_defaults(self, alias=None, estimate_memory=False): # self's local domain only for k, v in self._dist_scatter(alias=alias).items(): args[mapper[k].name] = v - for i, s in zip(mapper[k].indices, v.shape): + for i, s in zip(mapper[k].indices, v.shape, strict=True): args.update(i._arg_defaults(_min=0, size=s)) return args @@ -713,7 +720,7 @@ def _arg_values(self, estimate_memory=False, **kwargs): values = {} for k, v in self._dist_scatter(data=new).items(): values[k.name] = v - for i, s in zip(k.indices, v.shape): + for i, s in zip(k.indices, v.shape, strict=True): size = s - sum(k._size_nodomain[i]) values.update(i._arg_defaults(size=size)) else: @@ -729,7 +736,7 @@ def _arg_apply(self, dataobj, alias=None): key._dist_data_gather(dataobj) elif self._distributor.nprocs > 1: raise NotImplementedError("Don't know how to gather data from an " - "object of type `%s`" % type(key)) + 
f"object of type `{type(key)}`")
 
 
 class AbstractSparseTimeFunction(AbstractSparseFunction):
@@ -1206,8 +1213,10 @@ def __init_finalize__(self, *args, **kwargs):
                 if nr == r:
                     r = r // 2
                 else:
-                    raise ValueError("Interpolation coefficients shape %d do "
-                                     "not match specified radius %d" % (r, nr))
+                    raise ValueError(
+                        f'Interpolation coefficients shape {r} do not match '
+                        f'specified radius {nr}'
+                    )
 
             self._radius = r
             self._dist_origin.update({self._interpolation_coeffs: None})
@@ -1227,9 +1236,14 @@ def _coordinate_symbols(self):
         """Symbol representing the coordinate values in each Dimension."""
         if self.gridpoints is not None:
             d_dim = self.gridpoints.dimensions[1]
-            return tuple([self.gridpoints._subs(d_dim, di) * d.spacing + o
-                          for ((di, d), o) in zip(enumerate(self.grid.dimensions),
-                                                  self.grid.origin)])
+            return tuple([
+                self.gridpoints._subs(d_dim, di) * d.spacing + o
+                for ((di, d), o) in zip(
+                    enumerate(self.grid.dimensions),
+                    self.grid.origin,
+                    strict=True
+                )
+            ])
         else:
             d_dim = self.coordinates.dimensions[1]
             return tuple([self.coordinates._subs(d_dim, i)
@@ -1252,9 +1266,14 @@ def _position_map(self):
         """
         if self.gridpoints_data is not None:
             ddim = self.gridpoints.dimensions[-1]
-            return OrderedDict((self.gridpoints._subs(ddim, di), p)
-                               for (di, p) in zip(range(self.grid.dim),
-                                                  self._pos_symbols))
+            return OrderedDict(
+                (self.gridpoints._subs(ddim, di), p)
+                for (di, p) in zip(
+                    range(self.grid.dim),
+                    self._pos_symbols,
+                    strict=True
+                )
+            )
         else:
             return super()._position_map
@@ -1453,11 +1472,11 @@ def __init_finalize__(self, *args, **kwargs):
         # Validate radius is set correctly for all grid Dimensions
         for d in self.grid.dimensions:
             if d not in r:
-                raise ValueError("dimension %s not specified in r mapping" % d)
+                raise ValueError(f"dimension {d} not specified in r mapping")
             if r[d] is None:
                 continue
             if not is_integer(r[d]) or r[d] <= 0:
-                raise ValueError('invalid parameter value r[%s] = %s' % (d, r[d]))
+                raise ValueError(f'invalid parameter value 
r[{d}] = {r[d]}') # TODO is this going to cause some trouble with users of self.r? self._radius = r @@ -1476,10 +1495,10 @@ def __init_finalize__(self, *args, **kwargs): # Sources have their own Dimension # As do Locations - locdim = Dimension('loc_%s' % self.name) + locdim = Dimension(f'loc_{self.name}') self._gridpoints = SubFunction( - name="%s_gridpoints" % self.name, + name=f"{self.name}_gridpoints", dtype=np.int32, dimensions=(locdim, ddim), shape=(nloc, self.grid.dim), @@ -1494,7 +1513,7 @@ def __init_finalize__(self, *args, **kwargs): for d in self.grid.dimensions: if self._radius[d] is not None: rdim = DefaultDimension( - name='r%s_%s' % (d.name, self.name), + name=f'r{d.name}_{self.name}', default_value=self._radius[d] ) self.rdims.append(rdim) @@ -1505,7 +1524,7 @@ def __init_finalize__(self, *args, **kwargs): coeff_shape = self.grid.size_map[d].glb self.interpolation_coefficients[d] = SubFunction( - name="%s_coefficients_%s" % (self.name, d.name), + name=f"{self.name}_coefficients_{d.name}", dtype=self.dtype, dimensions=(locdim, coeff_dim), shape=(nloc, coeff_shape), @@ -1515,7 +1534,7 @@ def __init_finalize__(self, *args, **kwargs): # For the _sub_functions, these must be named attributes of # this SparseFunction object setattr( - self, "coefficients_%s" % d.name, + self, f"coefficients_{d.name}", self.interpolation_coefficients[d]) # We also need arrays to represent the sparse matrix map @@ -1523,7 +1542,7 @@ def __init_finalize__(self, *args, **kwargs): # constructing the expression, # - the mpi logic dynamically constructs arrays to feed to the # operator C code. 
- self.nnzdim = Dimension('nnz_%s' % self.name) + self.nnzdim = Dimension(f'nnz_{self.name}') # In the non-MPI case, at least, we should fill these in once if self._distributor.nprocs == 1: @@ -1533,7 +1552,7 @@ def __init_finalize__(self, *args, **kwargs): nnz_size = 1 self._mrow = DynamicSubFunction( - name='mrow_%s' % self.name, + name=f'mrow_{self.name}', dtype=np.int32, dimensions=(self.nnzdim,), shape=(nnz_size,), @@ -1542,7 +1561,7 @@ def __init_finalize__(self, *args, **kwargs): allocator=self._allocator, ) self._mcol = DynamicSubFunction( - name='mcol_%s' % self.name, + name=f'mcol_{self.name}', dtype=np.int32, dimensions=(self.nnzdim,), shape=(nnz_size,), @@ -1551,7 +1570,7 @@ def __init_finalize__(self, *args, **kwargs): allocator=self._allocator, ) self._mval = DynamicSubFunction( - name='mval_%s' % self.name, + name=f'mval_{self.name}', dtype=self.dtype, dimensions=(self.nnzdim,), shape=(nnz_size,), @@ -1564,12 +1583,12 @@ def __init_finalize__(self, *args, **kwargs): # coordinate of the parallelised injection Dimension # This takes the form of a list of nnz indices, and a start/end # position in that list for each index in the parallel dim - self.par_dim_to_nnz_dim = DynamicDimension('par_dim_to_nnz_%s' % self.name) + self.par_dim_to_nnz_dim = DynamicDimension(f'par_dim_to_nnz_{self.name}') # This map acts as an indirect sort of the sources according to their # position along the parallelisation dimension self._par_dim_to_nnz_map = DynamicSubFunction( - name='par_dim_to_nnz_map_%s' % self.name, + name=f'par_dim_to_nnz_map_{self.name}', dtype=np.int32, dimensions=(self.par_dim_to_nnz_dim,), # shape is unknown at this stage @@ -1578,7 +1597,7 @@ def __init_finalize__(self, *args, **kwargs): parent=self, ) self._par_dim_to_nnz_m = DynamicSubFunction( - name='par_dim_to_nnz_m_%s' % self.name, + name=f'par_dim_to_nnz_m_{self.name}', dtype=np.int32, dimensions=(self._par_dim,), # shape is unknown at this stage @@ -1587,7 +1606,7 @@ def 
__init_finalize__(self, *args, **kwargs): parent=self, ) self._par_dim_to_nnz_M = DynamicSubFunction( - name='par_dim_to_nnz_M_%s' % self.name, + name=f'par_dim_to_nnz_M_{self.name}', dtype=np.int32, dimensions=(self._par_dim,), # shape is unknown at this stage @@ -1662,7 +1681,7 @@ def par_dim_to_nnz_M(self): @property def _sub_functions(self): return ('gridpoints', - *['coefficients_%s' % d.name for d in self.grid.dimensions], + *[f'coefficients_{d.name}' for d in self.grid.dimensions], 'mrow', 'mcol', 'mval', 'par_dim_to_nnz_map', 'par_dim_to_nnz_m', 'par_dim_to_nnz_M') @@ -1836,7 +1855,7 @@ def __shape_setup__(cls, **kwargs): def _arg_names(self): """Return a tuple of argument names introduced by this function.""" return tuple([self.name, self.name + "_" + self.gridpoints.name] - + ['%s_%s' % (self.name, x.name) + + [f'{self.name}_{x.name}' for x in self.interpolation_coefficients.values()]) @property @@ -1937,9 +1956,10 @@ def _rank_to_points(self): # so we argsort inverse_argsort = np.argsort(inverse).astype(np.int32) cumulative_counts = np.cumsum(counts) - gp_map = {tuple(bi): inverse_argsort[cci-ci:cci] - for bi, cci, ci in zip(bins, cumulative_counts, counts) - } + gp_map = { + tuple(bi): inverse_argsort[cci-ci:cci] + for bi, cci, ci in zip(bins, cumulative_counts, counts, strict=True) + } # the result is now going to be a concatenation of these lists # for each of the output ranks @@ -1957,8 +1977,10 @@ def _rank_to_points(self): from itertools import product for bi in bins: # This is a list of sets for the Dimension-specific rank - dim_rank_sets = [dgdr[bii] - for dgdr, bii in zip(dim_group_dim_rank, bi)] + dim_rank_sets = [ + dgdr[bii] + for dgdr, bii in zip(dim_group_dim_rank, bi, strict=True) + ] # Convert these to an absolute rank # This is where we will throw a KeyError if there are points OOB @@ -1970,9 +1992,12 @@ def _rank_to_points(self): empty = np.array([], dtype=np.int32) - return [np.concatenate(( - empty, *[gp_map[bi] for bi in 
global_rank_to_bins.get(rank, [])])) - for rank in range(distributor.comm.Get_size())] + return [ + np.concatenate( + (empty, *[gp_map[bi] for bi in global_rank_to_bins.get(rank, [])]) + ) + for rank in range(distributor.comm.Get_size()) + ] def _build_par_dim_to_nnz(self, active_gp, active_mrow): # The case where we parallelise over a non-local index is suboptimal, but @@ -2084,7 +2109,7 @@ def manual_scatter(self, *, data_all_zero=False): # handle None radius r_tuple_no_none = tuple( ri if ri is not None else self.grid.size_map[d].glb - for ri, d in zip(r_tuple, self.grid.dimensions) + for ri, d in zip(r_tuple, self.grid.dimensions, strict=True) ) # now all ranks can allocate the buffers to receive into @@ -2134,7 +2159,7 @@ def manual_scatter(self, *, data_all_zero=False): # first, build a reduced matrix excluding any points outside our domain for idim, (dim, mycoord) in enumerate(zip( - self.grid.dimensions, distributor.mycoords)): + self.grid.dimensions, distributor.mycoords, strict=True)): _left = distributor.decomposition[idim][mycoord][0] _right = distributor.decomposition[idim][mycoord][-1] + 1 @@ -2158,7 +2183,7 @@ def manual_scatter(self, *, data_all_zero=False): # domain. Do this on all the gridpoints for now, since this is a hack # anyway for idim, (dim, mycoord) in enumerate(zip( - self.grid.dimensions, distributor.mycoords)): + self.grid.dimensions, distributor.mycoords, strict=True)): _left = distributor.decomposition[idim][mycoord][0] _right = distributor.decomposition[idim][mycoord][-1] + 1 @@ -2217,7 +2242,7 @@ def _arg_apply(self, dataobj, *subfuncs, alias=None): key._dist_gather(self._C_as_ndarray(dataobj)) elif self._distributor.nprocs > 1: raise NotImplementedError("Don't know how to gather data from an " - "object of type `%s`" % type(key)) + f"object of type `{type(key)}`") def manual_gather(self): # data, in this case, is set to whatever dist_scatter provided? 
diff --git a/devito/types/tensor.py b/devito/types/tensor.py index 7a3f41d3e0..432d8a4437 100644 --- a/devito/types/tensor.py +++ b/devito/types/tensor.py @@ -154,8 +154,10 @@ def __getattr__(self, name): return super().__getattr__(self, name) try: return self.applyfunc(lambda x: x if x == 0 else getattr(x, name)) - except: - raise AttributeError("%r object has no attribute %r" % (self.__class__, name)) + except Exception as e: + raise AttributeError( + f'{self.__class__!r} object has no attribute {name!r}' + ) from e def _eval_at(self, func): """ @@ -250,7 +252,7 @@ def div(self, shift=None, order=None, method='FD', side=None, **kwargs): shift_x0 = make_shift_x0(shift, (ndim, ndim)) order = order or self.space_order for i in range(len(self.space_dimensions)): - comps.append(sum([getattr(self[j, i], 'd%s' % d.name) + comps.append(sum([getattr(self[j, i], f'd{d.name}') (x0=shift_x0(shift, d, i, j), fd_order=order, method=method, side=side, w=w) for j, d in enumerate(space_dims)])) @@ -296,7 +298,7 @@ def laplacian(self, shift=None, order=None, method='FD', side=None, **kwargs): ndim = len(self.space_dimensions) shift_x0 = make_shift_x0(shift, (ndim, ndim)) for j in range(ndim): - comps.append(sum([getattr(self[j, i], 'd%s2' % d.name) + comps.append(sum([getattr(self[j, i], f'd{d.name}2') (x0=shift_x0(shift, d, j, i), fd_order=order, method=method, side=side, w=w) for i, d in enumerate(space_dims)])) @@ -361,8 +363,8 @@ def __subfunc_setup__(cls, *args, **kwargs): # Custom repr and str def __str__(self): - st = ''.join([' %-2s,' % c for c in self])[1:-1] - return "Vector(%s)" % st + st = ''.join([' %-2s,' % c for c in self])[1:-1] # noqa: UP031 + return f"Vector({st})" __repr__ = __str__ @@ -390,10 +392,15 @@ def div(self, shift=None, order=None, method='FD', side=None, **kwargs): shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),)) order = order or self.space_order space_dims = self.root_dimensions - return sum([getattr(self[i], 'd%s' % 
d.name)(x0=shift_x0(shift, d, None, i), - fd_order=order, method=method, - side=side, w=w) - for i, d in enumerate(space_dims)]) + return sum([ + getattr(self[i], f'd{d.name}')( + x0=shift_x0(shift, d, None, i), + fd_order=order, + method=method, + side=side, + w=w + ) for i, d in enumerate(space_dims) + ]) @property def laplace(self): @@ -427,11 +434,17 @@ def laplacian(self, shift=None, order=None, method='FD', side=None, **kwargs): shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),)) order = order or self.space_order space_dims = self.root_dimensions - comps = [sum([getattr(s, 'd%s2' % d.name)(x0=shift_x0(shift, d, None, i), - fd_order=order, method=method, - side=side, w=w) - for i, d in enumerate(space_dims)]) - for s in self] + comps = [ + sum([ + getattr(s, f'd{d.name}2')( + x0=shift_x0(shift, d, None, i), + fd_order=order, + side=side, + w=w, + method=method + ) for i, d in enumerate(space_dims) + ]) for s in self + ] return func._new(comps) def curl(self, shift=None, order=None, method='FD', side=None, **kwargs): @@ -459,7 +472,7 @@ def curl(self, shift=None, order=None, method='FD', side=None, **kwargs): # The curl of a VectorFunction is a VectorFunction w = kwargs.get('weights', kwargs.get('w')) dims = self.root_dimensions - derivs = ['d%s' % d.name for d in dims] + derivs = [f'd{d.name}' for d in dims] shift_x0 = make_shift_x0(shift, (len(dims), len(dims))) order = order or self.space_order comp1 = (getattr(self[2], derivs[1])(x0=shift_x0(shift, dims[1], 2, 1), @@ -509,11 +522,17 @@ def grad(self, shift=None, order=None, method='FD', side=None, **kwargs): shift_x0 = make_shift_x0(shift, (ndim, ndim)) order = order or self.space_order space_dims = self.root_dimensions - comps = [[getattr(f, 'd%s' % d.name)(x0=shift_x0(shift, d, i, j), - fd_order=order, method=method, - side=side, w=w) - for j, d in enumerate(space_dims)] - for i, f in enumerate(self)] + comps = [ + [ + getattr(f, f'd{d.name}')( + x0=shift_x0(shift, d, i, j), + side=side, + w=w, 
+ fd_order=order, + method=method + ) for j, d in enumerate(space_dims) + ] for i, f in enumerate(self) + ] return func._new(comps) def outer(self, other): diff --git a/devito/types/utils.py b/devito/types/utils.py index 25b697f2bd..fd0dfdb750 100644 --- a/devito/types/utils.py +++ b/devito/types/utils.py @@ -88,7 +88,7 @@ def __init__(self, suffix=''): self.suffix = suffix def __repr__(self): - return "Layer<%s>" % self.suffix + return f"Layer<{self.suffix}>" def __eq__(self, other): return (isinstance(other, HierarchyLayer) and diff --git a/examples/seismic/tutorials/12_time_blocking.ipynb b/examples/seismic/tutorials/12_time_blocking.ipynb index 4079423a32..192532bf5a 100644 --- a/examples/seismic/tutorials/12_time_blocking.ipynb +++ b/examples/seismic/tutorials/12_time_blocking.ipynb @@ -295,6 +295,8 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Install pyzfp package in the current Jupyter kernel\n", + "import sys\n", + "_ = sys.executable\n", "!{sys.executable} -m pip install blosc\n", "import blosc" ] diff --git a/requirements-testing.txt b/requirements-testing.txt index 464dbc15d3..b3d7f3db49 100644 --- a/requirements-testing.txt +++ b/requirements-testing.txt @@ -9,3 +9,4 @@ click<9.0 cloudpickle<3.1.3 ipympl<0.9.9 ipykernel<7.0.0 +pytest-timeout diff --git a/tests/test_data.py b/tests/test_data.py index f7a30b2bd9..62efb93739 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -666,7 +666,8 @@ def test_getitem(self, mode): assert np.all(result[3] == [[3, 2, 1, 0]]) result1 = np.array(f.data[5, 6:1:-1]) - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] \ + or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert result1.size == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(result1 == [[46, 45]]) @@ -674,7 +675,8 @@ def test_getitem(self, mode): assert np.all(result1 == [[44, 43, 42]]) result2 = 
np.array(f.data[6:4:-1, 6:1:-1]) - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] \ + or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert result2.size == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(result2[0] == [[54, 53]]) @@ -684,7 +686,8 @@ def test_getitem(self, mode): assert np.all(result2[1] == [[44, 43, 42]]) result3 = np.array(f.data[6:4:-1, 2:7]) - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] \ + or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert result3.size == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(result3[0] == [[50, 51]]) @@ -779,7 +782,8 @@ def test_setitem(self, mode): [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) - elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y] or RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: + elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y] \ + or RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(np.array(g.data)) == 0 else: assert np.all(np.array(g.data)) == 0 @@ -914,7 +918,9 @@ def test_niche_slicing(self, mode): t.data[:] = b tdat0 = np.array(f.data[-2::, -2::]) - if LEFT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0] or LEFT in glb_pos_map0[x0] and RIGHT in glb_pos_map0[y0] or RIGHT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0]: + if LEFT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0] \ + or LEFT in glb_pos_map0[x0] and RIGHT in glb_pos_map0[y0] \ + or RIGHT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0]: assert tdat0.size == 0 else: assert np.all(tdat0 == [[54, 55], @@ -1080,7 +1086,8 @@ def test_neg_start_stop(self, mode): h.data[8:10, 0:4] = f.data[slices] - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in 
glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] \ + or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert np.count_nonzero(h.data[:]) == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(np.array(h.data) == [[0, 0, 0, 0, 0, 0], diff --git a/tests/test_dimension.py b/tests/test_dimension.py index d72d3404a2..b27b727eff 100644 --- a/tests/test_dimension.py +++ b/tests/test_dimension.py @@ -1809,9 +1809,12 @@ def test_issue_2007(self): # proxy integral f.data[:] = np.array(freq[:]) # Proxy Fourier integral holder - u_re = Function(name="u_re", grid=grid, - dimensions=(freq_dim,) + u.indices[1:], - shape=(nfreq,) + u.shape[1:]) + u_re = Function( + name="u_re", + grid=grid, + dimensions=(freq_dim,) + u.indices[1:], + shape=(nfreq,) + u.shape[1:] + ) # ConditionalDimension based on `f` to simulate bounds of Fourier integral ct = ConditionalDimension(name="ct", parent=time, condition=Ge(time, f)) From 5306fb9e506bea240a7f79c03a7ef48bdad12f04 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Sun, 4 Jan 2026 21:26:34 +0000 Subject: [PATCH 28/42] misc: Automated linting fixes in examples, tests, scripts --- benchmarks/user/advisor/advisor_logging.py | 8 +- benchmarks/user/advisor/roofline.py | 2 +- benchmarks/user/advisor/run_advisor.py | 4 +- benchmarks/user/benchmark.py | 30 +- conftest.py | 21 +- examples/cfd/01_convection.ipynb | 16 +- examples/cfd/01_convection_revisited.ipynb | 14 +- examples/cfd/02_convection_nonlinear.ipynb | 16 +- examples/cfd/03_diffusion.ipynb | 48 +- examples/cfd/03_diffusion_nonuniform.ipynb | 57 +- examples/cfd/04_burgers.ipynb | 33 +- examples/cfd/05_laplace.ipynb | 10 +- examples/cfd/06_poisson.ipynb | 32 +- examples/cfd/07_cavity_flow.ipynb | 152 +++--- examples/cfd/08_shallow_water_equation.ipynb | 118 ++--- examples/cfd/09_Darcy_flow_equation.ipynb | 41 +- examples/compiler/01_data_regions.ipynb | 4 +- examples/compiler/02_indexification.ipynb | 6 +- examples/compiler/04_iet-B.ipynb 
| 15 +- examples/finance/bs_ivbp.ipynb | 95 ++-- .../performance/02_advisor_roofline.ipynb | 6 +- .../seismic/abc_methods/01_introduction.ipynb | 139 ++--- examples/seismic/abc_methods/02_damping.ipynb | 236 +++++---- examples/seismic/abc_methods/03_pml.ipynb | 322 +++++------ examples/seismic/abc_methods/04_habc.ipynb | 500 +++++++++--------- examples/seismic/acoustic/accuracy.ipynb | 86 ++- examples/seismic/model.py | 15 +- .../sa_01_iso_implementation1.ipynb | 108 ++-- .../sa_02_iso_implementation2.ipynb | 104 ++-- .../self_adjoint/sa_03_iso_correctness.ipynb | 36 +- .../self_adjoint/test_wavesolver_iso.py | 3 +- examples/seismic/self_adjoint/utils.py | 12 +- examples/seismic/source.py | 3 +- examples/seismic/tti/operators.py | 5 +- examples/seismic/tti/tti_example.py | 10 +- examples/seismic/tti/wavesolver.py | 5 +- examples/seismic/tutorials/01_modelling.ipynb | 14 +- examples/seismic/tutorials/02_rtm.ipynb | 13 +- examples/seismic/tutorials/03_fwi.ipynb | 28 +- examples/seismic/tutorials/04_dask.ipynb | 12 +- .../seismic/tutorials/04_dask_pickling.ipynb | 53 +- .../tutorials/05_staggered_acoustic.ipynb | 10 +- examples/seismic/tutorials/06_elastic.ipynb | 41 +- .../06_elastic_varying_parameters.ipynb | 48 +- .../tutorials/07.1_dispersion_relation.ipynb | 59 ++- .../seismic/tutorials/07_DRP_schemes.ipynb | 42 +- .../seismic/tutorials/08_snapshotting.ipynb | 50 +- .../seismic/tutorials/09_viscoelastic.ipynb | 52 +- .../seismic/tutorials/10_nmo_correction.ipynb | 41 +- .../seismic/tutorials/11_viscoacoustic.ipynb | 31 +- .../seismic/tutorials/12_time_blocking.ipynb | 125 +++-- .../seismic/tutorials/13_LSRTM_acoustic.ipynb | 132 +++-- .../tutorials/14_creating_synthetics.ipynb | 1 + .../seismic/tutorials/15_tti_qp_pure.ipynb | 88 +-- examples/seismic/tutorials/16_ader_fd.ipynb | 22 +- .../seismic/tutorials/17_fourier_mode.ipynb | 18 +- examples/seismic/utils.py | 9 +- examples/timestepping/superstep.ipynb | 9 +- examples/userapi/02_apply.ipynb | 14 +- 
examples/userapi/03_subdomains.ipynb | 60 ++- examples/userapi/04_boundary_conditions.ipynb | 10 + .../userapi/05_conditional_dimension.ipynb | 32 +- examples/userapi/06_sparse_operations.ipynb | 60 +-- .../userapi/07_functions_on_subdomains.ipynb | 45 +- scripts/gen_sympy_funcs.py | 10 +- tests/test_adjoint.py | 6 +- tests/test_benchmark.py | 4 +- tests/test_cinterface.py | 2 +- tests/test_derivatives.py | 12 +- tests/test_dle.py | 4 +- tests/test_docstrings.py | 2 +- tests/test_dse.py | 5 +- tests/test_dtypes.py | 9 +- tests/test_gpu_openacc.py | 4 +- tests/test_gpu_openmp.py | 32 +- tests/test_gradient.py | 8 +- tests/test_ir.py | 18 +- tests/test_mpi.py | 10 +- tests/test_operator.py | 7 +- tests/test_pickle.py | 5 +- tests/test_symbolic_coefficients.py | 5 +- tests/test_tensors.py | 8 +- tests/test_tti.py | 2 +- 83 files changed, 1824 insertions(+), 1760 deletions(-) diff --git a/benchmarks/user/advisor/advisor_logging.py b/benchmarks/user/advisor/advisor_logging.py index af49ee9258..04ef2ed740 100644 --- a/benchmarks/user/advisor/advisor_logging.py +++ b/benchmarks/user/advisor/advisor_logging.py @@ -9,18 +9,18 @@ def check(cond, msg): def err(msg): - print('\033[1;37;31m%s\033[0m' % msg) # print in RED + print(f'\033[1;37;31m{msg}\033[0m') # print in RED def log(msg): - print('\033[1;37;32m%s\033[0m' % msg) # print in GREEN + print(f'\033[1;37;32m{msg}\033[0m') # print in GREEN @contextmanager def progress(msg): - print('\033[1;37;32m%s ... \033[0m' % msg, end='', flush=True) # print in GREEN + print(f'\033[1;37;32m{msg} ... 
\033[0m', end='', flush=True) # print in GREEN yield - print('\033[1;37;32m%s\033[0m' % 'Done!') + print('\033[1;37;32m{}\033[0m'.format('Done!')) def log_process(process, logger): diff --git a/benchmarks/user/advisor/roofline.py b/benchmarks/user/advisor/roofline.py index 12578fda92..83f7526a22 100644 --- a/benchmarks/user/advisor/roofline.py +++ b/benchmarks/user/advisor/roofline.py @@ -205,7 +205,7 @@ def roofline(name, project, scale, precision, mode, th): log(f'\nFigure saved in {figpath}{name}.pdf.') # Save the JSON file - with open('%s.json' % name, 'w') as f: + with open(f'{name}.json', 'w') as f: f.write(json.dumps(roofline_data)) log(f'\nJSON file saved as {name}.json.') diff --git a/benchmarks/user/advisor/run_advisor.py b/benchmarks/user/advisor/run_advisor.py index b3cef8387a..23fdaa1f75 100644 --- a/benchmarks/user/advisor/run_advisor.py +++ b/benchmarks/user/advisor/run_advisor.py @@ -132,7 +132,7 @@ def run_with_advisor(path, output, name, exec_args): # Before collecting the `survey` and `tripcounts` a "pure" python run # to warmup the jit cache is preceded - log('Starting Intel Advisor\'s `roofline` analysis for `%s`' % name) + log(f'Starting Intel Advisor\'s `roofline` analysis for `{name}`') dt = datetime.datetime.now() # Set up a file logger that will track the output of the advisor profiling @@ -142,7 +142,7 @@ def run_with_advisor(path, output, name, exec_args): advixe_formatter = logging.Formatter('%(asctime)s: %(message)s') logger_datetime = '%d.%d.%d.%d.%d.%d' % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) - advixe_handler = logging.FileHandler('%s/%s_%s.log' % (output, name, logger_datetime)) + advixe_handler = logging.FileHandler(f'{output}/{name}_{logger_datetime}.log') advixe_handler.setFormatter(advixe_formatter) advixe_logger.addHandler(advixe_handler) diff --git a/benchmarks/user/benchmark.py b/benchmarks/user/benchmark.py index 4439ad6066..4773f94dbb 100644 --- a/benchmarks/user/benchmark.py +++ 
b/benchmarks/user/benchmark.py @@ -74,7 +74,7 @@ def run_op(solver, operator, **options): try: op = getattr(solver, operator) except AttributeError: - raise AttributeError("Operator %s not implemented for %s" % (operator, solver)) + raise AttributeError(f"Operator {operator} not implemented for {solver}") # This is a bit ugly but not sure how to make clean input creation for different op if operator == "forward": @@ -95,7 +95,7 @@ def run_op(solver, operator, **options): args = args[:-1] return op(*args, **options) else: - raise ValueError("Unrecognized operator %s" % operator) + raise ValueError(f"Unrecognized operator {operator}") @click.group() @@ -157,15 +157,14 @@ def from_opt(ctx, param, value): # E.g., `('advanced', {'par-tile': True})` value = eval(value) if not isinstance(value, tuple) and len(value) >= 1: - raise click.BadParameter("Invalid choice `%s` (`opt` must be " - "either str or tuple)" % str(value)) + raise click.BadParameter(f"Invalid choice `{str(value)}` (`opt` must be " + "either str or tuple)") opt = value[0] except NameError: # E.g. 
`'advanced'` opt = value if opt not in configuration._accepted['opt']: - raise click.BadParameter("Invalid choice `%s` (choose from %s)" - % (opt, str(configuration._accepted['opt']))) + raise click.BadParameter("Invalid choice `{}` (choose from {})".format(opt, str(configuration._accepted['opt']))) return value def config_blockshape(ctx, param, value): @@ -188,11 +187,11 @@ def config_blockshape(ctx, param, value): levels = [bs[x:x+3] for x in range(0, len(bs), 3)] if any(len(level) != 3 for level in levels): raise ValueError("Expected 3 entries per block shape level, but got " - "one level with less than 3 entries (`%s`)" % levels) + f"one level with less than 3 entries (`{levels}`)") normalized_value.append(levels) if not all_equal(len(i) for i in normalized_value): raise ValueError("Found different block shapes with incompatible " - "number of levels (`%s`)" % normalized_value) + f"number of levels (`{normalized_value}`)") configuration['opt-options']['blocklevels'] = len(normalized_value[0]) else: normalized_value = [] @@ -205,8 +204,7 @@ def config_autotuning(ctx, param, value): elif value != 'off': # Sneak-peek at the `block-shape` -- if provided, keep auto-tuning off if ctx.params['block_shape']: - warning("Skipping autotuning (using explicit block-shape `%s`)" - % str(ctx.params['block_shape'])) + warning("Skipping autotuning (using explicit block-shape `{}`)".format(str(ctx.params['block_shape']))) level = False else: # Make sure to always run in preemptive mode @@ -305,11 +303,11 @@ def run(problem, **kwargs): dumpfile = kwargs.pop('dump_norms') if dumpfile: - norms = ["'%s': %f" % (i.name, norm(i)) for i in retval[:-1] + norms = [f"'{i.name}': {norm(i):f}" for i in retval[:-1] if isinstance(i, DiscreteFunction)] if rank == 0: with open(dumpfile, 'w') as f: - f.write("{%s}" % ', '.join(norms)) + f.write("{{{}}}".format(', '.join(norms))) return retval @@ -343,13 +341,13 @@ def run_jit_backdoor(problem, **kwargs): op = solver.op_fwd() # Get the filename 
in the JIT cache - cfile = "%s.c" % str(op._compiler.get_jit_dir().joinpath(op._soname)) + cfile = f"{str(op._compiler.get_jit_dir().joinpath(op._soname))}.c" if not os.path.exists(cfile): # First time we run this problem, let's generate and jit-compile code op.cfunction - info("You may now edit the generated code in `%s`. " - "Then save the file, and re-run this benchmark." % cfile) + info(f"You may now edit the generated code in `{cfile}`. " + "Then save the file, and re-run this benchmark.") return info("Running wave propagation Operator...") @@ -364,7 +362,7 @@ def _run_jit_backdoor(): if dumpnorms: for i in retval[:-1]: if isinstance(i, DiscreteFunction): - info("'%s': %f" % (i.name, norm(i))) + info(f"'{i.name}': {norm(i):f}") return retval diff --git a/conftest.py b/conftest.py index 23581cd74b..7933978f1a 100644 --- a/conftest.py +++ b/conftest.py @@ -40,29 +40,28 @@ def skipif(items, whole_module=False): accepted.update({'nodevice', 'noomp'}) unknown = sorted(set(items) - accepted) if unknown: - raise ValueError("Illegal skipif argument(s) `%s`" % unknown) + raise ValueError(f"Illegal skipif argument(s) `{unknown}`") skipit = False for i in items: # Skip if won't run on GPUs if i == 'device' and isinstance(configuration['platform'], Device): - skipit = "device `%s` unsupported" % configuration['platform'].name + skipit = "device `{}` unsupported".format(configuration['platform'].name) break # Skip if won't run on a specific GPU backend langs = configuration._accepted['language'] - if any(i == 'device-%s' % l and configuration['language'] == l for l in langs)\ + if any(i == f'device-{l}' and configuration['language'] == l for l in langs)\ and isinstance(configuration['platform'], Device): - skipit = "language `%s` for device unsupported" % configuration['language'] + skipit = "language `{}` for device unsupported".format(configuration['language']) break - if any(i == 'device-%s' % k and isinstance(configuration['compiler'], v) + if any(i == f'device-{k}' 
and isinstance(configuration['compiler'], v) for k, v in compiler_registry.items()) and\ isinstance(configuration['platform'], Device): - skipit = "compiler `%s` for device unsupported" % configuration['compiler'] + skipit = "compiler `{}` for device unsupported".format(configuration['compiler']) break # Skip if must run on GPUs but not currently on a GPU if i in ('nodevice', 'nodevice-omp', 'nodevice-acc') and\ not isinstance(configuration['platform'], Device): - skipit = ("must run on device, but currently on `%s`" % - configuration['platform'].name) + skipit = ("must run on device, but currently on `{}`".format(configuration['platform'].name)) break # Skip if it won't run with nvc on CPU backend if i == 'cpu64-nvc' and \ @@ -137,9 +136,9 @@ def EVAL(exprs, *args): def get_testname(item): if item.cls is not None: - return "%s::%s::%s" % (item.fspath, item.cls.__name__, item.name) + return f"{item.fspath}::{item.cls.__name__}::{item.name}" else: - return "%s::%s" % (item.fspath, item.name) + return f"{item.fspath}::{item.name}" def set_run_reset(env_vars, call): @@ -179,7 +178,7 @@ def parallel(item, m): if len(m) == 2: nprocs, scheme = m else: - raise ValueError("Can't run test: unexpected mode `%s`" % m) + raise ValueError(f"Can't run test: unexpected mode `{m}`") env_vars = {'DEVITO_MPI': scheme} diff --git a/examples/cfd/01_convection.ipynb b/examples/cfd/01_convection.ipynb index 7a7fe3b6c6..8e4cf8e8e4 100644 --- a/examples/cfd/01_convection.ipynb +++ b/examples/cfd/01_convection.ipynb @@ -52,7 +52,7 @@ "c = 1.\n", "dx = 2. / (nx - 1)\n", "dy = 2. / (ny - 1)\n", - "print(\"dx %s, dy %s\" % (dx, dy))\n", + "print(f\"dx {dx}, dy {dy}\")\n", "sigma = .2\n", "dt = sigma * dx" ] @@ -83,7 +83,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Create field and assign initial conditions\n", "u = np.empty((nx, ny))\n", @@ -119,9 +119,9 @@ "\n", " # Apply boundary conditions.\n", " u[0, :] = 1. # left\n", - " u[-1, :] = 1. 
# right\n", + " u[-1, :] = 1. # right\n", " u[:, 0] = 1. # bottom\n", - " u[:, -1] = 1. # top\n", + " u[:, -1] = 1. # top\n", " # Note that in the above expressions the NumPy index -1 corresponds to the final point of the array along the indexed dimension,\n", " # i.e. here u[-1, :] is equivalent to u[80, :].\n" ] @@ -143,7 +143,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# A small sanity check for auto-testing\n", "assert (u[45:55, 45:55] > 1.8).all()\n", @@ -193,7 +193,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Grid, TimeFunction\n", "\n", "grid = Grid(shape=(nx, ny), extent=(2., 2.))\n", @@ -302,7 +302,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator\n", "\n", "# Reset our initial condition in both buffers.\n", @@ -364,7 +364,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Reset our data field and ICs in both buffers\n", "init_hat(field=u.data[0], dx=dx, dy=dy, value=2.)\n", diff --git a/examples/cfd/01_convection_revisited.ipynb b/examples/cfd/01_convection_revisited.ipynb index e7be39a448..b05445a07d 100644 --- a/examples/cfd/01_convection_revisited.ipynb +++ b/examples/cfd/01_convection_revisited.ipynb @@ -77,7 +77,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Create field and assign initial conditions\n", "u = np.empty((nx, ny))\n", @@ -114,9 +114,9 @@ " # Apply boundary conditions.\n", " # Note: -1 here is the last index in the array, not the one at x=-1 or y=-1.\n", " u[0, :] = 1. # left\n", - " u[-1, :] = 1. # right\n", + " u[-1, :] = 1. # right\n", " u[:, 0] = 1. # bottom\n", - " u[:, -1] = 1. # top" + " u[:, -1] = 1. 
# top" ] }, { @@ -136,7 +136,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# A small sanity check for auto-testing\n", "assert (u[45:55, 45:55] > 1.8).all()\n", @@ -177,7 +177,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Grid, TimeFunction\n", "\n", "grid = Grid(shape=(nx, ny), extent=(2., 2.))\n", @@ -277,7 +277,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator\n", "\n", "# Reset our initial condition in both buffers.\n", @@ -339,7 +339,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Reset our data field and ICs in both buffers\n", "init_smooth(field=u.data[0], dx=dx, dy=dy)\n", diff --git a/examples/cfd/02_convection_nonlinear.ipynb b/examples/cfd/02_convection_nonlinear.ipynb index 55631f61af..8181eca6f8 100644 --- a/examples/cfd/02_convection_nonlinear.ipynb +++ b/examples/cfd/02_convection_nonlinear.ipynb @@ -73,7 +73,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Allocate fields and assign initial conditions\n", "u = np.empty((nx, ny))\n", @@ -109,8 +109,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "for n in range(nt + 1): ##loop across number of time steps\n", + "# NBVAL_IGNORE_OUTPUT\n", + "for n in range(nt + 1): # loop across number of time steps\n", " un = u.copy()\n", " vn = v.copy()\n", " u[1:, 1:] = (un[1:, 1:] -\n", @@ -159,7 +159,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Grid, TimeFunction\n", "\n", "# First we need two time-dependent data fields, both initialized with the hat function\n", @@ -211,8 +211,8 @@ "update_u = Eq(u.forward, stencil_u, subdomain=grid.interior)\n", "update_v = Eq(v.forward, stencil_v, subdomain=grid.interior)\n", "\n", - "print(\"U update:\\n%s\\n\" % update_u)\n", - "print(\"V update:\\n%s\\n\" % update_v)" + "print(f\"U 
update:\\n{update_u}\\n\")\n", + "print(f\"V update:\\n{update_v}\\n\")" ] }, { @@ -271,7 +271,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator\n", "\n", "# Reset our data field and ICs\n", @@ -452,7 +452,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op = Operator([update_U] + bc_u + bc_v)\n", "op(time=nt, dt=dt)\n", "\n", diff --git a/examples/cfd/03_diffusion.ipynb b/examples/cfd/03_diffusion.ipynb index 0fbd64c580..14d786e4f8 100644 --- a/examples/cfd/03_diffusion.ipynb +++ b/examples/cfd/03_diffusion.ipynb @@ -59,9 +59,9 @@ "def diffuse(u, nt):\n", " for n in range(nt + 1):\n", " un = u.copy()\n", - " u[1:-1, 1:-1] = (un[1:-1,1:-1] +\n", + " u[1:-1, 1:-1] = (un[1:-1, 1:-1] +\n", " nu * dt / dy**2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", - " nu * dt / dx**2 * (un[2:,1: -1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", + " nu * dt / dx**2 * (un[2:, 1: -1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", " u[0, :] = 1\n", " u[-1, :] = 1\n", " u[:, 0] = 1\n", @@ -133,7 +133,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Initialise u with hat function\n", "u = np.empty((nx, ny))\n", @@ -141,20 +141,20 @@ "\n", "# Field initialization.\n", "# This will create 4 equally spaced 10x10 hat functions of various values.\n", - "u[ nx//4:nx//4+10 , ny//4:ny//4+10 ] = 2\n", - "u[ 3*nx//4:3*nx//4+10 , ny//4:ny//4+10 ] = 3\n", - "u[ nx//4:nx//4+10 , 3*ny//4:3*ny//4+10 ] = 4\n", - "u[ 3*ny//4:3*ny//4+10 , 3*ny//4:3*ny//4+10 ] = 5\n", + "u[nx//4:nx//4+10, ny//4:ny//4+10] = 2\n", + "u[3*nx//4:3*nx//4+10, ny//4:ny//4+10] = 3\n", + "u[nx//4:nx//4+10, 3*ny//4:3*ny//4+10] = 4\n", + "u[3*ny//4:3*ny//4+10, 3*ny//4:3*ny//4+10] = 5\n", "\n", - "print (\"Initial state\")\n", + "print(\"Initial state\")\n", "plot_field(u, zmax=4.5)\n", "\n", "diffuse(u, nt)\n", - "print (\"After\", nt, \"timesteps\")\n", + "print(\"After\", nt, \"timesteps\")\n", 
"plot_field(u, zmax=4.5)\n", "\n", "diffuse(u, nt)\n", - "print (\"After another\", nt, \"timesteps\")\n", + "print(\"After another\", nt, \"timesteps\")\n", "plot_field(u, zmax=4.5)" ] }, @@ -296,17 +296,17 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator, Constant, Eq, solve\n", "\n", "# Reset our data field and ICs\n", "init_hat(field=u.data[0], dx=dx, dy=dy, value=1.)\n", "\n", "# Field initialization\n", - "u.data[0][ nx//4:nx//4+10 , ny//4:ny//4+10 ] = 2\n", - "u.data[0][ 3*nx//4:3*nx//4+10 , ny//4:ny//4+10 ] = 3\n", - "u.data[0][ nx//4:nx//4+10 , 3*ny//4:3*ny//4+10 ] = 4\n", - "u.data[0][ 3*ny//4:3*ny//4+10 , 3*ny//4:3*ny//4+10 ] = 5\n", + "u.data[0][nx//4:nx//4+10, ny//4:ny//4+10] = 2\n", + "u.data[0][3*nx//4:3*nx//4+10, ny//4:ny//4+10] = 3\n", + "u.data[0][nx//4:nx//4+10, 3*ny//4:3*ny//4+10] = 4\n", + "u.data[0][3*ny//4:3*ny//4+10, 3*ny//4:3*ny//4+10] = 5\n", "\n", "\n", "# Create an operator with second-order derivatives\n", @@ -326,11 +326,11 @@ "op = Operator([eq_stencil] + bc)\n", "op(time=nt, dt=dt, a=nu)\n", "\n", - "print (\"After\", nt, \"timesteps\")\n", + "print(\"After\", nt, \"timesteps\")\n", "plot_field(u.data[0], zmax=4.5)\n", "\n", "op(time=nt, dt=dt, a=nu)\n", - "print (\"After another\", nt, \"timesteps\")\n", + "print(\"After another\", nt, \"timesteps\")\n", "plot_field(u.data[0], zmax=4.5)" ] }, @@ -396,23 +396,23 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u2 = TimeFunction(name='u2', grid=grid, space_order=2)\n", "init_hat(field=u2.data[0], dx=dx, dy=dy, value=1.)\n", "\n", "# Field initialization\n", - "u2.data[0][ nx//4:nx//4+10 , ny//4:ny//4+10 ] = 2\n", - "u2.data[0][ 3*nx//4:3*nx//4+10 , ny//4:ny//4+10 ] = 3\n", - "u2.data[0][ nx//4:nx//4+10 , 3*ny//4:3*ny//4+10 ] = 4\n", - "u2.data[0][ 3*ny//4:3*ny//4+10 , 3*ny//4:3*ny//4+10 ] = 5\n", + "u2.data[0][nx//4:nx//4+10, ny//4:ny//4+10] = 2\n", + "u2.data[0][3*nx//4:3*nx//4+10, ny//4:ny//4+10] = 
3\n", + "u2.data[0][nx//4:nx//4+10, 3*ny//4:3*ny//4+10] = 4\n", + "u2.data[0][3*ny//4:3*ny//4+10, 3*ny//4:3*ny//4+10] = 5\n", "\n", "op(u=u2, time=2*nt, dt=dt, a=nu)\n", "\n", - "print (\"After\", 2*nt, \"timesteps\")\n", + "print(\"After\", 2*nt, \"timesteps\")\n", "plot_field(u2.data[0], zmax=4.5)\n", "\n", "op(u=u2, time=2*nt, dt=dt, a=nu)\n", - "print (\"After another\", 2*nt, \"timesteps\")\n", + "print(\"After another\", 2*nt, \"timesteps\")\n", "plot_field(u2.data[0], zmax=4.5)" ] } diff --git a/examples/cfd/03_diffusion_nonuniform.ipynb b/examples/cfd/03_diffusion_nonuniform.ipynb index e42a2c6a26..3e320ea717 100644 --- a/examples/cfd/03_diffusion_nonuniform.ipynb +++ b/examples/cfd/03_diffusion_nonuniform.ipynb @@ -38,16 +38,16 @@ "ny = 100\n", "nt = 1000\n", "\n", - "nu = 0.15 #the value of base viscosity\n", + "nu = 0.15 # the value of base viscosity\n", "\n", - "offset = 1 # Used for field definition\n", + "offset = 1 # Used for field definition\n", "\n", - "visc = np.full((nx, ny), nu) # Initialize viscosity\n", - "visc[nx//4-offset:nx//4+offset, 1:-1] = 0.0001 # Adding a material with different viscosity\n", - "visc[1:-1,nx//4-offset:nx//4+offset ] = 0.0001\n", + "visc = np.full((nx, ny), nu) # Initialize viscosity\n", + "visc[nx//4-offset:nx//4+offset, 1:-1] = 0.0001 # Adding a material with different viscosity\n", + "visc[1:-1, nx//4-offset:nx//4+offset] = 0.0001\n", "visc[3*nx//4-offset:3*nx//4+offset, 1:-1] = 0.0001\n", "\n", - "visc_nb = visc[1:-1,1:-1]\n", + "visc_nb = visc[1:-1, 1:-1]\n", "\n", "dx = 2. / (nx - 1)\n", "dy = 2. 
/ (ny - 1)\n", @@ -63,7 +63,7 @@ "u_init[10:-10, 10:-10] = 1.5\n", "\n", "\n", - "zmax = 2.5 # zmax for plotting" + "zmax = 2.5 # zmax for plotting" ] }, { @@ -79,12 +79,12 @@ "metadata": {}, "outputs": [], "source": [ - "def diffuse(u, nt ,visc):\n", + "def diffuse(u, nt, visc):\n", " for n in range(nt + 1):\n", " un = u.copy()\n", - " u[1:-1, 1:-1] = (un[1:-1,1:-1] +\n", + " u[1:-1, 1:-1] = (un[1:-1, 1:-1] +\n", " visc*dt / dy**2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", - " visc*dt / dx**2 * (un[2:,1: -1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", + " visc*dt / dx**2 * (un[2:, 1: -1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", " u[0, :] = 1\n", " u[-1, :] = 1\n", " u[:, 0] = 1\n", @@ -166,24 +166,24 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot material according to viscosity, uncomment to plot\n", "import matplotlib.pyplot as plt\n", - "plt.imshow(visc_nb, cmap='Greys', interpolation='nearest')\n", + "plt.imshow(visc_nb, cmap='Greys', interpolation='nearest')\n", "\n", "# Field initialization\n", "u = u_init\n", "\n", - "print (\"Initial state\")\n", + "print(\"Initial state\")\n", "plot_field(u, zmax=zmax)\n", "\n", - "diffuse(u, nt , visc_nb )\n", - "print (\"After\", nt, \"timesteps\")\n", + "diffuse(u, nt, visc_nb)\n", + "print(\"After\", nt, \"timesteps\")\n", "plot_field(u, zmax=zmax)\n", "\n", "diffuse(u, nt, visc_nb)\n", - "print (\"After another\", nt, \"timesteps\")\n", + "print(\"After another\", nt, \"timesteps\")\n", "plot_field(u, zmax=zmax)" ] }, @@ -218,14 +218,14 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Field initialization\n", "u = u_init\n", "\n", "\n", - "diffuse(u, nt , visc_nb)\n", - "print (\"After\", nt, \"timesteps\")\n", + "diffuse(u, nt, visc_nb)\n", + "print(\"After\", nt, \"timesteps\")\n", "plot_field(u, zmax=zmax)" ] }, @@ -263,9 +263,8 @@ "grid = Grid(shape=(nx, ny), extent=(2., 2.))\n", "\n", "# Create an operator with 
second-order derivatives\n", - "a = Function(name='a',grid = grid) # Define as Function\n", - "a.data[:]= visc # Pass the viscosity in order to be used in the operator.\n", - "\n", + "a = Function(name='a', grid=grid) # Define as Function\n", + "a.data[:] = visc # Pass the viscosity in order to be used in the operator.\n", "\n", "\n", "u = TimeFunction(name='u', grid=grid, space_order=2)\n", @@ -340,7 +339,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator, Eq, solve, Function\n", "\n", "\n", @@ -352,8 +351,8 @@ "\n", "\n", "# Create an operator with second-order derivatives\n", - "a = Function(name='a',grid = grid)\n", - "a.data[:]= visc\n", + "a = Function(name='a', grid=grid)\n", + "a.data[:] = visc\n", "\n", "eq = Eq(u.dt, a * u.laplace, subdomain=grid.interior)\n", "stencil = solve(eq, u.forward)\n", @@ -369,13 +368,13 @@ "\n", "\n", "op = Operator([eq_stencil] + bc)\n", - "op(time=nt, dt=dt, a = a)\n", + "op(time=nt, dt=dt, a=a)\n", "\n", - "print (\"After\", nt, \"timesteps\")\n", + "print(\"After\", nt, \"timesteps\")\n", "plot_field(u.data[0], zmax=zmax)\n", "\n", - "op(time=nt, dt=dt, a = a)\n", - "print (\"After another\", nt, \"timesteps\")\n", + "op(time=nt, dt=dt, a=a)\n", + "print(\"After another\", nt, \"timesteps\")\n", "plot_field(u.data[0], zmax=zmax)" ] } diff --git a/examples/cfd/04_burgers.ipynb b/examples/cfd/04_burgers.ipynb index 1ad8811331..2e917238e0 100644 --- a/examples/cfd/04_burgers.ipynb +++ b/examples/cfd/04_burgers.ipynb @@ -42,10 +42,10 @@ "nx = 41 # Grid size on x axis\n", "ny = 41 # Grid size on y axis\n", "\n", - "batches = 5 # Batches of timesteps, increase number of batches to extend evolution in time\n", + "batches = 5 # Batches of timesteps, increase number of batches to extend evolution in time\n", "# A figure of the wave state will be produced for each batch.\n", - "batch_size = 640 # Number of timesteps for every batch\n", - "nt = batches*batch_size # Number of 
total timesteps\n", + "batch_size = 640 # Number of timesteps for every batch\n", + "nt = batches*batch_size # Number of total timesteps\n", "\n", "c = 1\n", "dx = 2. / (nx - 1)\n", @@ -72,7 +72,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Assign initial conditions\n", "u = np.empty((nx, ny))\n", @@ -193,9 +193,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", - "for n in range(nt + 1): ##loop across number of time steps\n", + "for n in range(nt + 1): # loop across number of time steps\n", " un = u.copy()\n", " vn = v.copy()\n", "\n", @@ -205,7 +205,7 @@ " dt / dx * vn[1:-1, 1:-1] *\n", " (un[1:-1, 1:-1] - un[0:-2, 1:-1]) +\n", " nu * dt / dy**2 *\n", - " (un[1:-1,2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", + " (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", " nu * dt / dx**2 *\n", " (un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", "\n", @@ -229,10 +229,9 @@ " v[:, 0] = 1\n", " v[:, -1] = 1\n", "\n", - "\n", " # A figure of the wave state will be produced for each batch\n", - " if (n%batch_size) == 0:\n", - " print (\"Batch:\",n/(batch_size))\n", + " if (n % batch_size) == 0:\n", + " print(\"Batch:\", n/(batch_size))\n", " plot_field(u)" ] }, @@ -274,14 +273,14 @@ "t = grid.stepping_dim\n", "\n", "u1 = TimeFunction(name='u1', grid=grid, space_order=1)\n", - "print(\"Space order 1:\\n%s\\n\" % u1.dxl)\n", + "print(f\"Space order 1:\\n{u1.dxl}\\n\")\n", "\n", "u2 = TimeFunction(name='u2', grid=grid, space_order=2)\n", - "print(\"Space order 2:\\n%s\\n\" % u2.dxl)\n", + "print(f\"Space order 2:\\n{u2.dxl}\\n\")\n", "\n", "# We use u2 to create the explicit first-order derivative\n", "u1_dx = first_derivative(u2, dim=x, side=left, fd_order=1)\n", - "print(\"Explicit space order 1:\\n%s\\n\" % u1_dx)" + "print(f\"Explicit space order 1:\\n{u1_dx}\\n\")" ] }, { @@ -418,7 +417,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito 
import Operator, Constant, Eq, solve\n", "\n", "# Define our velocity fields and initialize with hat function\n", @@ -458,7 +457,7 @@ "# Execute the operator for a number of timesteps\n", "for batch_no in range(batches):\n", " op(time=batch_size, dt=dt, a=nu)\n", - " print (\"Batch:\",batch_no+1)\n", + " print(\"Batch:\", batch_no+1)\n", " plot_field(u.data[0])\n" ] }, @@ -701,12 +700,12 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op = Operator([update_U] + bc_U)\n", "# Execute the operator for a number of timesteps\n", "for batch_no in range(batches):\n", " op(time=batch_size, dt=dt, a=nu)\n", - " print (\"Batch:\",batch_no+1)\n", + " print(\"Batch:\", batch_no+1)\n", " plot_field(U[0].data[0])" ] } diff --git a/examples/cfd/05_laplace.ipynb b/examples/cfd/05_laplace.ipynb index 74ee48608e..8ba567936d 100644 --- a/examples/cfd/05_laplace.ipynb +++ b/examples/cfd/05_laplace.ipynb @@ -93,7 +93,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Our initial condition is 0 everywhere, except at the boundary\n", "p = np.zeros((ny, nx))\n", @@ -125,7 +125,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "p = laplace2d(p, bc_right, dx, dy, 1e-4)\n", "plot_field(p, ymax=1.0, view=(30, 225))" @@ -181,7 +181,7 @@ "\n", "# In the resulting stencil `pn` is exclusively used on the RHS\n", "# and `p` on the LHS is the grid the kernel will update\n", - "print(\"Update stencil:\\n%s\\n\" % eq_stencil)" + "print(f\"Update stencil:\\n{eq_stencil}\\n\")" ] }, { @@ -272,7 +272,7 @@ ], "source": [ "%%time\n", - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Silence the runtime performance logging\n", "from devito import configuration\n", @@ -345,7 +345,7 @@ ], "source": [ "%%time\n", - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Initialise the two buffer fields\n", "p.data[:] = 0.\n", diff --git a/examples/cfd/06_poisson.ipynb 
b/examples/cfd/06_poisson.ipynb index 8fd2e7875e..edb8759681 100644 --- a/examples/cfd/06_poisson.ipynb +++ b/examples/cfd/06_poisson.ipynb @@ -38,15 +38,14 @@ "# Some variable declarations\n", "nx = 50\n", "ny = 50\n", - "nt = 100\n", + "nt = 100\n", "xmin = 0.\n", "xmax = 2.\n", "ymin = 0.\n", "ymax = 1.\n", "\n", "dx = (xmax - xmin) / (nx - 1)\n", - "dy = (ymax - ymin) / (ny - 1)\n", - "\n" + "dy = (ymax - ymin) / (ny - 1)\n" ] }, { @@ -56,12 +55,12 @@ "outputs": [], "source": [ "# Initialization\n", - "p = np.zeros((nx, ny))\n", + "p = np.zeros((nx, ny))\n", "pd = np.zeros((nx, ny))\n", - "b = np.zeros((nx, ny))\n", + "b = np.zeros((nx, ny))\n", "\n", "# Source\n", - "b[int(nx / 4), int(ny / 4)] = 100\n", + "b[int(nx / 4), int(ny / 4)] = 100\n", "b[int(3 * nx / 4), int(3 * ny / 4)] = -100" ] }, @@ -81,10 +80,10 @@ ], "source": [ "%%time\n", - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "for it in range(nt):\n", " pd = p.copy()\n", - " p[1:-1,1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +\n", + " p[1:-1, 1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +\n", " (pd[2:, 1:-1] + pd[:-2, 1:-1]) * dx**2 -\n", " b[1:-1, 1:-1] * dx**2 * dy**2) /\n", " (2 * (dx**2 + dy**2)))\n", @@ -92,8 +91,7 @@ " p[0, :] = 0\n", " p[nx-1, :] = 0\n", " p[:, 0] = 0\n", - " p[:, ny-1] = 0\n", - "\n" + " p[:, ny-1] = 0\n" ] }, { @@ -113,7 +111,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_field(p, xmax=xmax, ymax=ymax, view=(30, 225))" ] }, @@ -144,7 +142,7 @@ "# Initialise the source term `b`\n", "b = Function(name='b', grid=grid)\n", "b.data[:] = 0.\n", - "b.data[int(nx / 4), int(ny / 4)] = 100\n", + "b.data[int(nx / 4), int(ny / 4)] = 100\n", "b.data[int(3 * nx / 4), int(3 * ny / 4)] = -100\n", "\n", "# Create Laplace equation base on `pd`\n", @@ -182,7 +180,7 @@ ], "source": [ "%%time\n", - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Run the outer loop explicitly in Python\n", "for i in range(nt):\n", @@ -215,7 
+213,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Plot result\n", "plot_field(p.data, xmax=xmax, ymax=ymax, view=(30, 225))" ] @@ -246,7 +244,7 @@ "# Initialise the source term `b`\n", "b = Function(name='b', grid=grid)\n", "b.data[:] = 0.\n", - "b.data[int(nx / 4), int(ny / 4)] = 100\n", + "b.data[int(nx / 4), int(ny / 4)] = 100\n", "b.data[int(3 * nx / 4), int(3 * ny / 4)] = -100\n", "\n", "# Create Laplace equation base on `p`\n", @@ -292,7 +290,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "configuration['log-level'] = 'ERROR'\n", "# Create and execute the operator for a number of timesteps\n", "op = Operator([eq_stencil] + bc)\n", @@ -316,7 +314,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_field(p.data[0], xmax=xmax, ymax=ymax, view=(30, 225))" ] } diff --git a/examples/cfd/07_cavity_flow.ipynb b/examples/cfd/07_cavity_flow.ipynb index f2c8506082..0832cbdd57 100644 --- a/examples/cfd/07_cavity_flow.ipynb +++ b/examples/cfd/07_cavity_flow.ipynb @@ -122,7 +122,7 @@ "dy = 1. 
/ (ny - 1)\n", "x = np.linspace(0, 1, nx)\n", "y = np.linspace(0, 1, ny)\n", - "Y, X = np.meshgrid(x, y)\n", + "Y, X = np.meshgrid(x, y)\n", "\n", "rho = 1\n", "nu = .1\n", @@ -182,9 +182,9 @@ " (pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dx**2) /\n", " (2 * (dx**2 + dy**2)) -\n", " dx**2 * dy**2 / (2 * (dx**2 + dy**2)) *\n", - " b[1:-1,1:-1])\n", + " b[1:-1, 1:-1])\n", "\n", - " p[-1, :] = p[-2, :] # dp/dx = 0 at x = 2\n", + " p[-1, :] = p[-2, :] # dp/dx = 0 at x = 2\n", " p[:, 0] = p[:, 1] # dp/dy = 0 at y = 0\n", " p[0, :] = p[1, :] # dp/dx = 0 at x = 0\n", " p[:, -1] = p[:, -2] # p = 0 at y = 2\n", @@ -211,7 +211,7 @@ " vn = np.empty_like(v)\n", " b = np.zeros((nx, ny))\n", "\n", - " for n in range(0,nt):\n", + " for n in range(0, nt):\n", " un = u.copy()\n", " vn = v.copy()\n", "\n", @@ -230,7 +230,7 @@ " dt / dy**2 *\n", " (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2])))\n", "\n", - " v[1:-1,1:-1] = (vn[1:-1, 1:-1] -\n", + " v[1:-1, 1:-1] = (vn[1:-1, 1:-1] -\n", " un[1:-1, 1:-1] * dt / dx *\n", " (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) -\n", " vn[1:-1, 1:-1] * dt / dy *\n", @@ -241,14 +241,14 @@ " dt / dy**2 *\n", " (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2])))\n", "\n", - " u[:, 0] = 0\n", - " u[0, :] = 0\n", + " u[:, 0] = 0\n", + " u[0, :] = 0\n", " u[-1, :] = 0\n", " u[:, -1] = 1 # Set velocity on cavity lid equal to 1\n", "\n", - " v[:, 0] = 0\n", + " v[:, 0] = 0\n", " v[:, -1] = 0\n", - " v[0, :] = 0\n", + " v[0, :] = 0\n", " v[-1, :] = 0\n", "\n", " return u, v, p, pn" @@ -271,7 +271,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u = np.zeros((nx, ny))\n", "v = np.zeros((nx, ny))\n", "p = np.zeros((nx, ny))\n", @@ -314,36 +314,36 @@ "source": [ "# Import u values at x=L/2 (table 6, column 2 rows 12-26) in Marchi et al.\n", "Marchi_Re10_u = np.array([[0.0625, -3.85425800e-2],\n", - " [0.125, -6.96238561e-2],\n", + " [0.125, -6.96238561e-2],\n", " [0.1875, -9.6983962e-2],\n", - " [0.25, -1.22721979e-1],\n", + " [0.25, 
-1.22721979e-1],\n", " [0.3125, -1.47636199e-1],\n", - " [0.375, -1.71260757e-1],\n", + " [0.375, -1.71260757e-1],\n", " [0.4375, -1.91677043e-1],\n", - " [0.5, -2.05164738e-1],\n", + " [0.5, -2.05164738e-1],\n", " [0.5625, -2.05770198e-1],\n", - " [0.625, -1.84928116e-1],\n", + " [0.625, -1.84928116e-1],\n", " [0.6875, -1.313892353e-1],\n", - " [0.75, -3.1879308e-2],\n", - " [0.8125, 1.26912095e-1],\n", - " [0.875, 3.54430364e-1],\n", - " [0.9375, 6.50529292e-1]])\n", + " [0.75, -3.1879308e-2],\n", + " [0.8125, 1.26912095e-1],\n", + " [0.875, 3.54430364e-1],\n", + " [0.9375, 6.50529292e-1]])\n", "# Import v values at y=L/2 (table 6, column 2 rows 27-41) in Marchi et al.\n", "Marchi_Re10_v = np.array([[0.0625, 9.2970121e-2],\n", - " [0.125, 1.52547843e-1],\n", + " [0.125, 1.52547843e-1],\n", " [0.1875, 1.78781456e-1],\n", - " [0.25, 1.76415100e-1],\n", + " [0.25, 1.76415100e-1],\n", " [0.3125, 1.52055820e-1],\n", - " [0.375, 1.121477612e-1],\n", + " [0.375, 1.121477612e-1],\n", " [0.4375, 6.21048147e-2],\n", - " [0.5, 6.3603620e-3],\n", - " [0.5625,-5.10417285e-2],\n", + " [0.5, 6.3603620e-3],\n", + " [0.5625, -5.10417285e-2],\n", " [0.625, -1.056157259e-1],\n", - " [0.6875,-1.51622101e-1],\n", - " [0.75, -1.81633561e-1],\n", - " [0.8125,-1.87021651e-1],\n", + " [0.6875, -1.51622101e-1],\n", + " [0.75, -1.81633561e-1],\n", + " [0.8125, -1.87021651e-1],\n", " [0.875, -1.59898186e-1],\n", - " [0.9375,-9.6409942e-2]])" + " [0.9375, -9.6409942e-2]])" ] }, { @@ -363,22 +363,22 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Check results with Marchi et al 2009.\n", - "npgrid=[nx,ny]\n", + "npgrid = [nx, ny]\n", "\n", "x_coord = np.linspace(0, 1, npgrid[0])\n", "y_coord = np.linspace(0, 1, npgrid[1])\n", "\n", "fig = pyplot.figure(figsize=(12, 6))\n", "ax1 = fig.add_subplot(121)\n", - "ax1.plot(a[int(npgrid[0]/2),:],y_coord[:])\n", - "ax1.plot(Marchi_Re10_u[:,1],Marchi_Re10_u[:,0],'ro')\n", + "ax1.plot(a[int(npgrid[0]/2), :], 
y_coord[:])\n", + "ax1.plot(Marchi_Re10_u[:, 1], Marchi_Re10_u[:, 0], 'ro')\n", "ax1.set_xlabel('$u$')\n", "ax1.set_ylabel('$y$')\n", "ax1 = fig.add_subplot(122)\n", - "ax1.plot(x_coord[:],b[:,int(npgrid[1]/2)])\n", - "ax1.plot(Marchi_Re10_v[:,0],Marchi_Re10_v[:,1],'ro')\n", + "ax1.plot(x_coord[:], b[:, int(npgrid[1]/2)])\n", + "ax1.plot(Marchi_Re10_v[:, 0], Marchi_Re10_v[:, 1], 'ro')\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$v$')\n", "\n", @@ -463,42 +463,42 @@ "# --------------------------------------\n", "u = TimeFunction(name='u', grid=grid, space_order=2)\n", "v = TimeFunction(name='v', grid=grid, space_order=2)\n", - "p = TimeFunction(name='p', grid=grid, space_order=2)\n", - "#Variables are automatically initialized at 0.\n", + "p = TimeFunction(name='p', grid=grid, space_order=2)\n", + "# Variables are automatically initialized at 0.\n", "\n", "# First order derivatives will be handled with p.dxc\n", - "eq_u =Eq(u.dt + u*u.dx + v*u.dy, -1./rho * p.dxc + nu*(u.laplace), subdomain=grid.interior)\n", - "eq_v =Eq(v.dt + u*v.dx + v*v.dy, -1./rho * p.dyc + nu*(v.laplace), subdomain=grid.interior)\n", - "eq_p =Eq(p.laplace,rho*(1./dt*(u.dxc+v.dyc)-(u.dxc*u.dxc)-2*(u.dyc*v.dxc)-(v.dyc*v.dyc)), subdomain=grid.interior)\n", + "eq_u = Eq(u.dt + u*u.dx + v*u.dy, -1./rho * p.dxc + nu*(u.laplace), subdomain=grid.interior)\n", + "eq_v = Eq(v.dt + u*v.dx + v*v.dy, -1./rho * p.dyc + nu*(v.laplace), subdomain=grid.interior)\n", + "eq_p = Eq(p.laplace, rho*(1./dt*(u.dxc+v.dyc)-(u.dxc*u.dxc)-2*(u.dyc*v.dxc)-(v.dyc*v.dyc)), subdomain=grid.interior)\n", "\n", "# NOTE: Pressure has no time dependence so we solve for the other pressure buffer.\n", - "stencil_u =solve(eq_u , u.forward)\n", - "stencil_v =solve(eq_v , v.forward)\n", - "stencil_p=solve(eq_p, p)\n", + "stencil_u = solve(eq_u, u.forward)\n", + "stencil_v = solve(eq_v, v.forward)\n", + "stencil_p = solve(eq_p, p)\n", "\n", - "update_u =Eq(u.forward, stencil_u)\n", - "update_v =Eq(v.forward, stencil_v)\n", - 
"update_p =Eq(p.forward, stencil_p)\n", + "update_u = Eq(u.forward, stencil_u)\n", + "update_v = Eq(v.forward, stencil_v)\n", + "update_p = Eq(p.forward, stencil_p)\n", "\n", "# Boundary Conds. u=v=0 for all sides\n", - "bc_u = [Eq(u[t+1, 0, y], 0)]\n", + "bc_u = [Eq(u[t+1, 0, y], 0)]\n", "bc_u += [Eq(u[t+1, nx-1, y], 0)]\n", "bc_u += [Eq(u[t+1, x, 0], 0)]\n", "bc_u += [Eq(u[t+1, x, ny-1], 1)] # except u=1 for y=2\n", - "bc_v = [Eq(v[t+1, 0, y], 0)]\n", + "bc_v = [Eq(v[t+1, 0, y], 0)]\n", "bc_v += [Eq(v[t+1, nx-1, y], 0)]\n", "bc_v += [Eq(v[t+1, x, ny-1], 0)]\n", "bc_v += [Eq(v[t+1, x, 0], 0)]\n", "\n", - "bc_p = [Eq(p[t+1, 0, y],p[t+1, 1,y])] # dpn/dx = 0 for x=0.\n", - "bc_p += [Eq(p[t+1,nx-1, y],p[t+1,nx-2, y])] # dpn/dx = 0 for x=2.\n", - "bc_p += [Eq(p[t+1, x, 0],p[t+1,x ,1])] # dpn/dy = 0 at y=0\n", - "bc_p += [Eq(p[t+1, x, ny-1],p[t+1, x, ny-2])] # pn=0 for y=2\n", + "bc_p = [Eq(p[t+1, 0, y], p[t+1, 1, y])] # dpn/dx = 0 for x=0.\n", + "bc_p += [Eq(p[t+1, nx-1, y], p[t+1, nx-2, y])] # dpn/dx = 0 for x=2.\n", + "bc_p += [Eq(p[t+1, x, 0], p[t+1, x, 1])] # dpn/dy = 0 at y=0\n", + "bc_p += [Eq(p[t+1, x, ny-1], p[t+1, x, ny-2])] # pn=0 for y=2\n", "bc_p += [Eq(p[t+1, 0, 0], 0)]\n", - "bc=bc_u+bc_v\n", + "bc = bc_u+bc_v\n", "\n", - "optime=Operator([update_u, update_v]+bc_u+bc_v)\n", - "oppres=Operator([update_p]+bc_p)" + "optime = Operator([update_u, update_v]+bc_u+bc_v)\n", + "oppres = Operator([update_p]+bc_p)" ] }, { @@ -514,9 +514,9 @@ "\n", "\n", "# This is the time loop.\n", - "for step in range(0,nt):\n", - " if step>0:\n", - " oppres(time_M = nit)\n", + "for step in range(0, nt):\n", + " if step > 0:\n", + " oppres(time_M=nit)\n", " optime(time_m=step, time_M=step, dt=dt)" ] }, @@ -537,15 +537,15 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "fig = pyplot.figure(figsize=(11,7), dpi=100)\n", + "# NBVAL_IGNORE_OUTPUT\n", + "fig = pyplot.figure(figsize=(11, 7), dpi=100)\n", "# Plotting the pressure field as a contour.\n", "pyplot.contourf(X, Y, p.data[0], 
alpha=0.5, cmap=cm.viridis)\n", "pyplot.colorbar()\n", "# Plotting the pressure field outlines.\n", "pyplot.contour(X, Y, p.data[0], cmap=cm.viridis)\n", "# Plotting velocity field.\n", - "pyplot.quiver(X[::2,::2], Y[::2,::2], u.data[0,::2,::2], v.data[0,::2,::2])\n", + "pyplot.quiver(X[::2, ::2], Y[::2, ::2], u.data[0, ::2, ::2], v.data[0, ::2, ::2])\n", "pyplot.xlabel('X')\n", "pyplot.ylabel('Y');\n" ] @@ -574,17 +574,17 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Again, check results with Marchi et al 2009.\n", "fig = pyplot.figure(figsize=(12, 6))\n", "ax1 = fig.add_subplot(121)\n", - "ax1.plot(u.data[0,int(grid.shape[0]/2),:],y_coord[:])\n", - "ax1.plot(Marchi_Re10_u[:,1],Marchi_Re10_u[:,0],'ro')\n", + "ax1.plot(u.data[0, int(grid.shape[0]/2), :], y_coord[:])\n", + "ax1.plot(Marchi_Re10_u[:, 1], Marchi_Re10_u[:, 0], 'ro')\n", "ax1.set_xlabel('$u$')\n", "ax1.set_ylabel('$y$')\n", "ax1 = fig.add_subplot(122)\n", - "ax1.plot(x_coord[:],v.data[0,:,int(grid.shape[0]/2)])\n", - "ax1.plot(Marchi_Re10_v[:,0],Marchi_Re10_v[:,1],'ro')\n", + "ax1.plot(x_coord[:], v.data[0, :, int(grid.shape[0]/2)])\n", + "ax1.plot(Marchi_Re10_v[:, 0], Marchi_Re10_v[:, 1], 'ro')\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$v$')\n", "\n", @@ -623,28 +623,28 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "fig = pyplot.figure(figsize=(12, 6))\n", "ax1 = fig.add_subplot(121)\n", - "ax1.plot(a[int(npgrid[0]/2),:],y_coord[:])\n", - "ax1.plot(u.data[0,int(grid.shape[0]/2),:],y_coord[:],'--')\n", - "ax1.plot(Marchi_Re10_u[:,1],Marchi_Re10_u[:,0],'ro')\n", + "ax1.plot(a[int(npgrid[0]/2), :], y_coord[:])\n", + "ax1.plot(u.data[0, int(grid.shape[0]/2), :], y_coord[:], '--')\n", + "ax1.plot(Marchi_Re10_u[:, 1], Marchi_Re10_u[:, 0], 'ro')\n", "ax1.set_xlabel('$u$')\n", "ax1.set_ylabel('$y$')\n", "ax1 = fig.add_subplot(122)\n", - "ax1.plot(x_coord[:],b[:,int(npgrid[1]/2)])\n", - 
"ax1.plot(x_coord[:],v.data[0,:,int(grid.shape[0]/2)],'--')\n", - "ax1.plot(Marchi_Re10_v[:,0],Marchi_Re10_v[:,1],'ro')\n", + "ax1.plot(x_coord[:], b[:, int(npgrid[1]/2)])\n", + "ax1.plot(x_coord[:], v.data[0, :, int(grid.shape[0]/2)], '--')\n", + "ax1.plot(Marchi_Re10_v[:, 0], Marchi_Re10_v[:, 1], 'ro')\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$v$')\n", - "ax1.legend(['numpy','devito','Marchi (2009)'])\n", + "ax1.legend(['numpy', 'devito', 'Marchi (2009)'])\n", "\n", "pyplot.show()\n", "\n", - "#Pressure norm check\n", + "# Pressure norm check\n", "tol = 1e-3\n", - "assert np.sum((c[:,:]-d[:,:])**2/ np.maximum(d[:,:]**2,1e-10)) < tol\n", - "assert np.sum((p.data[0]-p.data[1])**2/np.maximum(p.data[0]**2,1e-10)) < tol" + "assert np.sum((c[:, :]-d[:, :])**2/ np.maximum(d[:, :]**2, 1e-10)) < tol\n", + "assert np.sum((p.data[0]-p.data[1])**2/np.maximum(p.data[0]**2, 1e-10)) < tol" ] }, { diff --git a/examples/cfd/08_shallow_water_equation.ipynb b/examples/cfd/08_shallow_water_equation.ipynb index 343b1638af..467972c0bb 100644 --- a/examples/cfd/08_shallow_water_equation.ipynb +++ b/examples/cfd/08_shallow_water_equation.ipynb @@ -120,18 +120,18 @@ "\n", " # System of equations\n", " pde_eta = Eq(eta.dt + M.dxc + N.dyc)\n", - " pde_M = Eq(M.dt + (M**2/D).dxc + (M*N/D).dyc + g*D*eta.forward.dxc + frictionTerm*M)\n", - " pde_N = Eq(N.dt + (M.forward*N/D).dxc + (N**2/D).dyc + g*D*eta.forward.dyc + g * alpha**2 * sqrt(M.forward**2 + N**2) / D**(7./3.)*N)\n", + " pde_M = Eq(M.dt + (M**2/D).dxc + (M*N/D).dyc + g*D*eta.forward.dxc + frictionTerm*M)\n", + " pde_N = Eq(N.dt + (M.forward*N/D).dxc + (N**2/D).dyc + g*D*eta.forward.dyc + g * alpha**2 * sqrt(M.forward**2 + N**2) / D**(7./3.)*N)\n", "\n", " stencil_eta = solve(pde_eta, eta.forward)\n", - " stencil_M = solve(pde_M, M.forward)\n", - " stencil_N = solve(pde_N, N.forward)\n", + " stencil_M = solve(pde_M, M.forward)\n", + " stencil_N = solve(pde_N, N.forward)\n", "\n", " # Equations with the forward in time term 
isolated\n", - " update_eta = Eq(eta.forward, stencil_eta, subdomain=grid.interior)\n", - " update_M = Eq(M.forward, stencil_M, subdomain=grid.interior)\n", - " update_N = Eq(N.forward, stencil_N, subdomain=grid.interior)\n", - " eq_D = Eq(D, eta.forward + h)\n", + " update_eta = Eq(eta.forward, stencil_eta, subdomain=grid.interior)\n", + " update_M = Eq(M.forward, stencil_M, subdomain=grid.interior)\n", + " update_N = Eq(N.forward, stencil_N, subdomain=grid.interior)\n", + " eq_D = Eq(D, eta.forward + h)\n", "\n", " return Operator([update_eta, update_M, update_N, eq_D] + [Eq(etasave, eta)])" ] @@ -155,7 +155,7 @@ "import matplotlib.animation as animation\n", "\n", "\n", - "def snaps2video (eta, title):\n", + "def snaps2video(eta, title):\n", " fig, ax = plt.subplots()\n", " matrice = ax.imshow(eta.data[0, :, :].T, vmin=-1, vmax=1, cmap=\"seismic\")\n", " plt.colorbar(matrice)\n", @@ -182,7 +182,7 @@ "metadata": {}, "outputs": [], "source": [ - "def plotDepthProfile (h, title):\n", + "def plotDepthProfile(h, title):\n", " fig, ax = plt.subplots()\n", " matrice = ax.imshow(h0)\n", " plt.colorbar(matrice)\n", @@ -232,26 +232,26 @@ } ], "source": [ - "Lx = 100.0 # width of the mantle in the x direction []\n", - "Ly = 100.0 # thickness of the mantle in the y direction []\n", - "nx = 401 # number of points in the x direction\n", - "ny = 401 # number of points in the y direction\n", - "dx = Lx / (nx - 1) # grid spacing in the x direction []\n", - "dy = Ly / (ny - 1) # grid spacing in the y direction []\n", - "g = 9.81 # gravity acceleration [m/s^2]\n", - "alpha = 0.025 # friction coefficient for natural channels in good condition\n", + "Lx = 100.0 # width of the mantle in the x direction []\n", + "Ly = 100.0 # thickness of the mantle in the y direction []\n", + "nx = 401 # number of points in the x direction\n", + "ny = 401 # number of points in the y direction\n", + "dx = Lx / (nx - 1) # grid spacing in the x direction []\n", + "dy = Ly / (ny - 1) # grid spacing in the 
y direction []\n", + "g = 9.81 # gravity acceleration [m/s^2]\n", + "alpha = 0.025 # friction coefficient for natural channels in good condition\n", "\n", "# Maximum wave propagation time [s]\n", - "Tmax = 3.\n", - "dt = 1/4500.\n", - "nt = (int)(Tmax/dt)\n", + "Tmax = 3.\n", + "dt = 1/4500.\n", + "nt = (int)(Tmax/dt)\n", "print(dt, nt)\n", "\n", "x = np.linspace(0.0, Lx, num=nx)\n", "y = np.linspace(0.0, Ly, num=ny)\n", "\n", "# Define initial eta, M, N\n", - "X, Y = np.meshgrid(x,y) # coordinates X,Y required to define eta, h, M, N\n", + "X, Y = np.meshgrid(x, y) # coordinates X,Y required to define eta, h, M, N\n", "\n", "# Define constant ocean depth profile h = 50 m\n", "h0 = 50. * np.ones_like(X)\n", @@ -264,7 +264,7 @@ "N0 = 0. * M0\n", "D0 = eta0 + 50.\n", "\n", - "grid = Grid(shape=(ny, nx), extent=(Ly, Lx), dtype=np.float32)" + "grid = Grid(shape=(ny, nx), extent=(Ly, Lx), dtype=np.float32)" ] }, { @@ -307,18 +307,18 @@ "nsnaps = 400\n", "\n", "# Defining symbolic functions\n", - "eta = TimeFunction(name='eta', grid=grid, space_order=2)\n", - "M = TimeFunction(name='M', grid=grid, space_order=2)\n", - "N = TimeFunction(name='N', grid=grid, space_order=2)\n", - "h = Function(name='h', grid=grid)\n", - "D = Function(name='D', grid=grid)\n", + "eta = TimeFunction(name='eta', grid=grid, space_order=2)\n", + "M = TimeFunction(name='M', grid=grid, space_order=2)\n", + "N = TimeFunction(name='N', grid=grid, space_order=2)\n", + "h = Function(name='h', grid=grid)\n", + "D = Function(name='D', grid=grid)\n", "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", @@ -1785,7 +1785,7 @@ "outputs": [], "source": [ "# To look at the 
code, uncomment the line below\n", - "#print(op.ccode)" + "# print(op.ccode)" ] }, { @@ -1809,8 +1809,8 @@ "h0 = 50 * np.ones_like(X)\n", "\n", "# Define initial Gaussian eta distribution [m]\n", - "eta0 = 0.5 * np.exp(-((X-35)**2/10)-((Y-35)**2/10)) # first Tsunami source\n", - "eta0 -= 0.5 * np.exp(-((X-65)**2/10)-((Y-65)**2/10)) # add second Tsunami source\n", + "eta0 = 0.5 * np.exp(-((X-35)**2/10)-((Y-35)**2/10)) # first Tsunami source\n", + "eta0 -= 0.5 * np.exp(-((X-65)**2/10)-((Y-65)**2/10)) # add second Tsunami source\n", "\n", "# Define initial M and N\n", "M0 = 100. * eta0\n", @@ -1862,10 +1862,10 @@ "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", @@ -4007,10 +4007,10 @@ "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", @@ -5494,10 +5494,10 @@ "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", @@ -6808,9 +6808,9 @@ "pert = 5. 
# perturbation amplitude\n", "\n", "np.random.seed(102034)\n", - "r = 2.0 * (np.random.rand(ny, nx) - 0.5) * pert # create random number perturbations\n", - "r = gaussian_filter(r, sigma=16) # smooth random number perturbation\n", - "h0 = h0 * (1 + r) # add perturbations to constant seafloor\n", + "r = 2.0 * (np.random.rand(ny, nx) - 0.5) * pert # create random number perturbations\n", + "r = gaussian_filter(r, sigma=16) # smooth random number perturbation\n", + "h0 = h0 * (1 + r) # add perturbations to constant seafloor\n", "\n", "# Define initial eta [m]\n", "eta0 = 0.2 * np.exp(-((X-30)**2/5)-((Y-50)**2/5))\n", @@ -6897,10 +6897,10 @@ "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", @@ -8100,7 +8100,7 @@ "eta0[mask] = 0.5\n", "\n", "# Smooth dam boundaries with gaussian filter\n", - "eta0 = gaussian_filter(eta0, sigma=8) # smooth random number perturbation\n", + "eta0 = gaussian_filter(eta0, sigma=8) # smooth random number perturbation\n", "\n", "# Define initial M and N\n", "M0 = 1. 
* eta0\n", @@ -8152,10 +8152,10 @@ "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", diff --git a/examples/cfd/09_Darcy_flow_equation.ipynb b/examples/cfd/09_Darcy_flow_equation.ipynb index 482cda7096..fe4979ccc0 100644 --- a/examples/cfd/09_Darcy_flow_equation.ipynb +++ b/examples/cfd/09_Darcy_flow_equation.ipynb @@ -98,15 +98,15 @@ " k_max = size//2\n", "\n", " if dim == 2:\n", - " wavenumers = (np.concatenate((np.arange(0, k_max, 1), \\\n", - " np.arange(-k_max, 0, 1)),0))\n", - " wavenumers = np.tile(wavenumers, (size,1))\n", + " wavenumers = (np.concatenate((np.arange(0, k_max, 1),\n", + " np.arange(-k_max, 0, 1)), 0))\n", + " wavenumers = np.tile(wavenumers, (size, 1))\n", "\n", - " k_x = wavenumers.transpose(1,0)\n", + " k_x = wavenumers.transpose(1, 0)\n", " k_y = wavenumers\n", "\n", " self.sqrt_eig = (size**2)*math.sqrt(2.0)*sigma*((4*(math.pi**2)*(k_x**2 + k_y**2) + tau**2)**(-alpha/2.0))\n", - " self.sqrt_eig[0,0] = 0.0\n", + " self.sqrt_eig[0, 0] = 0.0\n", "\n", " self.size = []\n", " for j in range(self.dim):\n", @@ -119,7 +119,6 @@ " coeff = np.random.randn(N, *self.size)\n", " coeff = self.sqrt_eig * coeff\n", "\n", - "\n", " return fft.ifftn(coeff).real" ] }, @@ -147,7 +146,7 @@ "s = 256\n", "\n", "# Create s x s grid with spacing 1\n", - "grid = Grid(shape=(s, s), extent=(1.0,1.0))\n", + "grid = Grid(shape=(s, s), extent=(1.0, 1.0))\n", "\n", "x, y = grid.dimensions\n", "t = grid.stepping_dim" @@ -176,8 +175,8 @@ "# Sample random fields\n", "# Create a threshold, either 4 or 12 (common for permeability)\n", "thresh_a = norm_a.sample(3)\n", - "thresh_a[thresh_a>=0] = 12\n", - "thresh_a[thresh_a<0] = 4\n", + 
"thresh_a[thresh_a >= 0] = 12\n", + "thresh_a[thresh_a < 0] = 4\n", "\n", "# The inputs:\n", "w1 = thresh_a[0]\n", @@ -218,7 +217,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Plot to show the input:\n", "ax1 = plt.subplot(221)\n", "ax2 = plt.subplot(222)\n", @@ -288,7 +287,7 @@ "source": [ "# Define 2D Darcy flow equation\n", "# Staggered FD is used to avoid numerical instability\n", - "equation_u = Eq(-div(a*grad(u,shift=.5),shift=-.5),f1)" + "equation_u = Eq(-div(a*grad(u, shift=.5), shift=-.5), f1)" ] }, { @@ -335,10 +334,10 @@ "# Boundary Conditions\n", "nx = s\n", "ny = s\n", - "bc = [Eq(u[t+1, 0, y],u[t+1, 1,y])] # du/dx = 0 for x=0.\n", - "bc += [Eq(u[t+1,nx-1, y],u[t+1,nx-2, y])] # du/dx = 0 for x=1.\n", - "bc += [Eq(u[t+1, x, 0],u[t+1,x ,1])] # du/dx = 0 at y=0\n", - "bc += [Eq(u[t+1, x, ny-1],u[t+1, x, ny-2])] # du/dx=0 for y=1\n", + "bc = [Eq(u[t+1, 0, y], u[t+1, 1, y])] # du/dx = 0 for x=0.\n", + "bc += [Eq(u[t+1, nx-1, y], u[t+1, nx-2, y])] # du/dx = 0 for x=1.\n", + "bc += [Eq(u[t+1, x, 0], u[t+1, x, 1])] # du/dx = 0 at y=0\n", + "bc += [Eq(u[t+1, x, ny-1], u[t+1, x, ny-2])] # du/dx=0 for y=1\n", "# u=0 for all sides\n", "bc += [Eq(u[t+1, x, 0], 0.)]\n", "bc += [Eq(u[t+1, x, ny-1], 0.)]\n", @@ -373,6 +372,8 @@ "f: Array of size (s, s)\n", " The forcing function f(x) = 1\n", " '''\n", + "\n", + "\n", "def darcy_flow_2d(perm, f):\n", "\n", " # a(x) is the coefficients\n", @@ -382,7 +383,7 @@ " initialize_function(a, perm, 0)\n", "\n", " # call operator for the 15,000th pseudo-timestep\n", - " op(time= 15000)\n", + " op(time=15000)\n", "\n", " return np.array(u.data[0])" ] @@ -419,9 +420,9 @@ "metadata": {}, "outputs": [], "source": [ - "assert np.isclose(LA.norm(output1),1.0335084, atol=1e-3, rtol=0)\n", - "assert np.isclose(LA.norm(output2),1.3038709, atol=1e-3, rtol=0)\n", - "assert np.isclose(LA.norm(output3),1.3940924, atol=1e-3, rtol=0)" + "assert np.isclose(LA.norm(output1), 1.0335084, atol=1e-3, rtol=0)\n", + 
"assert np.isclose(LA.norm(output2), 1.3038709, atol=1e-3, rtol=0)\n", + "assert np.isclose(LA.norm(output3), 1.3940924, atol=1e-3, rtol=0)" ] }, { @@ -457,7 +458,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# plot to show the output:\n", "ax1 = plt.subplot(221)\n", "ax2 = plt.subplot(222)\n", diff --git a/examples/compiler/01_data_regions.ipynb b/examples/compiler/01_data_regions.ipynb index b6392845cb..5f97892694 100644 --- a/examples/compiler/01_data_regions.ipynb +++ b/examples/compiler/01_data_regions.ipynb @@ -425,7 +425,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op.apply(time_M=2)\n", "print(u_new.data_with_halo)" ] @@ -499,7 +499,7 @@ } ], "source": [ - "u_pad = TimeFunction(name='u_pad', grid=grid, space_order=2, padding=(0,2,2))\n", + "u_pad = TimeFunction(name='u_pad', grid=grid, space_order=2, padding=(0, 2, 2))\n", "u_pad._data_allocated[:] = 0\n", "u_pad.data_with_halo[:] = 1\n", "u_pad.data[:] = 2\n", diff --git a/examples/compiler/02_indexification.ipynb b/examples/compiler/02_indexification.ipynb index 793d8b008f..22f79729ad 100644 --- a/examples/compiler/02_indexification.ipynb +++ b/examples/compiler/02_indexification.ipynb @@ -82,7 +82,7 @@ } ], "source": [ - "u_i = u.indexify() # For more details about the method `indexify`, see `devito/symbolics/manipulation.py`\n", + "u_i = u.indexify() # For more details about the method `indexify`, see `devito/symbolics/manipulation.py`\n", "print(u_i)" ] }, @@ -126,7 +126,7 @@ "metadata": {}, "outputs": [], "source": [ - "a = u[time,x+1]" + "a = u[time, x+1]" ] }, { @@ -166,7 +166,7 @@ "metadata": {}, "outputs": [], "source": [ - "b = u[time+1,x-2]" + "b = u[time+1, x-2]" ] }, { diff --git a/examples/compiler/04_iet-B.ipynb b/examples/compiler/04_iet-B.ipynb index 5f49f72e78..ac81c5c19e 100644 --- a/examples/compiler/04_iet-B.ipynb +++ b/examples/compiler/04_iet-B.ipynb @@ -71,11 +71,11 @@ " 'b': Constant(name='b'),\n", " 'c': 
Array(name='c', shape=(3,), dimensions=(dims['i'],)).indexify(),\n", " 'd': Array(name='d',\n", - " shape=(3,3),\n", - " dimensions=(dims['j'],dims['k'])).indexify(),\n", + " shape=(3, 3),\n", + " dimensions=(dims['j'], dims['k'])).indexify(),\n", " 'e': Function(name='e',\n", - " shape=(3,3,3),\n", - " dimensions=(dims['t0'],dims['t1'],dims['i'])).indexify(),\n", + " shape=(3, 3, 3),\n", + " dimensions=(dims['t0'], dims['t1'], dims['i'])).indexify(),\n", " 'f': TimeFunction(name='f', grid=grid).indexify()}\n", "symbs" ] @@ -108,12 +108,14 @@ "from devito.ir.equations import DummyEq\n", "from devito.tools import pprint\n", "\n", + "\n", "def get_exprs(a, b, c, d, e, f):\n", " return [Expression(DummyEq(a, b + c + 5.)),\n", " Expression(DummyEq(d, e - f)),\n", " Expression(DummyEq(a, 4 * (b * a))),\n", " Expression(DummyEq(a, (6. / b) + (8. * a)))]\n", "\n", + "\n", "exprs = get_exprs(symbs['a'],\n", " symbs['b'],\n", " symbs['c'],\n", @@ -139,6 +141,7 @@ "source": [ "from devito.ir.iet import Iteration\n", "\n", + "\n", "def get_iters(dims):\n", " return [lambda ex: Iteration(ex, dims['i'], (0, 3, 1)),\n", " lambda ex: Iteration(ex, dims['j'], (0, 5, 1)),\n", @@ -146,6 +149,7 @@ " lambda ex: Iteration(ex, dims['t0'], (0, 4, 1)),\n", " lambda ex: Iteration(ex, dims['t1'], (0, 4, 1))]\n", "\n", + "\n", "iters = get_iters(dims)" ] }, @@ -199,6 +203,7 @@ " # expr0\n", " return iters[0](iters[1](iters[2](exprs[0])))\n", "\n", + "\n", "def get_block2(exprs, iters):\n", " # Non-perfect simple loop nest:\n", " # for i\n", @@ -208,6 +213,7 @@ " # expr1\n", " return iters[0]([exprs[0], iters[1](iters[2](exprs[1]))])\n", "\n", + "\n", "def get_block3(exprs, iters):\n", " # Non-perfect non-trivial loop nest:\n", " # for i\n", @@ -223,6 +229,7 @@ " iters[1](iters[2]([exprs[1], exprs[2]])),\n", " iters[4](exprs[3])])\n", "\n", + "\n", "block1 = get_block1(exprs, iters)\n", "block2 = get_block2(exprs, iters)\n", "block3 = get_block3(exprs, iters)\n", diff --git 
a/examples/finance/bs_ivbp.ipynb b/examples/finance/bs_ivbp.ipynb index 4c46d48e14..44457bcda0 100644 --- a/examples/finance/bs_ivbp.ipynb +++ b/examples/finance/bs_ivbp.ipynb @@ -51,7 +51,7 @@ "\n", "configuration[\"log-level\"] = 'INFO'\n", "\n", - "## Constants\n", + "# Constants\n", "# The strike price of the option\n", "K = 100.0\n", "\n", @@ -67,14 +67,14 @@ "\n", "# If you want to try some different problems, uncomment these lines\n", "\n", - "## Example 2\n", + "# Example 2\n", "# K = 10.0\n", "# r = 0.1\n", "# sigma = 0.2\n", "# smin = 0.0\n", "# smax = 20.0\n", "\n", - "## Example 3\n", + "# Example 3\n", "# K = 100.0\n", "# r = 0.05\n", "# sigma = 0.25\n", @@ -89,16 +89,16 @@ "# Extent calculations\n", "tmax = 1.0\n", "dt0 = 0.0005\n", - "ds0 = 1.0\n", + "ds0 = 1.0\n", "nt = (int)(tmax / dt0) + 1\n", "ns = int((smax - smin) / ds0) + 1\n", "\n", "shape = (ns, )\n", - "origin =(smin, )\n", + "origin = (smin, )\n", "spacing = (ds0, )\n", "extent = int(ds0 * (ns - 1))\n", "\n", - "print(\"dt,tmax,nt;\", dt0,tmax,nt)\n", + "print(\"dt,tmax,nt;\", dt0, tmax, nt)\n", "print(\"shape; \", shape)\n", "print(\"origin; \", origin)\n", "print(\"spacing; \", spacing)\n", @@ -162,10 +162,10 @@ "grid = Grid(shape=shape, origin=origin, extent=extent, dimensions=(s, ))\n", "\n", "so = 2\n", - "v = TimeFunction(name='v', grid=grid, space_order=so, time_order=1, save=nt)\n", - "v_no_bc = TimeFunction(name='v_no_bc', grid=grid, space_order=so, time_order=1, save=nt)\n", + "v = TimeFunction(name='v', grid=grid, space_order=so, time_order=1, save=nt)\n", + "v_no_bc = TimeFunction(name='v_no_bc', grid=grid, space_order=so, time_order=1, save=nt)\n", "\n", - "t,s = v.dimensions\n", + "t, s = v.dimensions\n", "ds = s.spacing\n", "dt = t.spacing\n", "\n", @@ -215,18 +215,18 @@ "outputs": [], "source": [ "# Equations with Neumann boundary conditions\n", - "eq = [Eq(v[t,extent], v[t,extent-1]+(v[t,extent-1]-v[t,extent-2])),\n", - " Eq(v[t,extent+1], 
v[t,extent]+(v[t,extent-1]-v[t,extent-2])),\n", + "eq = [Eq(v[t, extent], v[t, extent-1]+(v[t, extent-1]-v[t, extent-2])),\n", + " Eq(v[t, extent+1], v[t, extent]+(v[t, extent-1]-v[t, extent-2])),\n", " Eq(v.forward, update_centered)]\n", "eq_no_bc = [Eq(v.forward, update_centered)]\n", "\n", - "op = Operator(eq, subs=v.grid.spacing_map)\n", + "op = Operator(eq, subs=v.grid.spacing_map)\n", "op_no_bc = Operator(eq_no_bc, subs=v_no_bc.grid.spacing_map)\n", "\n", "# Initial conditions\n", "\n", "for i in range(shape[0]):\n", - " v.data[0, i] = max((smin + ds0 * i) - K, 0)\n", + " v.data[0, i] = max((smin + ds0 * i) - K, 0)\n", " v_no_bc.data[0, i] = max((smin + ds0 * i) - K, 0)" ] }, @@ -256,7 +256,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Run our operators\n", "startDevito = timer.time()\n", @@ -287,27 +287,27 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Get an appropriate ylimit\n", - "slice_smax = v.data[:,int(smax-smin-padding)]\n", + "slice_smax = v.data[:, int(smax-smin-padding)]\n", "ymax = max(slice_smax) + 2\n", "\n", "# Plot\n", "s = np.linspace(smin, smax, shape[0])\n", - "plt.figure(figsize=(12,10), facecolor='w')\n", + "plt.figure(figsize=(12, 10), facecolor='w')\n", "\n", "time = [1*nt//5, 2*nt//5, 3*nt//5, 4*nt//5, 5*nt//5-1]\n", "colors = [\"blue\", \"green\", \"gold\", \"darkorange\", \"red\"]\n", "\n", "# initial conditions\n", - "plt.plot(s, v_no_bc.data[0,:], '-', color=\"black\", label='initial condition', linewidth=1)\n", + "plt.plot(s, v_no_bc.data[0, :], '-', color=\"black\", label='initial condition', linewidth=1)\n", "\n", "for i in range(len(time)):\n", - " plt.plot(s, v_no_bc.data[time[i],:], '-', color=colors[i], label='t='+str(time[i]*dt0), linewidth=1.5)\n", + " plt.plot(s, v_no_bc.data[time[i], :], '-', color=colors[i], label='t='+str(time[i]*dt0), linewidth=1.5)\n", "\n", - "plt.xlim([smin+padding,smax-padding])\n", - "plt.ylim([0,ymax])\n", + 
"plt.xlim([smin+padding, smax-padding])\n", + "plt.ylim([0, ymax])\n", "\n", "plt.legend(loc=2)\n", "plt.grid(True)\n", @@ -341,27 +341,27 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Get an appropriate ylimit\n", - "slice_smax = v.data[:,int(smax-smin-padding)]\n", + "slice_smax = v.data[:, int(smax-smin-padding)]\n", "ymax = max(slice_smax) + 2\n", "\n", "# Plot\n", "s = np.linspace(smin, smax, shape[0])\n", - "plt.figure(figsize=(12,10), facecolor='w')\n", + "plt.figure(figsize=(12, 10), facecolor='w')\n", "\n", "time = [1*nt//5, 2*nt//5, 3*nt//5, 4*nt//5, 5*nt//5-1]\n", "colors = [\"blue\", \"green\", \"gold\", \"darkorange\", \"red\"]\n", "\n", "# initial conditions\n", - "plt.plot(s, v.data[0,:], '-', color=\"black\", label='initial condition', linewidth=1)\n", + "plt.plot(s, v.data[0, :], '-', color=\"black\", label='initial condition', linewidth=1)\n", "\n", "for i in range(len(time)):\n", - " plt.plot(s, v.data[time[i],:], '-', color=colors[i], label='t='+str(time[i]*dt0), linewidth=1.5)\n", + " plt.plot(s, v.data[time[i], :], '-', color=colors[i], label='t='+str(time[i]*dt0), linewidth=1.5)\n", "\n", - "plt.xlim([smin+padding,smax-padding])\n", - "plt.ylim([0,ymax])\n", + "plt.xlim([smin+padding, smax-padding])\n", + "plt.ylim([0, ymax])\n", "\n", "plt.legend(loc=2)\n", "plt.grid(True)\n", @@ -403,7 +403,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "\n", "# Trim the padding off smin and smax\n", @@ -413,15 +413,15 @@ "tt = np.linspace(0.0, dt0*(nt-1), nt)\n", "ss = np.linspace(smin+padding, smax-padding, shape[0]-padding*2)\n", "\n", - "hf = plt.figure(figsize=(12,12))\n", + "hf = plt.figure(figsize=(12, 12))\n", "ha = plt.axes(projection='3d')\n", "\n", "# 45 degree viewpoint\n", "ha.view_init(elev=25, azim=-45)\n", "\n", - "ha.set_xlim3d([0.0,1.0])\n", - "ha.set_ylim3d([smin+padding,smax-padding])\n", - "ha.set_zlim3d([0,ymax])\n", + "ha.set_xlim3d([0.0, 1.0])\n", + 
"ha.set_ylim3d([smin+padding, smax-padding])\n", + "ha.set_zlim3d([0, ymax])\n", "\n", "ha.set_xlabel('Time to expiration', labelpad=12, fontsize=16)\n", "ha.set_ylabel('Stock value', labelpad=12, fontsize=16)\n", @@ -491,7 +491,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Derived formula for Black Scholes call from\n", "# https://aaronschlegel.me/black-scholes-formula-python.html\n", @@ -504,6 +504,7 @@ " call = (S * cdf(N)(d1) - K * np.exp(-r * T) * cdf(N)(d2))\n", " return call\n", "\n", + "\n", "startBF = timer.time()\n", "\n", "# Calculate truth and compare to our solution\n", @@ -522,17 +523,17 @@ "print(\"call_value_bs timesteps: %12.6s, %12.6fs runtime\" % (len(time), endBF - startBF))\n", "\n", "s2 = np.linspace(smin, smax, shape[0])\n", - "plt.figure(figsize=(12,10))\n", + "plt.figure(figsize=(12, 10))\n", "\n", "colors = [\"blue\", \"green\", \"gold\", \"darkorange\", \"red\"]\n", - "plt.plot(s2, v.data[0,:], '-', color=\"black\", label='initial condition', linewidth=1)\n", + "plt.plot(s2, v.data[0, :], '-', color=\"black\", label='initial condition', linewidth=1)\n", "\n", "for i in range(len(time)):\n", - " plt.plot(s2, results[i], ':', color=colors[i], label='truth t='+str(time[i]), linewidth=3)\n", - " plt.plot(s2, v.data[int(time[i]*nt),:], '-', color=colors[i], label='pde t='+str(time[i]), linewidth=1)\n", + " plt.plot(s2, results[i], ':', color=colors[i], label='truth t='+str(time[i]), linewidth=3)\n", + " plt.plot(s2, v.data[int(time[i]*nt), :], '-', color=colors[i], label='pde t='+str(time[i]), linewidth=1)\n", "\n", - "plt.xlim([smin+padding,smax-padding])\n", - "plt.ylim([0,ymax])\n", + "plt.xlim([smin+padding, smax-padding])\n", + "plt.ylim([0, ymax])\n", "\n", "plt.legend()\n", "plt.grid(True)\n", @@ -580,24 +581,24 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot the l2 norm of the formula and our solution over time\n", - "t_range = 
np.linspace(dt0,1.0,50)\n", - "x_range = range(padding, smax-smin-padding*2, 1)\n", + "t_range = np.linspace(dt0, 1.0, 50)\n", + "x_range = range(padding, smax-smin-padding*2, 1)\n", "vals = []\n", "\n", "for t in t_range:\n", " l2 = 0.0\n", " for x in x_range:\n", " truth = call_value_bs(x+smin, K, t, r, sigma)\n", - " val = v.data[int(t*(nt-1)), x]\n", - " l2 += (truth - val)**2\n", + " val = v.data[int(t*(nt-1)), x]\n", + " l2 += (truth - val)**2\n", "\n", " rms = np.sqrt(np.float64(l2 / len(x_range)))\n", " vals.append(rms)\n", "\n", - "plt.figure(figsize=(12,10))\n", + "plt.figure(figsize=(12, 10))\n", "plt.plot(t_range, np.array(vals))" ] }, @@ -618,7 +619,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "np.mean(vals)" ] diff --git a/examples/performance/02_advisor_roofline.ipynb b/examples/performance/02_advisor_roofline.ipynb index dc76d7ecf0..aeb3c5aa2d 100644 --- a/examples/performance/02_advisor_roofline.ipynb +++ b/examples/performance/02_advisor_roofline.ipynb @@ -107,7 +107,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "! python3 $DEVITO_JUPYTER/benchmarks/user/advisor/run_advisor.py --path $DEVITO_JUPYTER/benchmarks/user/benchmark.py --exec-args \"run -P acoustic -d 64 64 64 -so 4 --tn 50 --autotune off\" --output $DEVITO_JUPYTER/examples/performance/profilings --name JupyterProfiling\n" ] @@ -166,7 +166,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "! python3 $DEVITO_JUPYTER/benchmarks/user/advisor/roofline.py --mode overview --name $DEVITO_JUPYTER/examples/performance/resources/OverviewRoof --project $DEVITO_JUPYTER/examples/performance/profilings/JupyterProfiling\n" ] @@ -230,7 +230,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "! 
python3 $DEVITO_JUPYTER/benchmarks/user/advisor/roofline.py --mode top-loops --name $DEVITO_JUPYTER/examples/performance/resources/TopLoopsRoof --project $DEVITO_JUPYTER/examples/performance/profilings/JupyterProfiling\n" ] diff --git a/examples/seismic/abc_methods/01_introduction.ipynb b/examples/seismic/abc_methods/01_introduction.ipynb index d041564b41..b470078b62 100644 --- a/examples/seismic/abc_methods/01_introduction.ipynb +++ b/examples/seismic/abc_methods/01_introduction.ipynb @@ -188,11 +188,11 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "import numpy as np\n", - "import matplotlib.pyplot as plot\n", - "import matplotlib.ticker as mticker\n", - "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", - "from matplotlib import cm" + "import numpy as np\n", + "import matplotlib.pyplot as plot\n", + "import matplotlib.ticker as mticker\n", + "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", + "from matplotlib import cm" ] }, { @@ -217,10 +217,10 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "%matplotlib inline\n", - "from examples.seismic import TimeAxis\n", - "from examples.seismic import RickerSource\n", - "from examples.seismic import Receiver\n", - "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" + "from examples.seismic import TimeAxis\n", + "from examples.seismic import RickerSource\n", + "from examples.seismic import Receiver\n", + "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" ] }, { @@ -244,16 +244,16 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = 101\n", - "nptz = 101\n", - "x0 = 0.\n", - "x1 = 1000.\n", - "compx = x1-x0\n", - "z0 = 0.\n", - "z1 = 1000.\n", - "compz = z1-z0\n", - "hx = (x1-x0)/(nptx-1)\n", - "hz = (z1-z0)/(nptz-1)" + "nptx = 101\n", + "nptz = 101\n", + "x0 = 0.\n", + "x1 = 1000.\n", + "compx = x1-x0\n", + "z0 = 0.\n", + "z1 = 1000.\n", + "compz = z1-z0\n", + "hx = (x1-x0)/(nptx-1)\n", + "hz = (z1-z0)/(nptz-1)" ] }, { @@ -276,10 
+276,10 @@ "metadata": {}, "outputs": [], "source": [ - "origin = (x0,z0)\n", - "extent = (compx,compz)\n", - "shape = (nptx,nptz)\n", - "spacing = (hx,hz)" + "origin = (x0, z0)\n", + "extent = (compx, compz)\n", + "shape = (nptx, nptz)\n", + "spacing = (hx, hz)" ] }, { @@ -297,9 +297,12 @@ "source": [ "class d0domain(SubDomain):\n", " name = 'd0'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " return {x: z, z: z}\n", + "\n", + "\n", "d0_domain = d0domain()" ] }, @@ -332,12 +335,12 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = np.zeros((nptx,nptz))\n", + "v0 = np.zeros((nptx, nptz))\n", "p0 = 0\n", "p1 = int((1/2)*nptz)\n", "p2 = nptz\n", - "v0[0:nptx,p0:p1] = 1.5\n", - "v0[0:nptx,p1:p2] = 2.5" + "v0[0:nptx, p0:p1] = 1.5\n", + "v0[0:nptx, p1:p2] = 2.5" ] }, { @@ -355,11 +358,11 @@ "source": [ "def graph2dvel(vel):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel)\n", - " extent = [fscale*x0,fscale*x1, fscale*z1, fscale*z0]\n", - " fig = plot.imshow(np.transpose(vel), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel)\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = plot.imshow(np.transpose(vel), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.title('Velocity Profile')\n", @@ -433,13 +436,13 @@ "metadata": {}, "outputs": [], "source": [ - "t0 = 0.\n", - "tn = 1000.\n", - "CFL = 0.4\n", - "vmax = np.amax(v0)\n", - "dtmax = np.float64((min(hx,hz)*CFL)/(vmax))\n", + "t0 = 0.\n", + "tn = 1000.\n", + "CFL = 0.4\n", + "vmax = np.amax(v0)\n", + "dtmax = np.float64((min(hx, hz)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", - "dt0 = np.float64((tn-t0)/ntmax)" + "dt0 = 
np.float64((tn-t0)/ntmax)" ] }, { @@ -463,8 +466,8 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)\n", - "nt = time_range.num - 1" + "time_range = TimeAxis(start=t0, stop=tn, num=ntmax+1)\n", + "nt = time_range.num - 1" ] }, { @@ -487,10 +490,10 @@ "metadata": {}, "outputs": [], "source": [ - "(hxs,hzs) = grid.spacing_map\n", - "(x, z) = grid.dimensions\n", - "t = grid.stepping_dim\n", - "dt = grid.stepping_dim.spacing" + "(hxs, hzs) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", + "t = grid.stepping_dim\n", + "dt = grid.stepping_dim.spacing" ] }, { @@ -517,10 +520,10 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.01\n", + "f0 = 0.01\n", "nsource = 1\n", - "xposf = 0.5*compx\n", - "zposf = hz" + "xposf = 0.5*compx\n", + "zposf = hz" ] }, { @@ -558,7 +561,7 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src',grid=grid,f0=f0,npoint=nsource,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "src = RickerSource(name='src', grid=grid, f0=f0, npoint=nsource, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -613,9 +616,9 @@ "metadata": {}, "outputs": [], "source": [ - "nrec = nptx\n", - "nxpos = np.linspace(x0,x1,nrec)\n", - "nzpos = hz" + "nrec = nptx\n", + "nxpos = np.linspace(x0, x1, nrec)\n", + "nzpos = hz" ] }, { @@ -651,7 +654,7 @@ "metadata": {}, "outputs": [], "source": [ - "rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "rec = Receiver(name='rec', grid=grid, npoint=nrec, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "rec.coordinates.data[:, 0] = nxpos\n", "rec.coordinates.data[:, 1] = nzpos" ] @@ -675,7 +678,7 @@ "metadata": {}, "outputs": [], "source": [ - "u = TimeFunction(name=\"u\",grid=grid,time_order=2,space_order=2,staggered=NODE,dtype=np.float64)" + "u = TimeFunction(name=\"u\", 
grid=grid, time_order=2, space_order=2, staggered=NODE, dtype=np.float64)" ] }, { @@ -702,8 +705,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel0 = Function(name=\"vel0\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "vel0.data[:,:] = v0[:,:]" + "vel0 = Function(name=\"vel0\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "vel0.data[:, :] = v0[:, :]" ] }, { @@ -731,7 +734,7 @@ "metadata": {}, "outputs": [], "source": [ - "src_term = src.inject(field=u.forward,expr=src*dt**2*vel0**2)" + "src_term = src.inject(field=u.forward, expr=src*dt**2*vel0**2)" ] }, { @@ -804,7 +807,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil = Eq(u.forward, solve(pde,u.forward),subdomain = grid.subdomains['d0'])" + "stencil = Eq(u.forward, solve(pde, u.forward), subdomain=grid.subdomains['d0'])" ] }, { @@ -838,7 +841,7 @@ "metadata": {}, "outputs": [], "source": [ - "bc = [Eq(u[t+1,0,z],0.),Eq(u[t+1,nptx-1,z],0.),Eq(u[t+1,x,nptz-1],0.),Eq(u[t+1,x,0],u[t+1,x,1])]" + "bc = [Eq(u[t+1, 0, z], 0.), Eq(u[t+1, nptx-1, z], 0.), Eq(u[t+1, x, nptz-1], 0.), Eq(u[t+1, x, 0], u[t+1, x, 1])]" ] }, { @@ -864,7 +867,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op = Operator([stencil] + src_term + bc + rec_term,subs=grid.spacing_map)" + "op = Operator([stencil] + src_term + bc + rec_term, subs=grid.spacing_map)" ] }, { @@ -928,7 +931,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op(time=nt,dt=dt0)" + "op(time=nt, dt=dt0)" ] }, { @@ -946,11 +949,11 @@ "source": [ "def graph2d(U):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(U)/10.\n", - " extent = [fscale*x0,fscale*x1,fscale*z1,fscale*z0]\n", - " fig = plot.imshow(np.transpose(U),vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(U)/10.\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = 
plot.imshow(np.transpose(U), vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.axis('equal')\n", @@ -1004,7 +1007,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "graph2d(u.data[0,:,:])" + "graph2d(u.data[0, :, :])" ] }, { @@ -1024,11 +1027,11 @@ "source": [ "def graph2drec(rec):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", + " plot.figure(figsize=(16, 8))\n", " fscaled = 1/10**(3)\n", " fscalet = 1/10**(3)\n", - " scale = np.amax(rec)/10.\n", - " extent = [fscaled*x0,fscaled*x1, fscalet*tn, fscalet*t0]\n", + " scale = np.amax(rec)/10.\n", + " extent = [fscaled*x0, fscaled*x1, fscalet*tn, fscalet*t0]\n", " fig = plot.imshow(rec, vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", diff --git a/examples/seismic/abc_methods/02_damping.ipynb b/examples/seismic/abc_methods/02_damping.ipynb index 6111471cf8..3ade379331 100644 --- a/examples/seismic/abc_methods/02_damping.ipynb +++ b/examples/seismic/abc_methods/02_damping.ipynb @@ -148,11 +148,11 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "import numpy as np\n", - "import matplotlib.pyplot as plot\n", - "import matplotlib.ticker as mticker\n", - "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", - "from matplotlib import cm" + "import numpy as np\n", + "import matplotlib.pyplot as plot\n", + "import matplotlib.ticker as mticker\n", + "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", + "from matplotlib import cm" ] }, { @@ -171,10 +171,10 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "%matplotlib inline\n", - "from examples.seismic import TimeAxis\n", - "from examples.seismic import RickerSource\n", - "from examples.seismic import 
Receiver\n", - "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" + "from examples.seismic import TimeAxis\n", + "from examples.seismic import RickerSource\n", + "from examples.seismic import Receiver\n", + "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" ] }, { @@ -190,16 +190,16 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = 101\n", - "nptz = 101\n", - "x0 = 0.\n", - "x1 = 1000.\n", - "compx = x1-x0\n", - "z0 = 0.\n", - "z1 = 1000.\n", - "compz = z1-z0\n", - "hxv = (x1-x0)/(nptx-1)\n", - "hzv = (z1-z0)/(nptz-1)" + "nptx = 101\n", + "nptz = 101\n", + "x0 = 0.\n", + "x1 = 1000.\n", + "compx = x1-x0\n", + "z0 = 0.\n", + "z1 = 1000.\n", + "compz = z1-z0\n", + "hxv = (x1-x0)/(nptx-1)\n", + "hzv = (z1-z0)/(nptz-1)" ] }, { @@ -227,8 +227,8 @@ "metadata": {}, "outputs": [], "source": [ - "npmlx = 20\n", - "npmlz = 20" + "npmlx = 20\n", + "npmlz = 20" ] }, { @@ -261,18 +261,18 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = nptx + 2*npmlx\n", - "nptz = nptz + 1*npmlz\n", - "x0 = x0 - hxv*npmlx\n", - "x1 = x1 + hxv*npmlx\n", - "compx = x1-x0\n", - "z0 = z0\n", - "z1 = z1 + hzv*npmlz\n", - "compz = z1-z0\n", - "origin = (x0,z0)\n", - "extent = (compx,compz)\n", - "shape = (nptx,nptz)\n", - "spacing = (hxv,hzv)" + "nptx = nptx + 2*npmlx\n", + "nptz = nptz + 1*npmlz\n", + "x0 = x0 - hxv*npmlx\n", + "x1 = x1 + hxv*npmlx\n", + "compx = x1-x0\n", + "z0 = z0\n", + "z1 = z1 + hzv*npmlz\n", + "compz = z1-z0\n", + "origin = (x0, z0)\n", + "extent = (compx, compz)\n", + "shape = (nptx, nptz)\n", + "spacing = (hxv, hzv)" ] }, { @@ -312,9 +312,12 @@ "source": [ "class d0domain(SubDomain):\n", " name = 'd0'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " return {x: ('middle', npmlx, npmlx), z: ('middle', 0, npmlz)}\n", + "\n", + "\n", "d0_domain = d0domain()" ] }, @@ -339,23 +342,34 @@ "source": [ "class d1domain(SubDomain):\n", " name = 'd1'\n", + "\n", " def 
define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('left',npmlx), z: z}\n", + " return {x: ('left', npmlx), z: z}\n", + "\n", + "\n", "d1_domain = d1domain()\n", "\n", + "\n", "class d2domain(SubDomain):\n", " name = 'd2'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('right',npmlx), z: z}\n", + " return {x: ('right', npmlx), z: z}\n", + "\n", + "\n", "d2_domain = d2domain()\n", "\n", + "\n", "class d3domain(SubDomain):\n", " name = 'd3'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('middle', npmlx, npmlx), z: ('right',npmlz)}\n", + " return {x: ('middle', npmlx, npmlx), z: ('right', npmlz)}\n", + "\n", + "\n", "d3_domain = d3domain()" ] }, @@ -376,7 +390,7 @@ "metadata": {}, "outputs": [], "source": [ - "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain,d1_domain,d2_domain,d3_domain), dtype=np.float64)" + "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain, d1_domain, d2_domain, d3_domain), dtype=np.float64)" ] }, { @@ -392,9 +406,9 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = np.zeros((nptx,nptz))\n", - "X0 = np.linspace(x0,x1,nptx)\n", - "Z0 = np.linspace(z0,z1,nptz)\n", + "v0 = np.zeros((nptx, nptz))\n", + "X0 = np.linspace(x0, x1, nptx)\n", + "Z0 = np.linspace(z0, z1, nptz)\n", "\n", "x10 = x0+lx\n", "x11 = x1-lx\n", @@ -408,18 +422,18 @@ "pxm = 0\n", "pzm = 0\n", "\n", - "for i in range(0,nptx):\n", - " if(X0[i]==xm): pxm = i\n", + "for i in range(0, nptx):\n", + " if(X0[i] == xm): pxm = i\n", "\n", - "for j in range(0,nptz):\n", - " if(Z0[j]==zm): pzm = j\n", + "for j in range(0, nptz):\n", + " if(Z0[j] == zm): pzm = j\n", "\n", "p0 = 0\n", "p1 = pzm\n", "p2 = nptz\n", "\n", - "v0[0:nptx,p0:p1] = 1.5\n", - "v0[0:nptx,p1:p2] = 2.5" + "v0[0:nptx, p0:p1] = 1.5\n", + "v0[0:nptx, p1:p2] = 2.5" ] }, { @@ -437,11 +451,11 @@ "source": [ "def graph2dvel(vel):\n", " plot.figure()\n", - " 
plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel[npmlx:-npmlx,0:-npmlz])\n", - " extent = [fscale*(x0+lx),fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", - " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx,0:-npmlz]), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", + " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", + " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx, 0:-npmlz]), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.title('Velocity Profile')\n", @@ -505,13 +519,13 @@ "metadata": {}, "outputs": [], "source": [ - "t0 = 0.\n", - "tn = 1000.\n", - "CFL = 0.4\n", - "vmax = np.amax(v0)\n", - "dtmax = np.float64((min(hxv,hzv)*CFL)/(vmax))\n", + "t0 = 0.\n", + "tn = 1000.\n", + "CFL = 0.4\n", + "vmax = np.amax(v0)\n", + "dtmax = np.float64((min(hxv, hzv)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", - "dt0 = np.float64((tn-t0)/ntmax)" + "dt0 = np.float64((tn-t0)/ntmax)" ] }, { @@ -527,8 +541,8 @@ "metadata": {}, "outputs": [], "source": [ - "time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)\n", - "nt = time_range.num - 1" + "time_range = TimeAxis(start=t0, stop=tn, num=ntmax+1)\n", + "nt = time_range.num - 1" ] }, { @@ -544,10 +558,10 @@ "metadata": {}, "outputs": [], "source": [ - "(hx,hz) = grid.spacing_map\n", - "(x, z) = grid.dimensions\n", - "t = grid.stepping_dim\n", - "dt = grid.stepping_dim.spacing" + "(hx, hz) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", + "t = grid.stepping_dim\n", + "dt = grid.stepping_dim.spacing" ] }, { @@ -563,10 +577,10 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.01\n", + "f0 = 0.01\n", "nsource = 1\n", - "xposf = 0.5*(compx-2*npmlx*hxv)\n", 
- "zposf = hzv" + "xposf = 0.5*(compx-2*npmlx*hxv)\n", + "zposf = hzv" ] }, { @@ -582,7 +596,7 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src',grid=grid,f0=f0,npoint=nsource,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "src = RickerSource(name='src', grid=grid, f0=f0, npoint=nsource, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -629,9 +643,9 @@ "metadata": {}, "outputs": [], "source": [ - "nrec = nptx\n", - "nxpos = np.linspace(x0,x1,nrec)\n", - "nzpos = hzv" + "nrec = nptx\n", + "nxpos = np.linspace(x0, x1, nrec)\n", + "nzpos = hzv" ] }, { @@ -647,7 +661,7 @@ "metadata": {}, "outputs": [], "source": [ - "rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "rec = Receiver(name='rec', grid=grid, npoint=nrec, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "rec.coordinates.data[:, 0] = nxpos\n", "rec.coordinates.data[:, 1] = nzpos" ] @@ -665,7 +679,7 @@ "metadata": {}, "outputs": [], "source": [ - "u = TimeFunction(name=\"u\",grid=grid,time_order=2,space_order=2,staggered=NODE,dtype=np.float64)" + "u = TimeFunction(name=\"u\", grid=grid, time_order=2, space_order=2, staggered=NODE, dtype=np.float64)" ] }, { @@ -681,8 +695,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel0 = Function(name=\"vel0\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "vel0.data[:,:] = v0[:,:]" + "vel0 = Function(name=\"vel0\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "vel0.data[:, :] = v0[:, :]" ] }, { @@ -691,7 +705,7 @@ "metadata": {}, "outputs": [], "source": [ - "src_term = src.inject(field=u.forward,expr=src*dt**2*vel0**2)" + "src_term = src.inject(field=u.forward, expr=src*dt**2*vel0**2)" ] }, { @@ -723,10 +737,10 @@ "metadata": {}, "outputs": [], "source": [ - "x0pml = x0 + npmlx*hxv\n", - "x1pml = x1 - npmlx*hxv\n", - "z0pml = 
z0\n", - "z1pml = z1 - npmlz*hzv" + "x0pml = x0 + npmlx*hxv\n", + "x1pml = x1 - npmlx*hxv\n", + "z0pml = z0\n", + "z1pml = z1 - npmlz*hzv" ] }, { @@ -749,13 +763,13 @@ "metadata": {}, "outputs": [], "source": [ - "def fdamp(x,z):\n", + "def fdamp(x, z):\n", "\n", - " quibar = 1.5*np.log(1.0/0.001)/(40)\n", - " cte = 1./vmax\n", + " quibar = 1.5*np.log(1.0/0.001)/(40)\n", + " cte = 1./vmax\n", "\n", - " a = np.where(x<=x0pml,(np.abs(x-x0pml)/lx),np.where(x>=x1pml,(np.abs(x-x1pml)/lx),0.))\n", - " b = np.where(z<=z0pml,(np.abs(z-z0pml)/lz),np.where(z>=z1pml,(np.abs(z-z1pml)/lz),0.))\n", + " a = np.where(x <= x0pml, (np.abs(x-x0pml)/lx), np.where(x >= x1pml, (np.abs(x-x1pml)/lx), 0.))\n", + " b = np.where(z <= z0pml, (np.abs(z-z0pml)/lz), np.where(z >= z1pml, (np.abs(z-z1pml)/lz), 0.))\n", " adamp = quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))/hxv\n", " bdamp = quibar*(b-(1./(2.*np.pi))*np.sin(2.*np.pi*b))/hzv\n", " fdamp = cte*(adamp+bdamp)\n", @@ -778,11 +792,11 @@ "source": [ "def generatemdamp():\n", "\n", - " X0 = np.linspace(x0,x1,nptx)\n", - " Z0 = np.linspace(z0,z1,nptz)\n", - " X0grid,Z0grid = np.meshgrid(X0,Z0)\n", - " D0 = np.zeros((nptx,nptz))\n", - " D0 = np.transpose(fdamp(X0grid,Z0grid))\n", + " X0 = np.linspace(x0, x1, nptx)\n", + " Z0 = np.linspace(z0, z1, nptz)\n", + " X0grid, Z0grid = np.meshgrid(X0, Z0)\n", + " D0 = np.zeros((nptx, nptz))\n", + " D0 = np.transpose(fdamp(X0grid, Z0grid))\n", "\n", " return D0" ] @@ -818,12 +832,12 @@ "source": [ "def graph2damp(D):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", + " plot.figure(figsize=(16, 8))\n", " fscale = 1/10**(-3)\n", " fscale = 10**(-3)\n", - " scale = np.amax(D)\n", - " extent = [fscale*x0,fscale*x1, fscale*z1, fscale*z0]\n", - " fig = plot.imshow(np.transpose(D), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", + " scale = np.amax(D)\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = plot.imshow(np.transpose(D), vmin=0., vmax=scale, 
cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.title('Absorbing Layer Function')\n", @@ -887,8 +901,8 @@ "metadata": {}, "outputs": [], "source": [ - "damp = Function(name=\"damp\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "damp.data[:,:] = D0" + "damp = Function(name=\"damp\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "damp.data[:, :] = D0" ] }, { @@ -931,7 +945,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil0 = Eq(u.forward, solve(pde0,u.forward),subdomain = grid.subdomains['d0'])" + "stencil0 = Eq(u.forward, solve(pde0, u.forward), subdomain=grid.subdomains['d0'])" ] }, { @@ -947,7 +961,7 @@ "metadata": {}, "outputs": [], "source": [ - "subds = ['d1','d2','d3']" + "subds = ['d1', 'd2', 'd3']" ] }, { @@ -956,7 +970,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil1 = [Eq(u.forward, solve(pde1,u.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]" + "stencil1 = [Eq(u.forward, solve(pde1, u.forward), subdomain=grid.subdomains[subds[i]]) for i in range(0, len(subds))]" ] }, { @@ -972,7 +986,7 @@ "metadata": {}, "outputs": [], "source": [ - "bc = [Eq(u[t+1,0,z],0.),Eq(u[t+1,nptx-1,z],0.),Eq(u[t+1,x,nptz-1],0.),Eq(u[t+1,x,0],u[t+1,x,1])]" + "bc = [Eq(u[t+1, 0, z], 0.), Eq(u[t+1, nptx-1, z], 0.), Eq(u[t+1, x, nptz-1], 0.), Eq(u[t+1, x, 0], u[t+1, x, 1])]" ] }, { @@ -996,7 +1010,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op = Operator([stencil0,stencil1] + src_term + bc + rec_term,subs=grid.spacing_map)" + "op = Operator([stencil0, stencil1] + src_term + bc + rec_term, subs=grid.spacing_map)" ] }, { @@ -1059,7 +1073,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op(time=nt,dt=dt0)" + "op(time=nt, dt=dt0)" ] }, { @@ -1077,11 +1091,11 @@ "source": [ "def graph2d(U):\n", " plot.figure()\n", - " 
plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(U[npmlx:-npmlx,0:-npmlz])/10.\n", - " extent = [fscale*x0pml,fscale*x1pml,fscale*z1pml,fscale*z0pml]\n", - " fig = plot.imshow(np.transpose(U[npmlx:-npmlx,0:-npmlz]),vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(U[npmlx:-npmlx, 0:-npmlz])/10.\n", + " extent = [fscale*x0pml, fscale*x1pml, fscale*z1pml, fscale*z0pml]\n", + " fig = plot.imshow(np.transpose(U[npmlx:-npmlx, 0:-npmlz]), vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.axis('equal')\n", @@ -1124,7 +1138,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "graph2d(u.data[0,:,:])" + "graph2d(u.data[0, :, :])" ] }, { @@ -1142,12 +1156,12 @@ "source": [ "def graph2drec(rec):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", + " plot.figure(figsize=(16, 8))\n", " fscaled = 1/10**(3)\n", " fscalet = 1/10**(3)\n", - " scale = np.amax(rec[:,npmlx:-npmlx])/10.\n", - " extent = [fscaled*x0pml,fscaled*x1pml, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec[:,npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", + " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", " plot.axis('equal')\n", diff --git a/examples/seismic/abc_methods/03_pml.ipynb b/examples/seismic/abc_methods/03_pml.ipynb index 3be3fc9deb..2f19230259 100644 --- a/examples/seismic/abc_methods/03_pml.ipynb +++ 
b/examples/seismic/abc_methods/03_pml.ipynb @@ -153,11 +153,11 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "import numpy as np\n", - "import matplotlib.pyplot as plot\n", - "import matplotlib.ticker as mticker\n", - "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", - "from matplotlib import cm" + "import numpy as np\n", + "import matplotlib.pyplot as plot\n", + "import matplotlib.ticker as mticker\n", + "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", + "from matplotlib import cm" ] }, { @@ -176,8 +176,8 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "%matplotlib inline\n", - "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator\n", - "from examples.seismic import TimeAxis, RickerSource, Receiver" + "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator\n", + "from examples.seismic import TimeAxis, RickerSource, Receiver" ] }, { @@ -193,16 +193,16 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = 101\n", - "nptz = 101\n", - "x0 = 0.\n", - "x1 = 1000.\n", - "compx = x1-x0\n", - "z0 = 0.\n", - "z1 = 1000.\n", - "compz = z1-z0\n", - "hxv = (x1-x0)/(nptx-1)\n", - "hzv = (z1-z0)/(nptz-1)" + "nptx = 101\n", + "nptz = 101\n", + "x0 = 0.\n", + "x1 = 1000.\n", + "compx = x1-x0\n", + "z0 = 0.\n", + "z1 = 1000.\n", + "compz = z1-z0\n", + "hxv = (x1-x0)/(nptx-1)\n", + "hzv = (z1-z0)/(nptz-1)" ] }, { @@ -218,8 +218,8 @@ "metadata": {}, "outputs": [], "source": [ - "npmlx = 20\n", - "npmlz = 20" + "npmlx = 20\n", + "npmlz = 20" ] }, { @@ -252,18 +252,18 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = nptx + 2*npmlx\n", - "nptz = nptz + 1*npmlz\n", - "x0 = x0 - hxv*npmlx\n", - "x1 = x1 + hxv*npmlx\n", - "compx = x1-x0\n", - "z0 = z0\n", - "z1 = z1 + hzv*npmlz\n", - "compz = z1-z0\n", - "origin = (x0,z0)\n", - "extent = (compx,compz)\n", - "shape = (nptx,nptz)\n", - "spacing = (hxv,hzv)" + "nptx = nptx + 2*npmlx\n", + "nptz = nptz + 1*npmlz\n", + "x0 = x0 - hxv*npmlx\n", + "x1 = x1 + 
hxv*npmlx\n", + "compx = x1-x0\n", + "z0 = z0\n", + "z1 = z1 + hzv*npmlz\n", + "compz = z1-z0\n", + "origin = (x0, z0)\n", + "extent = (compx, compz)\n", + "shape = (nptx, nptz)\n", + "spacing = (hxv, hzv)" ] }, { @@ -314,9 +314,12 @@ "source": [ "class d0domain(SubDomain):\n", " name = 'd0'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " return {x: ('middle', npmlx, npmlx), z: ('middle', 0, npmlz)}\n", + "\n", + "\n", "d0_domain = d0domain()" ] }, @@ -335,23 +338,34 @@ "source": [ "class d1domain(SubDomain):\n", " name = 'd1'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('left',npmlx), z: z}\n", + " return {x: ('left', npmlx), z: z}\n", + "\n", + "\n", "d1_domain = d1domain()\n", "\n", + "\n", "class d2domain(SubDomain):\n", " name = 'd2'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('right',npmlx), z: z}\n", + " return {x: ('right', npmlx), z: z}\n", + "\n", + "\n", "d2_domain = d2domain()\n", "\n", + "\n", "class d3domain(SubDomain):\n", " name = 'd3'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('middle', npmlx, npmlx), z: ('right',npmlz)}\n", + " return {x: ('middle', npmlx, npmlx), z: ('right', npmlz)}\n", + "\n", + "\n", "d3_domain = d3domain()" ] }, @@ -372,7 +386,7 @@ "metadata": {}, "outputs": [], "source": [ - "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain,d1_domain,d2_domain,d3_domain), dtype=np.float64)" + "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain, d1_domain, d2_domain, d3_domain), dtype=np.float64)" ] }, { @@ -388,10 +402,10 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = np.zeros((nptx,nptz))\n", - "v1 = np.zeros((nptx-1,nptz-1))\n", - "X0 = np.linspace(x0,x1,nptx)\n", - "Z0 = np.linspace(z0,z1,nptz)\n", + "v0 = np.zeros((nptx, nptz))\n", + "v1 = np.zeros((nptx-1, nptz-1))\n", + "X0 = np.linspace(x0, x1, nptx)\n", + "Z0 = 
np.linspace(z0, z1, nptz)\n", "\n", "x10 = x0+lx\n", "x11 = x1-lx\n", @@ -405,23 +419,23 @@ "pxm = 0\n", "pzm = 0\n", "\n", - "for i in range(0,nptx):\n", - " if(X0[i]==xm): pxm = i\n", + "for i in range(0, nptx):\n", + " if(X0[i] == xm): pxm = i\n", "\n", - "for j in range(0,nptz):\n", - " if(Z0[j]==zm): pzm = j\n", + "for j in range(0, nptz):\n", + " if(Z0[j] == zm): pzm = j\n", "\n", "p0 = 0\n", "p1 = pzm\n", "p2 = nptz\n", - "v0[0:nptx,p0:p1] = 1.5\n", - "v0[0:nptx,p1:p2] = 2.5\n", + "v0[0:nptx, p0:p1] = 1.5\n", + "v0[0:nptx, p1:p2] = 2.5\n", "\n", "p0 = 0\n", "p1 = pzm\n", "p2 = nptz-1\n", - "v1[0:nptx-1,p0:p1] = 1.5\n", - "v1[0:nptx-1,p1:p2] = 2.5" + "v1[0:nptx-1, p0:p1] = 1.5\n", + "v1[0:nptx-1, p1:p2] = 2.5" ] }, { @@ -439,11 +453,11 @@ "source": [ "def graph2dvel(vel):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel[npmlx:-npmlx,0:-npmlz])\n", - " extent = [fscale*(x0+lx),fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", - " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx,0:-npmlz]), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", + " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", + " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx, 0:-npmlz]), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.title('Velocity Profile')\n", @@ -507,13 +521,13 @@ "metadata": {}, "outputs": [], "source": [ - "t0 = 0.\n", - "tn = 1000.\n", - "CFL = 0.4\n", - "vmax = np.amax(v0)\n", - "dtmax = np.float64((min(hxv,hzv)*CFL)/(vmax))\n", + "t0 = 0.\n", + "tn = 1000.\n", + "CFL = 0.4\n", + "vmax = np.amax(v0)\n", + "dtmax = np.float64((min(hxv, hzv)*CFL)/(vmax))\n", "ntmax = 
int((tn-t0)/dtmax)+1\n", - "dt0 = np.float64((tn-t0)/ntmax)" + "dt0 = np.float64((tn-t0)/ntmax)" ] }, { @@ -522,8 +536,8 @@ "metadata": {}, "outputs": [], "source": [ - "time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)\n", - "nt = time_range.num - 1" + "time_range = TimeAxis(start=t0, stop=tn, num=ntmax+1)\n", + "nt = time_range.num - 1" ] }, { @@ -539,10 +553,10 @@ "metadata": {}, "outputs": [], "source": [ - "(hx,hz) = grid.spacing_map\n", - "(x, z) = grid.dimensions\n", - "t = grid.stepping_dim\n", - "dt = grid.stepping_dim.spacing" + "(hx, hz) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", + "t = grid.stepping_dim\n", + "dt = grid.stepping_dim.spacing" ] }, { @@ -558,10 +572,10 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.01\n", + "f0 = 0.01\n", "nsource = 1\n", - "xposf = 0.5*(compx-2*npmlx*hxv)\n", - "zposf = hzv" + "xposf = 0.5*(compx-2*npmlx*hxv)\n", + "zposf = hzv" ] }, { @@ -570,7 +584,7 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src',grid=grid,f0=f0,npoint=nsource,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "src = RickerSource(name='src', grid=grid, f0=f0, npoint=nsource, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -617,9 +631,9 @@ "metadata": {}, "outputs": [], "source": [ - "nrec = nptx\n", - "nxpos = np.linspace(x0,x1,nrec)\n", - "nzpos = hzv" + "nrec = nptx\n", + "nxpos = np.linspace(x0, x1, nrec)\n", + "nzpos = hzv" ] }, { @@ -628,7 +642,7 @@ "metadata": {}, "outputs": [], "source": [ - "rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "rec = Receiver(name='rec', grid=grid, npoint=nrec, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "rec.coordinates.data[:, 0] = nxpos\n", "rec.coordinates.data[:, 1] = nzpos" ] @@ -646,7 +660,7 @@ "metadata": {}, "outputs": [], "source": [ - "u = 
TimeFunction(name=\"u\",grid=grid,time_order=2,space_order=2,staggered=NODE,dtype=np.float64)" + "u = TimeFunction(name=\"u\", grid=grid, time_order=2, space_order=2, staggered=NODE, dtype=np.float64)" ] }, { @@ -662,8 +676,8 @@ "metadata": {}, "outputs": [], "source": [ - "phi1 = TimeFunction(name=\"phi1\",grid=grid,time_order=2,space_order=2,staggered=(x,z),dtype=np.float64)\n", - "phi2 = TimeFunction(name=\"phi2\",grid=grid,time_order=2,space_order=2,staggered=(x,z),dtype=np.float64)" + "phi1 = TimeFunction(name=\"phi1\", grid=grid, time_order=2, space_order=2, staggered=(x, z), dtype=np.float64)\n", + "phi2 = TimeFunction(name=\"phi2\", grid=grid, time_order=2, space_order=2, staggered=(x, z), dtype=np.float64)" ] }, { @@ -679,8 +693,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel0 = Function(name=\"vel0\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "vel0.data[:,:] = v0[:,:]" + "vel0 = Function(name=\"vel0\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "vel0.data[:, :] = v0[:, :]" ] }, { @@ -696,8 +710,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel1 = Function(name=\"vel1\", grid=grid,space_order=2,staggered=(x,z),dtype=np.float64)\n", - "vel1.data[0:nptx-1,0:nptz-1] = v1" + "vel1 = Function(name=\"vel1\", grid=grid, space_order=2, staggered=(x, z), dtype=np.float64)\n", + "vel1.data[0:nptx-1, 0:nptz-1] = v1" ] }, { @@ -713,8 +727,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel1.data[nptx-1,0:nptz-1] = vel1.data[nptx-2,0:nptz-1]\n", - "vel1.data[0:nptx,nptz-1] = vel1.data[0:nptx,nptz-2]" + "vel1.data[nptx-1, 0:nptz-1] = vel1.data[nptx-2, 0:nptz-1]\n", + "vel1.data[0:nptx, nptz-1] = vel1.data[0:nptx, nptz-2]" ] }, { @@ -730,7 +744,7 @@ "metadata": {}, "outputs": [], "source": [ - "src_term = src.inject(field=u.forward,expr=src*dt**2*vel0**2)" + "src_term = src.inject(field=u.forward, expr=src*dt**2*vel0**2)" ] }, { @@ -764,10 +778,10 @@ "metadata": {}, "outputs": [], "source": [ - "x0pml = x0 + 
npmlx*hxv\n", - "x1pml = x1 - npmlx*hxv\n", - "z0pml = z0\n", - "z1pml = z1 - npmlz*hzv" + "x0pml = x0 + npmlx*hxv\n", + "x1pml = x1 - npmlx*hxv\n", + "z0pml = z0\n", + "z1pml = z1 - npmlz*hzv" ] }, { @@ -783,15 +797,15 @@ "metadata": {}, "outputs": [], "source": [ - "def fdamp(x,z,i):\n", + "def fdamp(x, z, i):\n", "\n", - " quibar = 0.05\n", + " quibar = 0.05\n", "\n", - " if(i==1):\n", - " a = np.where(x<=x0pml,(np.abs(x-x0pml)/lx),np.where(x>=x1pml,(np.abs(x-x1pml)/lx),0.))\n", + " if(i == 1):\n", + " a = np.where(x <= x0pml, (np.abs(x-x0pml)/lx), np.where(x >= x1pml, (np.abs(x-x1pml)/lx), 0.))\n", " fdamp = quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))\n", - " if(i==2):\n", - " a = np.where(z<=z0pml,(np.abs(z-z0pml)/lz),np.where(z>=z1pml,(np.abs(z-z1pml)/lz),0.))\n", + " if(i == 2):\n", + " a = np.where(z <= z0pml, (np.abs(z-z0pml)/lz), np.where(z >= z1pml, (np.abs(z-z1pml)/lz), 0.))\n", " fdamp = quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))\n", "\n", " return fdamp" @@ -816,23 +830,23 @@ "source": [ "def generatemdamp():\n", "\n", - " X0 = np.linspace(x0,x1,nptx)\n", - " Z0 = np.linspace(z0,z1,nptz)\n", - " X0grid,Z0grid = np.meshgrid(X0,Z0)\n", - " X1 = np.linspace((x0+0.5*hxv),(x1-0.5*hxv),nptx-1)\n", - " Z1 = np.linspace((z0+0.5*hzv),(z1-0.5*hzv),nptz-1)\n", - " X1grid,Z1grid = np.meshgrid(X1,Z1)\n", + " X0 = np.linspace(x0, x1, nptx)\n", + " Z0 = np.linspace(z0, z1, nptz)\n", + " X0grid, Z0grid = np.meshgrid(X0, Z0)\n", + " X1 = np.linspace((x0+0.5*hxv), (x1-0.5*hxv), nptx-1)\n", + " Z1 = np.linspace((z0+0.5*hzv), (z1-0.5*hzv), nptz-1)\n", + " X1grid, Z1grid = np.meshgrid(X1, Z1)\n", "\n", - " D01 = np.zeros((nptx,nptz))\n", - " D02 = np.zeros((nptx,nptz))\n", - " D11 = np.zeros((nptx,nptz))\n", - " D12 = np.zeros((nptx,nptz))\n", + " D01 = np.zeros((nptx, nptz))\n", + " D02 = np.zeros((nptx, nptz))\n", + " D11 = np.zeros((nptx, nptz))\n", + " D12 = np.zeros((nptx, nptz))\n", "\n", - " D01 = np.transpose(fdamp(X0grid,Z0grid,1))\n", - " D02 = 
np.transpose(fdamp(X0grid,Z0grid,2))\n", + " D01 = np.transpose(fdamp(X0grid, Z0grid, 1))\n", + " D02 = np.transpose(fdamp(X0grid, Z0grid, 2))\n", "\n", - " D11 = np.transpose(fdamp(X1grid,Z1grid,1))\n", - " D12 = np.transpose(fdamp(X1grid,Z1grid,2))\n", + " D11 = np.transpose(fdamp(X1grid, Z1grid, 1))\n", + " D12 = np.transpose(fdamp(X1grid, Z1grid, 2))\n", "\n", " return D01, D02, D11, D12" ] @@ -861,12 +875,12 @@ "source": [ "def graph2damp(D):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", + " plot.figure(figsize=(16, 8))\n", " fscale = 1/10**(-3)\n", " fscale = 10**(-3)\n", - " scale = np.amax(D)\n", - " extent = [fscale*x0,fscale*x1, fscale*z1, fscale*z0]\n", - " fig = plot.imshow(np.transpose(D), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", + " scale = np.amax(D)\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = plot.imshow(np.transpose(D), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.title('Absorbing Layer Function')\n", @@ -970,10 +984,10 @@ "metadata": {}, "outputs": [], "source": [ - "dampx0 = Function(name=\"dampx0\", grid=grid,space_order=2,staggered=NODE ,dtype=np.float64)\n", - "dampz0 = Function(name=\"dampz0\", grid=grid,space_order=2,staggered=NODE ,dtype=np.float64)\n", - "dampx0.data[:,:] = D01\n", - "dampz0.data[:,:] = D02" + "dampx0 = Function(name=\"dampx0\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "dampz0 = Function(name=\"dampz0\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "dampx0.data[:, :] = D01\n", + "dampz0.data[:, :] = D02" ] }, { @@ -982,10 +996,10 @@ "metadata": {}, "outputs": [], "source": [ - "dampx1 = Function(name=\"dampx1\", grid=grid,space_order=2,staggered=(x,z),dtype=np.float64)\n", - "dampz1 = Function(name=\"dampz1\", 
grid=grid,space_order=2,staggered=(x,z),dtype=np.float64)\n", - "dampx1.data[0:nptx-1,0:nptz-1] = D11\n", - "dampz1.data[0:nptx-1,0:nptz-1] = D12" + "dampx1 = Function(name=\"dampx1\", grid=grid, space_order=2, staggered=(x, z), dtype=np.float64)\n", + "dampz1 = Function(name=\"dampz1\", grid=grid, space_order=2, staggered=(x, z), dtype=np.float64)\n", + "dampx1.data[0:nptx-1, 0:nptz-1] = D11\n", + "dampz1.data[0:nptx-1, 0:nptz-1] = D12" ] }, { @@ -1001,10 +1015,10 @@ "metadata": {}, "outputs": [], "source": [ - "dampx1.data[nptx-1,0:nptz-1] = dampx1.data[nptx-2,0:nptz-1]\n", - "dampx1.data[0:nptx,nptz-1] = dampx1.data[0:nptx,nptz-2]\n", - "dampz1.data[nptx-1,0:nptz-1] = dampz1.data[nptx-2,0:nptz-1]\n", - "dampz1.data[0:nptx,nptz-1] = dampz1.data[0:nptx,nptz-2]" + "dampx1.data[nptx-1, 0:nptz-1] = dampx1.data[nptx-2, 0:nptz-1]\n", + "dampx1.data[0:nptx, nptz-1] = dampx1.data[0:nptx, nptz-2]\n", + "dampz1.data[nptx-1, 0:nptz-1] = dampz1.data[nptx-2, 0:nptz-1]\n", + "dampz1.data[0:nptx, nptz-1] = dampz1.data[0:nptx, nptz-2]" ] }, { @@ -1056,25 +1070,25 @@ "outputs": [], "source": [ "# White Region\n", - "pde01 = Eq(u.dt2-u.laplace*vel0**2)\n", + "pde01 = Eq(u.dt2-u.laplace*vel0**2)\n", "\n", "# Blue Region\n", - "pde02a = u.dt2 + (dampx0+dampz0)*u.dtc + (dampx0*dampz0)*u - u.laplace*vel0*vel0\n", - "pde02b = - (0.5/hx)*(phi1[t,x,z-1]+phi1[t,x,z]-phi1[t,x-1,z-1]-phi1[t,x-1,z])\n", - "pde02c = - (0.5/hz)*(phi2[t,x-1,z]+phi2[t,x,z]-phi2[t,x-1,z-1]-phi2[t,x,z-1])\n", - "pde02 = Eq(pde02a + pde02b + pde02c)\n", + "pde02a = u.dt2 + (dampx0+dampz0)*u.dtc + (dampx0*dampz0)*u - u.laplace*vel0*vel0\n", + "pde02b = - (0.5/hx)*(phi1[t, x, z-1]+phi1[t, x, z]-phi1[t, x-1, z-1]-phi1[t, x-1, z])\n", + "pde02c = - (0.5/hz)*(phi2[t, x-1, z]+phi2[t, x, z]-phi2[t, x-1, z-1]-phi2[t, x, z-1])\n", + "pde02 = Eq(pde02a + pde02b + pde02c)\n", "\n", "pde10 = phi1.dt + dampx1*0.5*(phi1.forward+phi1)\n", - "a1 = u[t+1,x+1,z] + u[t+1,x+1,z+1] - u[t+1,x,z] - u[t+1,x,z+1]\n", - "a2 = u[t,x+1,z] + 
u[t,x+1,z+1] - u[t,x,z] - u[t,x,z+1]\n", + "a1 = u[t+1, x+1, z] + u[t+1, x+1, z+1] - u[t+1, x, z] - u[t+1, x, z+1]\n", + "a2 = u[t, x+1, z] + u[t, x+1, z+1] - u[t, x, z] - u[t, x, z+1]\n", "pde11 = -(dampz1-dampx1)*0.5*(0.5/hx)*(a1+a2)*vel1**2\n", - "pde1 = Eq(pde10+pde11)\n", + "pde1 = Eq(pde10+pde11)\n", "\n", "pde20 = phi2.dt + dampz1*0.5*(phi2.forward+phi2)\n", - "b1 = u[t+1,x,z+1] + u[t+1,x+1,z+1] - u[t+1,x,z] - u[t+1,x+1,z]\n", - "b2 = u[t,x,z+1] + u[t,x+1,z+1] - u[t,x,z] - u[t,x+1,z]\n", + "b1 = u[t+1, x, z+1] + u[t+1, x+1, z+1] - u[t+1, x, z] - u[t+1, x+1, z]\n", + "b2 = u[t, x, z+1] + u[t, x+1, z+1] - u[t, x, z] - u[t, x+1, z]\n", "pde21 = -(dampx1-dampz1)*0.5*(0.5/hz)*(b1+b2)*vel1**2\n", - "pde2 = Eq(pde20+pde21)" + "pde2 = Eq(pde20+pde21)" ] }, { @@ -1090,7 +1104,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil01 = Eq(u.forward,solve(pde01,u.forward) ,subdomain = grid.subdomains['d0'])" + "stencil01 = Eq(u.forward, solve(pde01, u.forward), subdomain=grid.subdomains['d0'])" ] }, { @@ -1106,7 +1120,7 @@ "metadata": {}, "outputs": [], "source": [ - "subds = ['d1','d2','d3']" + "subds = ['d1', 'd2', 'd3']" ] }, { @@ -1115,9 +1129,9 @@ "metadata": {}, "outputs": [], "source": [ - "stencil02 = [Eq(u.forward,solve(pde02, u.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]\n", - "stencil1 = [Eq(phi1.forward, solve(pde1,phi1.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]\n", - "stencil2 = [Eq(phi2.forward, solve(pde2,phi2.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]" + "stencil02 = [Eq(u.forward, solve(pde02, u.forward), subdomain=grid.subdomains[subds[i]]) for i in range(0, len(subds))]\n", + "stencil1 = [Eq(phi1.forward, solve(pde1, phi1.forward), subdomain=grid.subdomains[subds[i]]) for i in range(0, len(subds))]\n", + "stencil2 = [Eq(phi2.forward, solve(pde2, phi2.forward), subdomain=grid.subdomains[subds[i]]) for i in range(0, len(subds))]" ] }, { @@ -1133,7 
+1147,7 @@ "metadata": {}, "outputs": [], "source": [ - "bc = [Eq(u[t+1,0,z],0.),Eq(u[t+1,nptx-1,z],0.),Eq(u[t+1,x,nptz-1],0.),Eq(u[t+1,x,0],u[t+1,x,1])]" + "bc = [Eq(u[t+1, 0, z], 0.), Eq(u[t+1, nptx-1, z], 0.), Eq(u[t+1, x, nptz-1], 0.), Eq(u[t+1, x, 0], u[t+1, x, 1])]" ] }, { @@ -1161,7 +1175,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op = Operator([stencil01,stencil02] + src_term + bc + [stencil1,stencil2] + rec_term,subs=grid.spacing_map)" + "op = Operator([stencil01, stencil02] + src_term + bc + [stencil1, stencil2] + rec_term, subs=grid.spacing_map)" ] }, { @@ -1177,9 +1191,9 @@ "metadata": {}, "outputs": [], "source": [ - "u.data[:] = 0.\n", - "phi1.data[:] = 0.\n", - "phi2.data[:] = 0." + "u.data[:] = 0.\n", + "phi1.data[:] = 0.\n", + "phi2.data[:] = 0." ] }, { @@ -1228,7 +1242,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op(time=nt,dt=dt0)" + "op(time=nt, dt=dt0)" ] }, { @@ -1246,11 +1260,11 @@ "source": [ "def graph2d(U):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(U[npmlx:-npmlx,0:-npmlz])/10.\n", - " extent = [fscale*x0pml,fscale*x1pml,fscale*z1pml,fscale*z0pml]\n", - " fig = plot.imshow(np.transpose(U[npmlx:-npmlx,0:-npmlz]),vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(U[npmlx:-npmlx, 0:-npmlz])/10.\n", + " extent = [fscale*x0pml, fscale*x1pml, fscale*z1pml, fscale*z0pml]\n", + " fig = plot.imshow(np.transpose(U[npmlx:-npmlx, 0:-npmlz]), vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.axis('equal')\n", @@ -1293,7 +1307,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "graph2d(u.data[0,:,:])" + "graph2d(u.data[0, :, :])" ] }, { @@ -1311,12 +1325,12 @@ "source": [ "def graph2drec(rec):\n", " 
plot.figure()\n", - " plot.figure(figsize=(16,8))\n", + " plot.figure(figsize=(16, 8))\n", " fscaled = 1/10**(3)\n", " fscalet = 1/10**(3)\n", - " scale = np.amax(rec[:,npmlx:-npmlx])/10.\n", - " extent = [fscaled*x0pml,fscaled*x1pml, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec[:,npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", + " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", " plot.axis('equal')\n", diff --git a/examples/seismic/abc_methods/04_habc.ipynb b/examples/seismic/abc_methods/04_habc.ipynb index 0e452d6914..75a563b1ba 100644 --- a/examples/seismic/abc_methods/04_habc.ipynb +++ b/examples/seismic/abc_methods/04_habc.ipynb @@ -202,11 +202,11 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "import numpy as np\n", - "import matplotlib.pyplot as plot\n", - "import matplotlib.ticker as mticker\n", - "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", - "from matplotlib import cm" + "import numpy as np\n", + "import matplotlib.pyplot as plot\n", + "import matplotlib.ticker as mticker\n", + "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", + "from matplotlib import cm" ] }, { @@ -225,10 +225,10 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "%matplotlib inline\n", - "from examples.seismic import TimeAxis\n", - "from examples.seismic import RickerSource\n", - "from examples.seismic import Receiver\n", - "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" + "from examples.seismic import TimeAxis\n", + "from examples.seismic import RickerSource\n", + "from examples.seismic import Receiver\n", + "from devito import SubDomain, Grid, NODE, 
TimeFunction, Function, Eq, solve, Operator" ] }, { @@ -244,16 +244,16 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = 101\n", - "nptz = 101\n", - "x0 = 0.\n", - "x1 = 1000.\n", - "compx = x1-x0\n", - "z0 = 0.\n", - "z1 = 1000.\n", - "compz = z1-z0\n", - "hxv = (x1-x0)/(nptx-1)\n", - "hzv = (z1-z0)/(nptz-1)" + "nptx = 101\n", + "nptz = 101\n", + "x0 = 0.\n", + "x1 = 1000.\n", + "compx = x1-x0\n", + "z0 = 0.\n", + "z1 = 1000.\n", + "compz = z1-z0\n", + "hxv = (x1-x0)/(nptx-1)\n", + "hzv = (z1-z0)/(nptz-1)" ] }, { @@ -280,8 +280,8 @@ "metadata": {}, "outputs": [], "source": [ - "habctype = 3\n", - "habcw = 2" + "habctype = 3\n", + "habcw = 2" ] }, { @@ -297,8 +297,8 @@ "metadata": {}, "outputs": [], "source": [ - "npmlx = 20\n", - "npmlz = 20" + "npmlx = 20\n", + "npmlz = 20" ] }, { @@ -331,18 +331,18 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = nptx + 2*npmlx\n", - "nptz = nptz + 1*npmlz\n", - "x0 = x0 - hxv*npmlx\n", - "x1 = x1 + hxv*npmlx\n", - "compx = x1-x0\n", - "z0 = z0\n", - "z1 = z1 + hzv*npmlz\n", - "compz = z1-z0\n", - "origin = (x0,z0)\n", - "extent = (compx,compz)\n", - "shape = (nptx,nptz)\n", - "spacing = (hxv,hzv)" + "nptx = nptx + 2*npmlx\n", + "nptz = nptz + 1*npmlz\n", + "x0 = x0 - hxv*npmlx\n", + "x1 = x1 + hxv*npmlx\n", + "compx = x1-x0\n", + "z0 = z0\n", + "z1 = z1 + hzv*npmlz\n", + "compz = z1-z0\n", + "origin = (x0, z0)\n", + "extent = (compx, compz)\n", + "shape = (nptx, nptz)\n", + "spacing = (hxv, hzv)" ] }, { @@ -362,9 +362,12 @@ "source": [ "class d0domain(SubDomain):\n", " name = 'd0'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " return {x: x, z: z}\n", + "\n", + "\n", "d0_domain = d0domain()" ] }, @@ -389,26 +392,37 @@ "source": [ "class d1domain(SubDomain):\n", " name = 'd1'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('left',npmlx), z: z}\n", + " return {x: ('left', npmlx), z: z}\n", + "\n", + "\n", "d1_domain = d1domain()\n", "\n", + "\n", 
"class d2domain(SubDomain):\n", " name = 'd2'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('right',npmlx), z: z}\n", + " return {x: ('right', npmlx), z: z}\n", + "\n", + "\n", "d2_domain = d2domain()\n", "\n", + "\n", "class d3domain(SubDomain):\n", " name = 'd3'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " if((habctype==3)&(habcw==1)):\n", - " return {x: x, z: ('right',npmlz)}\n", + " if((habctype == 3) & (habcw == 1)):\n", + " return {x: x, z: ('right', npmlz)}\n", " else:\n", - " return {x: ('middle', npmlx, npmlx), z: ('right',npmlz)}\n", + " return {x: ('middle', npmlx, npmlx), z: ('right', npmlz)}\n", + "\n", + "\n", "d3_domain = d3domain()" ] }, @@ -429,7 +443,7 @@ "metadata": {}, "outputs": [], "source": [ - "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain,d1_domain,d2_domain,d3_domain), dtype=np.float64)" + "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain, d1_domain, d2_domain, d3_domain), dtype=np.float64)" ] }, { @@ -438,9 +452,9 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = np.zeros((nptx,nptz))\n", - "X0 = np.linspace(x0,x1,nptx)\n", - "Z0 = np.linspace(z0,z1,nptz)\n", + "v0 = np.zeros((nptx, nptz))\n", + "X0 = np.linspace(x0, x1, nptx)\n", + "Z0 = np.linspace(z0, z1, nptz)\n", "\n", "x10 = x0+lx\n", "x11 = x1-lx\n", @@ -454,18 +468,18 @@ "pxm = 0\n", "pzm = 0\n", "\n", - "for i in range(0,nptx):\n", - " if(X0[i]==xm): pxm = i\n", + "for i in range(0, nptx):\n", + " if(X0[i] == xm): pxm = i\n", "\n", - "for j in range(0,nptz):\n", - " if(Z0[j]==zm): pzm = j\n", + "for j in range(0, nptz):\n", + " if(Z0[j] == zm): pzm = j\n", "\n", "p0 = 0\n", "p1 = pzm\n", "p2 = nptz\n", "\n", - "v0[0:nptx,p0:p1] = 1.5\n", - "v0[0:nptx,p1:p2] = 2.5" + "v0[0:nptx, p0:p1] = 1.5\n", + "v0[0:nptx, p1:p2] = 2.5" ] }, { @@ -483,11 +497,11 @@ "source": [ "def graph2dvel(vel):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " 
fscale = 1/10**(3)\n", - " scale = np.amax(vel[npmlx:-npmlx,0:-npmlz])\n", - " extent = [fscale*(x0+lx),fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", - " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx,0:-npmlz]), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", + " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", + " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx, 0:-npmlz]), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.title('Velocity Profile')\n", @@ -551,13 +565,13 @@ "metadata": {}, "outputs": [], "source": [ - "t0 = 0.\n", - "tn = 1000.\n", - "CFL = 0.4\n", - "vmax = np.amax(v0)\n", - "dtmax = np.float64((min(hxv,hzv)*CFL)/(vmax))\n", + "t0 = 0.\n", + "tn = 1000.\n", + "CFL = 0.4\n", + "vmax = np.amax(v0)\n", + "dtmax = np.float64((min(hxv, hzv)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", - "dt0 = np.float64((tn-t0)/ntmax)" + "dt0 = np.float64((tn-t0)/ntmax)" ] }, { @@ -573,8 +587,8 @@ "metadata": {}, "outputs": [], "source": [ - "time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)\n", - "nt = time_range.num - 1" + "time_range = TimeAxis(start=t0, stop=tn, num=ntmax+1)\n", + "nt = time_range.num - 1" ] }, { @@ -590,10 +604,10 @@ "metadata": {}, "outputs": [], "source": [ - "(hx,hz) = grid.spacing_map\n", - "(x, z) = grid.dimensions\n", - "t = grid.stepping_dim\n", - "dt = grid.stepping_dim.spacing" + "(hx, hz) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", + "t = grid.stepping_dim\n", + "dt = grid.stepping_dim.spacing" ] }, { @@ -609,10 +623,10 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.01\n", + "f0 = 0.01\n", "nsource = 1\n", - "xposf = 0.5*(compx-2*npmlx*hxv)\n", - "zposf = hzv" + "xposf = 
0.5*(compx-2*npmlx*hxv)\n", + "zposf = hzv" ] }, { @@ -621,7 +635,7 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src',grid=grid,f0=f0,npoint=nsource,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "src = RickerSource(name='src', grid=grid, f0=f0, npoint=nsource, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -668,9 +682,9 @@ "metadata": {}, "outputs": [], "source": [ - "nrec = nptx\n", - "nxpos = np.linspace(x0,x1,nrec)\n", - "nzpos = hzv" + "nrec = nptx\n", + "nxpos = np.linspace(x0, x1, nrec)\n", + "nzpos = hzv" ] }, { @@ -679,7 +693,7 @@ "metadata": {}, "outputs": [], "source": [ - "rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "rec = Receiver(name='rec', grid=grid, npoint=nrec, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "rec.coordinates.data[:, 0] = nxpos\n", "rec.coordinates.data[:, 1] = nzpos" ] @@ -697,7 +711,7 @@ "metadata": {}, "outputs": [], "source": [ - "u = TimeFunction(name=\"u\",grid=grid,time_order=2,space_order=2,staggered=NODE,dtype=np.float64)" + "u = TimeFunction(name=\"u\", grid=grid, time_order=2, space_order=2, staggered=NODE, dtype=np.float64)" ] }, { @@ -706,8 +720,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel = Function(name=\"vel\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "vel.data[:,:] = v0[:,:]" + "vel = Function(name=\"vel\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "vel.data[:, :] = v0[:, :]" ] }, { @@ -723,7 +737,7 @@ "metadata": {}, "outputs": [], "source": [ - "src_term = src.inject(field=u.forward,expr=src*dt**2*vel**2)" + "src_term = src.inject(field=u.forward, expr=src*dt**2*vel**2)" ] }, { @@ -757,69 +771,69 @@ "source": [ "def generateweights():\n", "\n", - " weightsx = np.zeros(npmlx)\n", - " weightsz = np.zeros(npmlz)\n", - " Mweightsx = 
np.zeros((nptx,nptz))\n", - " Mweightsz = np.zeros((nptx,nptz))\n", + " weightsx = np.zeros(npmlx)\n", + " weightsz = np.zeros(npmlz)\n", + " Mweightsx = np.zeros((nptx, nptz))\n", + " Mweightsz = np.zeros((nptx, nptz))\n", "\n", - " if(habcw==1):\n", + " if(habcw == 1):\n", "\n", - " for i in range(0,npmlx):\n", + " for i in range(0, npmlx):\n", " weightsx[i] = (npmlx-i)/(npmlx)\n", "\n", - " for i in range(0,npmlz):\n", + " for i in range(0, npmlz):\n", " weightsz[i] = (npmlz-i)/(npmlz)\n", "\n", - " if(habcw==2):\n", + " if(habcw == 2):\n", "\n", " mx = 2\n", " mz = 2\n", "\n", - " if(habctype==3):\n", + " if(habctype == 3):\n", "\n", - " alphax = 1.0 + 0.15*(npmlx-mx)\n", - " alphaz = 1.0 + 0.15*(npmlz-mz)\n", + " alphax = 1.0 + 0.15*(npmlx-mx)\n", + " alphaz = 1.0 + 0.15*(npmlz-mz)\n", "\n", " else:\n", "\n", - " alphax = 1.5 + 0.07*(npmlx-mx)\n", - " alphaz = 1.5 + 0.07*(npmlz-mz)\n", + " alphax = 1.5 + 0.07*(npmlx-mx)\n", + " alphaz = 1.5 + 0.07*(npmlz-mz)\n", "\n", - " for i in range(0,npmlx):\n", + " for i in range(0, npmlx):\n", "\n", - " if(0<=i<=(mx)):\n", + " if(0 <= i <= (mx)):\n", " weightsx[i] = 1\n", - " elif((mx+1)<=i<=npmlx-1):\n", + " elif((mx+1) <= i <= npmlx-1):\n", " weightsx[i] = ((npmlx-i)/(npmlx-mx))**(alphax)\n", " else:\n", " weightsx[i] = 0\n", "\n", - " for i in range(0,npmlz):\n", + " for i in range(0, npmlz):\n", "\n", - " if(0<=i<=(mz)):\n", + " if(0 <= i <= (mz)):\n", " weightsz[i] = 1\n", - " elif((mz+1)<=i<=npmlz-1):\n", + " elif((mz+1) <= i <= npmlz-1):\n", " weightsz[i] = ((npmlz-i)/(npmlz-mz))**(alphaz)\n", " else:\n", " weightsz[i] = 0\n", "\n", - " for k in range(0,npmlx):\n", + " for k in range(0, npmlx):\n", "\n", " ai = k\n", " af = nptx - k - 1\n", " bi = 0\n", " bf = nptz - k\n", - " Mweightsx[ai,bi:bf] = weightsx[k]\n", - " Mweightsx[af,bi:bf] = weightsx[k]\n", + " Mweightsx[ai, bi:bf] = weightsx[k]\n", + " Mweightsx[af, bi:bf] = weightsx[k]\n", "\n", - " for k in range(0,npmlz):\n", + " for k in range(0, npmlz):\n", 
"\n", " ai = k\n", " af = nptx - k\n", " bf = nptz - k - 1\n", - " Mweightsz[ai:af,bf] = weightsz[k]\n", + " Mweightsz[ai:af, bf] = weightsz[k]\n", "\n", - " return Mweightsx,Mweightsz" + " return Mweightsx, Mweightsz" ] }, { @@ -835,7 +849,7 @@ "metadata": {}, "outputs": [], "source": [ - "Mweightsx,Mweightsz = generateweights();" + "Mweightsx, Mweightsz = generateweights();" ] }, { @@ -853,12 +867,12 @@ "source": [ "def graph2dweight(D):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", + " plot.figure(figsize=(16, 8))\n", " fscale = 1/10**(-3)\n", " fscale = 10**(-3)\n", - " scale = np.amax(D)\n", - " extent = [fscale*x0,fscale*x1, fscale*z1, fscale*z0]\n", - " fig = plot.imshow(np.transpose(D), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", + " scale = np.amax(D)\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = plot.imshow(np.transpose(D), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.title('Weight Function')\n", @@ -960,11 +974,11 @@ "metadata": {}, "outputs": [], "source": [ - "weightsx = Function(name=\"weightsx\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "weightsx.data[:,:] = Mweightsx[:,:]\n", + "weightsx = Function(name=\"weightsx\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "weightsx.data[:, :] = Mweightsx[:, :]\n", "\n", - "weightsz = Function(name=\"weightsz\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "weightsz.data[:,:] = Mweightsz[:,:]" + "weightsz = Function(name=\"weightsz\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "weightsz.data[:, :] = Mweightsz[:, :]" ] }, { @@ -980,9 +994,9 @@ "metadata": {}, "outputs": [], "source": [ - "u1 = Function(name=\"u1\" ,grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "u2 = 
Function(name=\"u2\" ,grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "u3 = Function(name=\"u3\" ,grid=grid,space_order=2,staggered=NODE,dtype=np.float64)" + "u1 = Function(name=\"u1\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "u2 = Function(name=\"u2\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "u3 = Function(name=\"u3\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)" ] }, { @@ -1004,7 +1018,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil01 = [Eq(u1,u.backward),Eq(u2,u),Eq(u3,u.forward)]" + "stencil01 = [Eq(u1, u.backward), Eq(u2, u), Eq(u3, u.forward)]" ] }, { @@ -1020,7 +1034,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil02 = [Eq(u3,u.forward)]" + "stencil02 = [Eq(u3, u.forward)]" ] }, { @@ -1056,7 +1070,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil0 = Eq(u.forward, solve(pde0,u.forward))" + "stencil0 = Eq(u.forward, solve(pde0, u.forward))" ] }, { @@ -1082,22 +1096,22 @@ "metadata": {}, "outputs": [], "source": [ - "if(habctype==1):\n", + "if(habctype == 1):\n", "\n", " # Region B_{1}\n", - " aux1 = ((-vel[x,z]*dt+hx)*u2[x,z] + (vel[x,z]*dt+hx)*u2[x+1,z] + (vel[x,z]*dt-hx)*u3[x+1,z])/(vel[x,z]*dt+hx)\n", - " pde1 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux1\n", - " stencil1 = Eq(u.forward,pde1,subdomain = grid.subdomains['d1'])\n", + " aux1 = ((-vel[x, z]*dt+hx)*u2[x, z] + (vel[x, z]*dt+hx)*u2[x+1, z] + (vel[x, z]*dt-hx)*u3[x+1, z])/(vel[x, z]*dt+hx)\n", + " pde1 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux1\n", + " stencil1 = Eq(u.forward, pde1, subdomain=grid.subdomains['d1'])\n", "\n", " # Region B_{3}\n", - " aux2 = ((-vel[x,z]*dt+hx)*u2[x,z] + (vel[x,z]*dt+hx)*u2[x-1,z] + (vel[x,z]*dt-hx)*u3[x-1,z])/(vel[x,z]*dt+hx)\n", - " pde2 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux2\n", - " stencil2 = Eq(u.forward,pde2,subdomain = grid.subdomains['d2'])\n", + " aux2 = ((-vel[x, z]*dt+hx)*u2[x, z] + (vel[x, z]*dt+hx)*u2[x-1, z] + (vel[x, 
z]*dt-hx)*u3[x-1, z])/(vel[x, z]*dt+hx)\n", + " pde2 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux2\n", + " stencil2 = Eq(u.forward, pde2, subdomain=grid.subdomains['d2'])\n", "\n", " # Region B_{2}\n", - " aux3 = ((-vel[x,z]*dt+hz)*u2[x,z] + (vel[x,z]*dt+hz)*u2[x,z-1] + (vel[x,z]*dt-hz)*u3[x,z-1])/(vel[x,z]*dt+hz)\n", - " pde3 = (1-weightsz[x,z])*u3[x,z] + weightsz[x,z]*aux3\n", - " stencil3 = Eq(u.forward,pde3,subdomain = grid.subdomains['d3'])" + " aux3 = ((-vel[x, z]*dt+hz)*u2[x, z] + (vel[x, z]*dt+hz)*u2[x, z-1] + (vel[x, z]*dt-hz)*u3[x, z-1])/(vel[x, z]*dt+hz)\n", + " pde3 = (1-weightsz[x, z])*u3[x, z] + weightsz[x, z]*aux3\n", + " stencil3 = Eq(u.forward, pde3, subdomain=grid.subdomains['d3'])" ] }, { @@ -1113,62 +1127,62 @@ "metadata": {}, "outputs": [], "source": [ - "if(habctype==2):\n", + "if(habctype == 2):\n", "\n", " # Region B_{1}\n", - " cte11 = (1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z]\n", - " cte21 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z] - (1/(2*hz**2))*vel[x,z]*vel[x,z]\n", - " cte31 = -(1/(2*dt**2)) - (1/(2*dt*hx))*vel[x,z]\n", - " cte41 = (1/(dt**2))\n", - " cte51 = (1/(4*hz**2))*vel[x,z]**2\n", + " cte11 = (1/(2*dt**2)) + (1/(2*dt*hx))*vel[x, z]\n", + " cte21 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x, z] - (1/(2*hz**2))*vel[x, z]*vel[x, z]\n", + " cte31 = -(1/(2*dt**2)) - (1/(2*dt*hx))*vel[x, z]\n", + " cte41 = (1/(dt**2))\n", + " cte51 = (1/(4*hz**2))*vel[x, z]**2\n", "\n", - " aux1 = (cte21*(u3[x+1,z] + u1[x,z]) + cte31*u1[x+1,z] + cte41*(u2[x,z]+u2[x+1,z]) + cte51*(u3[x+1,z+1] + u3[x+1,z-1] + u1[x,z+1] + u1[x,z-1]))/cte11\n", - " pde1 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux1\n", - " stencil1 = Eq(u.forward,pde1,subdomain = grid.subdomains['d1'])\n", + " aux1 = (cte21*(u3[x+1, z] + u1[x, z]) + cte31*u1[x+1, z] + cte41*(u2[x, z]+u2[x+1, z]) + cte51*(u3[x+1, z+1] + u3[x+1, z-1] + u1[x, z+1] + u1[x, z-1]))/cte11\n", + " pde1 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux1\n", + " stencil1 = Eq(u.forward, pde1, 
subdomain=grid.subdomains['d1'])\n", "\n", " # Region B_{3}\n", - " cte12 = (1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z]\n", - " cte22 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z] - (1/(2*hz**2))*vel[x,z]**2\n", - " cte32 = -(1/(2*dt**2)) - (1/(2*dt*hx))*vel[x,z]\n", - " cte42 = (1/(dt**2))\n", - " cte52 = (1/(4*hz**2))*vel[x,z]*vel[x,z]\n", + " cte12 = (1/(2*dt**2)) + (1/(2*dt*hx))*vel[x, z]\n", + " cte22 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x, z] - (1/(2*hz**2))*vel[x, z]**2\n", + " cte32 = -(1/(2*dt**2)) - (1/(2*dt*hx))*vel[x, z]\n", + " cte42 = (1/(dt**2))\n", + " cte52 = (1/(4*hz**2))*vel[x, z]*vel[x, z]\n", "\n", - " aux2 = (cte22*(u3[x-1,z] + u1[x,z]) + cte32*u1[x-1,z] + cte42*(u2[x,z]+u2[x-1,z]) + cte52*(u3[x-1,z+1] + u3[x-1,z-1] + u1[x,z+1] + u1[x,z-1]))/cte12\n", - " pde2 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux2\n", - " stencil2 = Eq(u.forward,pde2,subdomain = grid.subdomains['d2'])\n", + " aux2 = (cte22*(u3[x-1, z] + u1[x, z]) + cte32*u1[x-1, z] + cte42*(u2[x, z]+u2[x-1, z]) + cte52*(u3[x-1, z+1] + u3[x-1, z-1] + u1[x, z+1] + u1[x, z-1]))/cte12\n", + " pde2 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux2\n", + " stencil2 = Eq(u.forward, pde2, subdomain=grid.subdomains['d2'])\n", "\n", " # Region B_{2}\n", - " cte13 = (1/(2*dt**2)) + (1/(2*dt*hz))*vel[x,z]\n", - " cte23 = -(1/(2*dt**2)) + (1/(2*dt*hz))*vel[x,z] - (1/(2*hx**2))*vel[x,z]**2\n", - " cte33 = -(1/(2*dt**2)) - (1/(2*dt*hz))*vel[x,z]\n", - " cte43 = (1/(dt**2))\n", - " cte53 = (1/(4*hx**2))*vel[x,z]*vel[x,z]\n", + " cte13 = (1/(2*dt**2)) + (1/(2*dt*hz))*vel[x, z]\n", + " cte23 = -(1/(2*dt**2)) + (1/(2*dt*hz))*vel[x, z] - (1/(2*hx**2))*vel[x, z]**2\n", + " cte33 = -(1/(2*dt**2)) - (1/(2*dt*hz))*vel[x, z]\n", + " cte43 = (1/(dt**2))\n", + " cte53 = (1/(4*hx**2))*vel[x, z]*vel[x, z]\n", "\n", - " aux3 = (cte23*(u3[x,z-1] + u1[x,z]) + cte33*u1[x,z-1] + cte43*(u2[x,z]+u2[x,z-1]) + cte53*(u3[x+1,z-1] + u3[x-1,z-1] + u1[x+1,z] + u1[x-1,z]))/cte13\n", - " pde3 = (1-weightsz[x,z])*u3[x,z] + 
weightsz[x,z]*aux3\n", - " stencil3 = Eq(u.forward,pde3,subdomain = grid.subdomains['d3'])\n", + " aux3 = (cte23*(u3[x, z-1] + u1[x, z]) + cte33*u1[x, z-1] + cte43*(u2[x, z]+u2[x, z-1]) + cte53*(u3[x+1, z-1] + u3[x-1, z-1] + u1[x+1, z] + u1[x-1, z]))/cte13\n", + " pde3 = (1-weightsz[x, z])*u3[x, z] + weightsz[x, z]*aux3\n", + " stencil3 = Eq(u.forward, pde3, subdomain=grid.subdomains['d3'])\n", "\n", " # Red point right side\n", - " stencil4 = [Eq(u[t+1,nptx-1-k,nptz-1-k],(1-weightsz[nptx-1-k,nptz-1-k])*u3[nptx-1-k,nptz-1-k] +\n", - " weightsz[nptx-1-k,nptz-1-k]*(((-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-1-k,nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-2-k,nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-2-k,nptz-2-k]\n", - " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-1-k,nptz-1-k]\n", - " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-1-k,nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-2-k,nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-2-k,nptz-2-k])\n", - " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt)))) for k in range(0,npmlz)]\n", + " stencil4 = [Eq(u[t+1, nptx-1-k, nptz-1-k], (1-weightsz[nptx-1-k, nptz-1-k])*u3[nptx-1-k, nptz-1-k] +\n", + " weightsz[nptx-1-k, nptz-1-k]*(((-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-1-k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-2-k, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-2-k, nptz-2-k]\n", + " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-1-k, nptz-1-k]\n", + " + (-(1/(4*hx)) + (1/(4*hz)) + 
(np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-1-k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-2-k, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-2-k, nptz-2-k])\n", + " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt)))) for k in range(0, npmlz)]\n", "\n", " # Red point left side\n", - " stencil5 = [Eq(u[t+1,k,nptz-1-k],(1-weightsx[k,nptz-1-k] )*u3[k,nptz-1-k]\n", - " + weightsx[k,nptz-1-k]*(( (-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k,nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k+1,nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k+1,nptz-2-k]\n", - " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k,nptz-1-k]\n", - " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k,nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k+1,nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k+1,nptz-2-k])\n", - " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt)))) for k in range(0,npmlx)]" + " stencil5 = [Eq(u[t+1, k, nptz-1-k], (1-weightsx[k, nptz-1-k])*u3[k, nptz-1-k]\n", + " + weightsx[k, nptz-1-k]*(((-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k+1, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k+1, nptz-2-k]\n", + " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k, nptz-1-k]\n", + " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k+1, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, 
nptz-1-k]*dt))*u2[k+1, nptz-2-k])\n", + " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt)))) for k in range(0, npmlx)]" ] }, { @@ -1184,7 +1198,7 @@ "metadata": {}, "outputs": [], "source": [ - "if(habctype==3):\n", + "if(habctype == 3):\n", "\n", " alpha1 = 0.0\n", " alpha2 = np.pi/4\n", @@ -1196,83 +1210,83 @@ " # Region B_{1}\n", " gama111 = np.cos(alpha1)*(1-a1)*(1/dt)\n", " gama121 = np.cos(alpha1)*(a1)*(1/dt)\n", - " gama131 = np.cos(alpha1)*(1-b1)*(1/hx)*vel[x,z]\n", - " gama141 = np.cos(alpha1)*(b1)*(1/hx)*vel[x,z]\n", + " gama131 = np.cos(alpha1)*(1-b1)*(1/hx)*vel[x, z]\n", + " gama141 = np.cos(alpha1)*(b1)*(1/hx)*vel[x, z]\n", "\n", " gama211 = np.cos(alpha2)*(1-a2)*(1/dt)\n", " gama221 = np.cos(alpha2)*(a2)*(1/dt)\n", - " gama231 = np.cos(alpha2)*(1-b2)*(1/hx)*vel[x,z]\n", - " gama241 = np.cos(alpha2)*(b2)*(1/hx)*vel[x,z]\n", + " gama231 = np.cos(alpha2)*(1-b2)*(1/hx)*vel[x, z]\n", + " gama241 = np.cos(alpha2)*(b2)*(1/hx)*vel[x, z]\n", "\n", - " c111 = gama111 + gama131\n", + " c111 = gama111 + gama131\n", " c121 = -gama111 + gama141\n", - " c131 = gama121 - gama131\n", + " c131 = gama121 - gama131\n", " c141 = -gama121 - gama141\n", "\n", - " c211 = gama211 + gama231\n", + " c211 = gama211 + gama231\n", " c221 = -gama211 + gama241\n", - " c231 = gama221 - gama231\n", + " c231 = gama221 - gama231\n", " c241 = -gama221 - gama241\n", "\n", - " aux1 = ( u2[x,z]*(-c111*c221-c121*c211) + u3[x+1,z]*(-c111*c231-c131*c211) + u2[x+1,z]*(-c111*c241-c121*c231-c141*c211-c131*c221)\n", - " + u1[x,z]*(-c121*c221) + u1[x+1,z]*(-c121*c241-c141*c221) + u3[x+2,z]*(-c131*c231) +u2[x+2,z]*(-c131*c241-c141*c231)\n", - " + u1[x+2,z]*(-c141*c241))/(c111*c211)\n", - " pde1 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux1\n", - " stencil1 = Eq(u.forward,pde1,subdomain = grid.subdomains['d1'])\n", + " aux1 = (u2[x, z]*(-c111*c221-c121*c211) + u3[x+1, z]*(-c111*c231-c131*c211) + u2[x+1, z]*(-c111*c241-c121*c231-c141*c211-c131*c221)\n", + " + u1[x, z]*(-c121*c221) 
+ u1[x+1, z]*(-c121*c241-c141*c221) + u3[x+2, z]*(-c131*c231) +u2[x+2, z]*(-c131*c241-c141*c231)\n", + " + u1[x+2, z]*(-c141*c241))/(c111*c211)\n", + " pde1 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux1\n", + " stencil1 = Eq(u.forward, pde1, subdomain=grid.subdomains['d1'])\n", "\n", " # Region B_{3}\n", " gama112 = np.cos(alpha1)*(1-a1)*(1/dt)\n", " gama122 = np.cos(alpha1)*(a1)*(1/dt)\n", - " gama132 = np.cos(alpha1)*(1-b1)*(1/hx)*vel[x,z]\n", - " gama142 = np.cos(alpha1)*(b1)*(1/hx)*vel[x,z]\n", + " gama132 = np.cos(alpha1)*(1-b1)*(1/hx)*vel[x, z]\n", + " gama142 = np.cos(alpha1)*(b1)*(1/hx)*vel[x, z]\n", "\n", " gama212 = np.cos(alpha2)*(1-a2)*(1/dt)\n", " gama222 = np.cos(alpha2)*(a2)*(1/dt)\n", - " gama232 = np.cos(alpha2)*(1-b2)*(1/hx)*vel[x,z]\n", - " gama242 = np.cos(alpha2)*(b2)*(1/hx)*vel[x,z]\n", + " gama232 = np.cos(alpha2)*(1-b2)*(1/hx)*vel[x, z]\n", + " gama242 = np.cos(alpha2)*(b2)*(1/hx)*vel[x, z]\n", "\n", - " c112 = gama112 + gama132\n", + " c112 = gama112 + gama132\n", " c122 = -gama112 + gama142\n", - " c132 = gama122 - gama132\n", + " c132 = gama122 - gama132\n", " c142 = -gama122 - gama142\n", "\n", - " c212 = gama212 + gama232\n", + " c212 = gama212 + gama232\n", " c222 = -gama212 + gama242\n", - " c232 = gama222 - gama232\n", + " c232 = gama222 - gama232\n", " c242 = -gama222 - gama242\n", "\n", - " aux2 = ( u2[x,z]*(-c112*c222-c122*c212) + u3[x-1,z]*(-c112*c232-c132*c212) + u2[x-1,z]*(-c112*c242-c122*c232-c142*c212-c132*c222)\n", - " + u1[x,z]*(-c122*c222) + u1[x-1,z]*(-c122*c242-c142*c222) + u3[x-2,z]*(-c132*c232) +u2[x-2,z]*(-c132*c242-c142*c232)\n", - " + u1[x-2,z]*(-c142*c242))/(c112*c212)\n", - " pde2 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux2\n", - " stencil2 = Eq(u.forward,pde2,subdomain = grid.subdomains['d2'])\n", + " aux2 = (u2[x, z]*(-c112*c222-c122*c212) + u3[x-1, z]*(-c112*c232-c132*c212) + u2[x-1, z]*(-c112*c242-c122*c232-c142*c212-c132*c222)\n", + " + u1[x, z]*(-c122*c222) + u1[x-1, z]*(-c122*c242-c142*c222) + 
u3[x-2, z]*(-c132*c232) +u2[x-2, z]*(-c132*c242-c142*c232)\n", + " + u1[x-2, z]*(-c142*c242))/(c112*c212)\n", + " pde2 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux2\n", + " stencil2 = Eq(u.forward, pde2, subdomain=grid.subdomains['d2'])\n", "\n", " # Region B_{2}\n", " gama113 = np.cos(alpha1)*(1-a1)*(1/dt)\n", " gama123 = np.cos(alpha1)*(a1)*(1/dt)\n", - " gama133 = np.cos(alpha1)*(1-b1)*(1/hz)*vel[x,z]\n", - " gama143 = np.cos(alpha1)*(b1)*(1/hz)*vel[x,z]\n", + " gama133 = np.cos(alpha1)*(1-b1)*(1/hz)*vel[x, z]\n", + " gama143 = np.cos(alpha1)*(b1)*(1/hz)*vel[x, z]\n", "\n", " gama213 = np.cos(alpha2)*(1-a2)*(1/dt)\n", " gama223 = np.cos(alpha2)*(a2)*(1/dt)\n", - " gama233 = np.cos(alpha2)*(1-b2)*(1/hz)*vel[x,z]\n", - " gama243 = np.cos(alpha2)*(b2)*(1/hz)*vel[x,z]\n", + " gama233 = np.cos(alpha2)*(1-b2)*(1/hz)*vel[x, z]\n", + " gama243 = np.cos(alpha2)*(b2)*(1/hz)*vel[x, z]\n", "\n", - " c113 = gama113 + gama133\n", + " c113 = gama113 + gama133\n", " c123 = -gama113 + gama143\n", - " c133 = gama123 - gama133\n", + " c133 = gama123 - gama133\n", " c143 = -gama123 - gama143\n", "\n", - " c213 = gama213 + gama233\n", + " c213 = gama213 + gama233\n", " c223 = -gama213 + gama243\n", - " c233 = gama223 - gama233\n", + " c233 = gama223 - gama233\n", " c243 = -gama223 - gama243\n", "\n", - " aux3 = ( u2[x,z]*(-c113*c223-c123*c213) + u3[x,z-1]*(-c113*c233-c133*c213) + u2[x,z-1]*(-c113*c243-c123*c233-c143*c213-c133*c223)\n", - " + u1[x,z]*(-c123*c223) + u1[x,z-1]*(-c123*c243-c143*c223) + u3[x,z-2]*(-c133*c233) +u2[x,z-2]*(-c133*c243-c143*c233)\n", - " + u1[x,z-2]*(-c143*c243))/(c113*c213)\n", - " pde3 = (1-weightsz[x,z])*u3[x,z] + weightsz[x,z]*aux3\n", - " stencil3 = Eq(u.forward,pde3,subdomain = grid.subdomains['d3'])" + " aux3 = (u2[x, z]*(-c113*c223-c123*c213) + u3[x, z-1]*(-c113*c233-c133*c213) + u2[x, z-1]*(-c113*c243-c123*c233-c143*c213-c133*c223)\n", + " + u1[x, z]*(-c123*c223) + u1[x, z-1]*(-c123*c243-c143*c223) + u3[x, z-2]*(-c133*c233) +u2[x, 
z-2]*(-c133*c243-c143*c233)\n", + " + u1[x, z-2]*(-c143*c243))/(c113*c213)\n", + " pde3 = (1-weightsz[x, z])*u3[x, z] + weightsz[x, z]*aux3\n", + " stencil3 = Eq(u.forward, pde3, subdomain=grid.subdomains['d3'])" ] }, { @@ -1288,7 +1302,7 @@ "metadata": {}, "outputs": [], "source": [ - "bc = [Eq(u[t+1,x,0],u[t+1,x,1])]" + "bc = [Eq(u[t+1, x, 0], u[t+1, x, 1])]" ] }, { @@ -1321,10 +1335,10 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "if(habctype!=2):\n", - " op = Operator([stencil0] + src_term + [stencil01,stencil3,stencil02,stencil2,stencil1] + bc + rec_term,subs=grid.spacing_map)\n", + "if(habctype != 2):\n", + " op = Operator([stencil0] + src_term + [stencil01, stencil3, stencil02, stencil2, stencil1] + bc + rec_term, subs=grid.spacing_map)\n", "else:\n", - " op = Operator([stencil0] + src_term + [stencil01,stencil3,stencil02,stencil2,stencil1,stencil02,stencil4,stencil5] + bc + rec_term,subs=grid.spacing_map)" + " op = Operator([stencil0] + src_term + [stencil01, stencil3, stencil02, stencil2, stencil1, stencil02, stencil4, stencil5] + bc + rec_term, subs=grid.spacing_map)" ] }, { @@ -1340,7 +1354,7 @@ "metadata": {}, "outputs": [], "source": [ - "u.data[:] = 0.\n", + "u.data[:] = 0.\n", "u1.data[:] = 0.\n", "u2.data[:] = 0.\n", "u3.data[:] = 0." 
@@ -1394,7 +1408,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op(time=nt,dt=dt0)" + "op(time=nt, dt=dt0)" ] }, { @@ -1410,23 +1424,23 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2d(U,i):\n", + "def graph2d(U, i):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " x0pml = x0 + npmlx*hxv\n", - " x1pml = x1 - npmlx*hxv\n", - " z0pml = z0\n", - " z1pml = z1 - npmlz*hzv\n", - " scale = np.amax(U[npmlx:-npmlx,0:-npmlz])/10.\n", - " extent = [fscale*x0pml,fscale*x1pml,fscale*z1pml,fscale*z0pml]\n", - " fig = plot.imshow(np.transpose(U[npmlx:-npmlx,0:-npmlz]),vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " x0pml = x0 + npmlx*hxv\n", + " x1pml = x1 - npmlx*hxv\n", + " z0pml = z0\n", + " z1pml = z1 - npmlz*hzv\n", + " scale = np.amax(U[npmlx:-npmlx, 0:-npmlz])/10.\n", + " extent = [fscale*x0pml, fscale*x1pml, fscale*z1pml, fscale*z0pml]\n", + " fig = plot.imshow(np.transpose(U[npmlx:-npmlx, 0:-npmlz]), vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.axis('equal')\n", - " if(i==1): plot.title('Map - Acoustic Problem with Devito - HABC A1')\n", - " if(i==2): plot.title('Map - Acoustic Problem with Devito - HABC A2')\n", - " if(i==3): plot.title('Map - Acoustic Problem with Devito - HABC Higdon')\n", + " if(i == 1): plot.title('Map - Acoustic Problem with Devito - HABC A1')\n", + " if(i == 2): plot.title('Map - Acoustic Problem with Devito - HABC A2')\n", + " if(i == 3): plot.title('Map - Acoustic Problem with Devito - HABC Higdon')\n", " plot.grid()\n", " ax = plot.gca()\n", " divider = make_axes_locatable(ax)\n", @@ -1465,7 +1479,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "graph2d(u.data[0,:,:],habctype)" + "graph2d(u.data[0, :, :], habctype)" ] 
}, { @@ -1481,22 +1495,22 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2drec(rec,i):\n", + "def graph2drec(rec, i):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", + " plot.figure(figsize=(16, 8))\n", " fscaled = 1/10**(3)\n", " fscalet = 1/10**(3)\n", - " x0pml = x0 + npmlx*hxv\n", - " x1pml = x1 - npmlx*hxv\n", - " scale = np.amax(rec[:,npmlx:-npmlx])/10.\n", - " extent = [fscaled*x0pml,fscaled*x1pml, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec[:,npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " x0pml = x0 + npmlx*hxv\n", + " x1pml = x1 - npmlx*hxv\n", + " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", + " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", " plot.axis('equal')\n", - " if(i==1): plot.title('Receivers Signal Profile - Devito with HABC A1')\n", - " if(i==2): plot.title('Receivers Signal Profile - Devito with HABC A2')\n", - " if(i==3): plot.title('Receivers Signal Profile - Devito with HABC Higdon')\n", + " if(i == 1): plot.title('Receivers Signal Profile - Devito with HABC A1')\n", + " if(i == 2): plot.title('Receivers Signal Profile - Devito with HABC A2')\n", + " if(i == 3): plot.title('Receivers Signal Profile - Devito with HABC Higdon')\n", " ax = plot.gca()\n", " divider = make_axes_locatable(ax)\n", " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", @@ -1532,7 +1546,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "graph2drec(rec.data,habctype)" + "graph2drec(rec.data, habctype)" ] }, { diff --git a/examples/seismic/acoustic/accuracy.ipynb b/examples/seismic/acoustic/accuracy.ipynb index 7c9bccc058..711dd1a9cd 100644 --- a/examples/seismic/acoustic/accuracy.ipynb +++ 
b/examples/seismic/acoustic/accuracy.ipynb @@ -141,10 +141,10 @@ "rec_coordinates = np.empty((1, 2))\n", "rec_coordinates[:, :] = 260.\n", "\n", - "print(\"The computational Grid has (%s, %s) grid points \"\n", - " \"and a physical extent of (%sm, %sm)\" % (*model.grid.shape, *model.grid.extent))\n", - "print(\"Source is at the center with coordinates (%sm, %sm)\" % tuple(src_coordinates[0]))\n", - "print(\"Receiver (single receiver) is located at (%sm, %sm) \" % tuple(rec_coordinates[0]))\n", + "print(\"The computational Grid has ({}, {}) grid points \"\n", + " \"and a physical extent of ({}m, {}m)\".format(*model.grid.shape, *model.grid.extent))\n", + "print(\"Source is at the center with coordinates ({}m, {}m)\".format(*tuple(src_coordinates[0])))\n", + "print(\"Receiver (single receiver) is located at ({}m, {}m) \".format(*tuple(rec_coordinates[0])))\n", "\n", "# Note: gets time sampling from model.critical_dt\n", "geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,\n", @@ -198,6 +198,7 @@ "sx, sz = src_coordinates[0, :]\n", "rx, rz = rec_coordinates[0, :]\n", "\n", + "\n", "# Define a Ricker wavelet shifted to zero lag for the Fourier transform\n", "def ricker(f, T, dt, t0):\n", " t = np.linspace(-t0, T-t0, int(T/dt))\n", @@ -205,6 +206,7 @@ " y = (1.0 - 2.0 * tt) * np.exp(- tt)\n", " return y\n", "\n", + "\n", "def analytical(nt, model, time, **kwargs):\n", " dt = kwargs.get('dt', model.critical_dt)\n", " # Fourier constants\n", @@ -260,11 +262,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "print(\"Numerical data min,max,abs; %+.6e %+.6e %+.6e\" %\n", - " (np.min(ref_rec.data), np.max(ref_rec.data), np.max(np.abs(ref_rec.data)) ))\n", - "print(\"Analytic data min,max,abs; %+.6e %+.6e %+.6e\" %\n", - " (np.min(U_t), np.max(U_t), (np.max(np.abs(U_t)))))" + "# NBVAL_IGNORE_OUTPUT\n", + "print(f\"Numerical data min,max,abs; {np.min(ref_rec.data):+.6e} {np.max(ref_rec.data):+.6e} {np.max(np.abs(ref_rec.data)):+.6e}\")\n", + 
"print(f\"Analytic data min,max,abs; {np.min(U_t):+.6e} {np.max(U_t):+.6e} {np.max(np.abs(U_t)):+.6e}\")" ] }, { @@ -298,11 +298,11 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Plot wavefield and source/rec position\n", - "plt.figure(figsize=(8,8))\n", - "amax = np.max(np.abs(ref_u.data[1,:,:]))\n", - "plt.imshow(ref_u.data[1,:,:], vmin=-1.0 * amax, vmax=+1.0 * amax, cmap=\"seismic\")\n", + "plt.figure(figsize=(8, 8))\n", + "amax = np.max(np.abs(ref_u.data[1, :, :]))\n", + "plt.imshow(ref_u.data[1, :, :], vmin=-1.0 * amax, vmax=+1.0 * amax, cmap=\"seismic\")\n", "plt.plot(2*sx+40, 2*sz+40, 'r*', markersize=11, label='source') # plot position of the source in model, add nbl for correct position\n", "plt.plot(2*rx+40, 2*rz+40, 'k^', markersize=8, label='receiver') # plot position of the receiver in model, add nbl for correct position\n", "plt.legend()\n", @@ -311,18 +311,18 @@ "plt.savefig('wavefieldperf.pdf')\n", "\n", "# Plot trace\n", - "plt.figure(figsize=(12,8))\n", - "plt.subplot(2,1,1)\n", + "plt.figure(figsize=(12, 8))\n", + "plt.subplot(2, 1, 1)\n", "plt.plot(time, ref_rec.data[:, 0], '-b', label='numerical')\n", "plt.plot(time, U_t[:], '--r', label='analytical')\n", - "plt.xlim([0,150])\n", + "plt.xlim([0, 150])\n", "plt.ylim([1.15*np.min(U_t[:]), 1.15*np.max(U_t[:])])\n", "plt.xlabel('time (ms)')\n", "plt.ylabel('amplitude')\n", "plt.legend()\n", - "plt.subplot(2,1,2)\n", + "plt.subplot(2, 1, 2)\n", "plt.plot(time, 100 *(ref_rec.data[:, 0] - U_t[:]), '-b', label='difference x100')\n", - "plt.xlim([0,150])\n", + "plt.xlim([0, 150])\n", "plt.ylim([1.15*np.min(U_t[:]), 1.15*np.max(U_t[:])])\n", "plt.xlabel('time (ms)')\n", "plt.ylabel('amplitude x100')\n", @@ -345,7 +345,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "error_time = np.zeros(5)\n", "error_time[0] = np.linalg.norm(U_t[:-1] - ref_rec.data[:-1, 0], 2) / np.sqrt(nt)\n", "errors_plot = [(time, U_t[:-1] - ref_rec.data[:-1, 0])]\n", @@ 
-390,7 +390,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "dt = [0.1000, 0.0800, 0.0750, 0.0625, 0.0500]\n", "nnt = (np.divide(150.0, dt) + 1).astype(int)\n", "\n", @@ -418,7 +418,7 @@ "\n", " solver = AcousticWaveSolver(model, geometry, time_order=2, space_order=8)\n", " ref_rec1, ref_u1, _ = solver.forward(dt=dt[i])\n", - " ref_rec1_data = ref_rec1.data[0:nnt[i],:]\n", + " ref_rec1_data = ref_rec1.data[0:nnt[i], :]\n", "\n", " time1 = np.linspace(0.0, 3000., 20*(nnt[i]-1) + 1)\n", " U_t1 = analytical(20*(nnt[i]-1) + 1, model, time1, dt=time1[1] - time1[0])\n", @@ -428,8 +428,7 @@ "\n", " ratio_d = dt[i-1]/dt[i] if i > 0 else 1.0\n", " ratio_e = error_time[i-1]/error_time[i] if i > 0 else 1.0\n", - " print(\"error for dt=%.4f is %12.6e -- ratio dt^2,ratio err; %12.6f %12.6f \\n\" %\n", - " (dt[i], error_time[i], ratio_d**2, ratio_e))\n", + " print(f\"error for dt={dt[i]:.4f} is {error_time[i]:12.6e} -- ratio dt^2,ratio err; {ratio_d**2:12.6f} {ratio_e:12.6f} \\n\")\n", " errors_plot.append((geometry.time_axis.time_values, U_t1[:-1] - ref_rec1_data[:-1, 0]))" ] }, @@ -452,13 +451,13 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(20, 10))\n", "theory = [t**2 for t in dt]\n", "theory = [error_time[0]*th/theory[0] for th in theory]\n", "plt.loglog([t for t in dt], error_time, '-ob', label=('Numerical'), linewidth=4, markersize=10)\n", "plt.loglog([t for t in dt], theory, '-^r', label=('Theory (2nd order)'), linewidth=4, markersize=10)\n", - "for x, y, a in zip([t for t in dt], theory, [('dt = %s ms' % (t)) for t in dt]):\n", + "for x, y, a in zip([t for t in dt], theory, [(f'dt = {t} ms') for t in dt]):\n", " plt.annotate(a, xy=(x, y), xytext=(4, 2),\n", " textcoords='offset points', size=20,\n", " horizontalalignment='left', verticalalignment='top')\n", @@ -492,7 +491,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "stylel = ('--y', '--b', '--r', '--g', 
'--c')\n", "\n", "start_t = lambda dt: int(50/dt)\n", @@ -506,8 +505,8 @@ " s, e = start_t(dti), end_t(dti)\n", " if i == 0:\n", " plt.plot(timei[s:e], U_t[s:e], 'k', label='analytical', linewidth=2)\n", - " plt.plot(timei[s:e], 100*erri[s:e], stylel[i], label=\"100 x error dt=%sms\"%dti, linewidth=2)\n", - "plt.xlim([50,100])\n", + " plt.plot(timei[s:e], 100*erri[s:e], stylel[i], label=f\"100 x error dt={dti}ms\", linewidth=2)\n", + "plt.xlim([50, 100])\n", "plt.xlabel(\"Time (ms)\", fontsize=20)\n", "plt.legend(fontsize=20)\n", "plt.show()" @@ -527,9 +526,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "pf = np.polyfit(np.log([t for t in dt]), np.log(error_time), deg=1)\n", - "print(\"Convergence rate in time is: %.4f\" % pf[0])\n", + "print(f\"Convergence rate in time is: {pf[0]:.4f}\")\n", "assert np.isclose(pf[0], 1.9, atol=0, rtol=.1)" ] }, @@ -569,14 +568,14 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "errorl2 = np.zeros((norder, nshapes))\n", "timing = np.zeros((norder, nshapes))\n", "\n", "set_log_level(\"ERROR\")\n", "ind_o = -1\n", "for spc in orders:\n", - " ind_o +=1\n", + " ind_o += 1\n", " ind_spc = -1\n", " for nn, h in shapes:\n", " ind_spc += 1\n", @@ -608,8 +607,7 @@ " # Note: we need to normalize by the factor of grid spacing squared\n", " errorl2[ind_o, ind_spc] = np.linalg.norm(loc_rec.data[:-1, 0] * c_num - U_t[:-1] * c_ana, 2) / np.sqrt(U_t.shape[0] - 1)\n", " timing[ind_o, ind_spc] = np.max([v for _, v in summary.timings.items()])\n", - " print(\"starting space order %s with (%s, %s) grid points the error is %s for %s seconds runtime\" %\n", - " (spc, nn, nn, errorl2[ind_o, ind_spc], timing[ind_o, ind_spc]))" + " print(f\"starting space order {spc} with ({nn}, {nn}) grid points the error is {errorl2[ind_o, ind_spc]} for {timing[ind_o, ind_spc]} seconds runtime\")" ] }, { @@ -631,13 +629,13 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "stylel = 
('-^k', '-^b', '-^r', '-^g', '-^c')\n", "\n", "plt.figure(figsize=(20, 10))\n", "for i in range(0, 5):\n", - " plt.loglog(errorl2[i, :], timing[i, :], stylel[i], label=('order %s' % orders[i]), linewidth=4, markersize=10)\n", - " for x, y, a in zip(errorl2[i, :], timing[i, :], [('dx = %s m' % (sc)) for sc in dx]):\n", + " plt.loglog(errorl2[i, :], timing[i, :], stylel[i], label=(f'order {orders[i]}'), linewidth=4, markersize=10)\n", + " for x, y, a in zip(errorl2[i, :], timing[i, :], [(f'dx = {sc} m') for sc in dx]):\n", " plt.annotate(a, xy=(x, y), xytext=(4, 2),\n", " textcoords='offset points', size=20)\n", "plt.xlabel(\"$|| u_{num} - u_{ref}||_{inf}$\", fontsize=20)\n", @@ -669,7 +667,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "stylel = ('-^k', '-^b', '-^r', '-^g', '-^c')\n", "style2 = ('--k', '--b', '--r', '--g', '--c')\n", "\n", @@ -677,9 +675,9 @@ "for i in range(0, 5):\n", " theory = [k**(orders[i]) for k in dx]\n", " theory = [errorl2[i, 2]*th/theory[2] for th in theory]\n", - " plt.loglog([sc for sc in dx], errorl2[i, :], stylel[i], label=('Numerical order %s' % orders[i]),\n", + " plt.loglog([sc for sc in dx], errorl2[i, :], stylel[i], label=(f'Numerical order {orders[i]}'),\n", " linewidth=4, markersize=10)\n", - " plt.loglog([sc for sc in dx], theory, style2[i], label=('Theory order %s' % orders[i]),\n", + " plt.loglog([sc for sc in dx], theory, style2[i], label=(f'Theory order {orders[i]}'),\n", " linewidth=4, markersize=10)\n", "plt.xlabel(\"Grid spacing $dx$ (m)\", fontsize=20)\n", "plt.ylabel(\"$||u_{num} - u_{ref}||_{inf}$\", fontsize=20)\n", @@ -710,13 +708,13 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "for i in range(5):\n", " pf = np.polyfit(np.log([sc for sc in dx]), np.log(errorl2[i, :]), deg=1)[0]\n", - " if i==3:\n", + " if i == 3:\n", " pf = np.polyfit(np.log([sc for sc in dx][1:]), np.log(errorl2[i, 1:]), deg=1)[0]\n", - " print(\"Convergence rate for order %s is %s\" % 
(orders[i], pf))\n", - " if i<4:\n", + " print(f\"Convergence rate for order {orders[i]} is {pf}\")\n", + " if i < 4:\n", " assert np.isclose(pf, orders[i], atol=0, rtol=.2)" ] } diff --git a/examples/seismic/model.py b/examples/seismic/model.py index c927b88abb..e3eb312710 100644 --- a/examples/seismic/model.py +++ b/examples/seismic/model.py @@ -45,7 +45,7 @@ def initialize_damp(damp, padsizes, spacing, abc_type="damp", fs=False): if not fs or d is not damp.dimensions[-1]: dampcoeff = 1.5 * np.log(1.0 / 0.001) / (nbl) # left - dim_l = SubDimension.left(name='abc_%s_l' % d.name, parent=d, + dim_l = SubDimension.left(name=f'abc_{d.name}_l', parent=d, thickness=nbl) pos = Abs((nbl - (dim_l - d.symbolic_min) + 1) / float(nbl)) val = dampcoeff * (pos - sin(2*np.pi*pos)/(2*np.pi)) @@ -53,7 +53,7 @@ def initialize_damp(damp, padsizes, spacing, abc_type="damp", fs=False): eqs += [Inc(damp.subs({d: dim_l}), val/d.spacing)] # right dampcoeff = 1.5 * np.log(1.0 / 0.001) / (nbr) - dim_r = SubDimension.right(name='abc_%s_r' % d.name, parent=d, + dim_r = SubDimension.right(name=f'abc_{d.name}_r', parent=d, thickness=nbr) pos = Abs((nbr - (d.symbolic_max - dim_r) + 1) / float(nbr)) val = dampcoeff * (pos - sin(2*np.pi*pos)/(2*np.pi)) @@ -154,9 +154,9 @@ def _initialize_bcs(self, bcs="damp"): if init or re_init: if re_init and not init: bcs_o = "damp" if bcs == "mask" else "mask" - warning("Re-initializing damp profile from %s to %s" % (bcs_o, bcs)) - warning("Model has to be created with `bcs=\"%s\"`" - "for this WaveSolver" % bcs) + warning(f"Re-initializing damp profile from {bcs_o} to {bcs}") + warning(f"Model has to be created with `bcs=\"{bcs}\"`" + "for this WaveSolver") initialize_damp(self.damp, self.padsizes, self.spacing, abc_type=bcs, fs=self.fs) self._physical_parameters.update(['damp']) @@ -398,9 +398,8 @@ def update(self, name, value): elif value.shape == self.shape: initialize_function(param, value, self.nbl) else: - raise ValueError("Incorrect input size %s for 
model" % value.shape + - " %s without or %s with padding" % (self.shape, - param.shape)) + raise ValueError(f"Incorrect input size {value.shape} for model" + + f" {self.shape} without or {param.shape} with padding") else: param.data = value diff --git a/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb b/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb index 01f9d32716..051e925b36 100644 --- a/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb +++ b/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb @@ -266,7 +266,7 @@ "diff = (f1g2+g1f2)/(f1g2-g1f2)\n", "\n", "tol = 100 * np.finfo(dtype).eps\n", - "print(\"f1g2, g1f2, diff, tol; %+.6e %+.6e %+.6e %+.6e\" % (f1g2, g1f2, diff, tol))\n", + "print(f\"f1g2, g1f2, diff, tol; {f1g2:+.6e} {g1f2:+.6e} {diff:+.6e} {tol:+.6e}\")\n", "\n", "# At last the unit test\n", "# Assert these dot products are float epsilon close in relative error\n", @@ -506,19 +506,19 @@ ], "source": [ "# Define dimensions for the interior of the model\n", - "nx,nz = 751,751\n", - "dx,dz = 10.0,10.0 # Grid spacing in m\n", + "nx, nz = 751, 751\n", + "dx, dz = 10.0, 10.0 # Grid spacing in m\n", "shape = (nx, nz) # Number of grid points\n", - "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", + "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", "origin = (0., 0.) 
# Origin of coordinate system, specified in m.\n", "extent = tuple([s*(n-1) for s, n in zip(spacing, shape)])\n", "\n", "# Define dimensions for the model padded with absorbing boundaries\n", "npad = 50 # number of points in absorbing boundary region (all sides)\n", - "nxpad,nzpad = nx+2*npad, nz+2*npad\n", - "shape_pad = np.array(shape) + 2 * npad\n", - "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)])\n", - "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])\n", + "nxpad, nzpad = nx+2*npad, nz+2*npad\n", + "shape_pad = np.array(shape) + 2 * npad\n", + "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)])\n", + "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])\n", "\n", "# Define the dimensions\n", "# Note if you do not specify dimensions, you get in order x,y,z\n", @@ -588,7 +588,7 @@ "\n", "# Constant density\n", "b = Function(name='b', grid=grid, space_order=space_order)\n", - "b.data[:,:] = 1.0 / 1.0" + "b.data[:, :] = 1.0 / 1.0" ] }, { @@ -623,7 +623,7 @@ " \"\"\"\n", " coeff = 0.38 if len(v.grid.shape) == 3 else 0.42\n", " dt = 0.75 * v.dtype(coeff * np.min(v.grid.spacing) / (np.max(v.data)))\n", - " return v.dtype(\"%.5e\" % dt)" + " return v.dtype(f\"{dt:.5e}\")" ] }, { @@ -706,20 +706,18 @@ "# Source in the center of the model at 10 Hz center frequency\n", "fpeak = 0.010\n", "src = RickerSource(name='src', grid=grid, f0=fpeak, npoint=1, time_range=time_range)\n", - "src.coordinates.data[0,0] = dx * (nx//2)\n", - "src.coordinates.data[0,1] = dz * (nz//2)\n", + "src.coordinates.data[0, 0] = dx * (nx//2)\n", + "src.coordinates.data[0, 1] = dz * (nz//2)\n", "\n", "# line of receivers along the right edge of the model\n", "rec = Receiver(name='rec', grid=grid, npoint=nz, time_range=time_range)\n", - "rec.coordinates.data[:,0] = dx * (nx//2)\n", - "rec.coordinates.data[:,1] = np.linspace(0.0, dz*(nz-1), nz)\n", + "rec.coordinates.data[:, 0] = dx * (nx//2)\n", + "rec.coordinates.data[:, 1] = 
np.linspace(0.0, dz*(nz-1), nz)\n", "\n", - "print(\"src_coordinate X; %+12.4f\" % (src.coordinates.data[0,0]))\n", - "print(\"src_coordinate Z; %+12.4f\" % (src.coordinates.data[0,1]))\n", - "print(\"rec_coordinates X min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(rec.coordinates.data[:,0]), np.max(rec.coordinates.data[:,0])))\n", - "print(\"rec_coordinates Z min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(rec.coordinates.data[:,1]), np.max(rec.coordinates.data[:,1])))\n", + "print(f\"src_coordinate X; {src.coordinates.data[0, 0]:+12.4f}\")\n", + "print(f\"src_coordinate Z; {src.coordinates.data[0, 1]:+12.4f}\")\n", + "print(f\"rec_coordinates X min/max; {np.min(rec.coordinates.data[:, 0]):+12.4f} {np.max(rec.coordinates.data[:, 0]):+12.4f}\")\n", + "print(f\"rec_coordinates Z min/max; {np.min(rec.coordinates.data[:, 1]):+12.4f} {np.max(rec.coordinates.data[:, 1]):+12.4f}\")\n", "\n", "# We can plot the time signature to see the wavelet\n", "src.show()" @@ -762,7 +760,7 @@ "vmin, vmax = 1.4, 1.7\n", "dmin, dmax = 0.9, 1.1\n", "\n", - "plt.figure(figsize=(12,8))\n", + "plt.figure(figsize=(12, 8))\n", "\n", "plt.subplot(1, 2, 1)\n", "plt.imshow(np.transpose(m.data), cmap=cm.jet,\n", @@ -771,9 +769,9 @@ "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'white', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \\\n", + "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -786,9 +784,9 @@ "plt.plot([origin[0], origin[0], extent[0], extent[0], 
origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'white', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \\\n", + "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -910,7 +908,7 @@ "q100 = np.log10(w / wOverQ_100.data)\n", "lmin, lmax = np.log10(qmin), np.log10(100)\n", "\n", - "plt.figure(figsize=(12,8))\n", + "plt.figure(figsize=(12, 8))\n", "\n", "plt.subplot(1, 2, 1)\n", "plt.imshow(np.transpose(q025.data), cmap=cm.jet,\n", @@ -922,9 +920,9 @@ "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'white', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \\\n", + "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -940,9 +938,9 @@ "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'white', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \\\n", + "plt.plot(rec.coordinates.data[:, 0], 
rec.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -974,7 +972,7 @@ "\n", "# Get the symbols for dimensions for t, x, z\n", "# We need these below in order to write the source injection and the\n", - "t,x,z = u.dimensions" + "t, x, z = u.dimensions" ] }, { @@ -1053,7 +1051,7 @@ "\n", "# Generate the time update equation and operator for Q=25 model\n", "eq_time_update = (t.spacing**2 * m**2 / b) * \\\n", - " ((b * u.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \\\n", + " ((b * u.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", " (b * u.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2)) + \\\n", " (2 - t.spacing * wOverQ_025) * u + \\\n", " (t.spacing * wOverQ_025 - 1) * u.backward\n", @@ -1063,7 +1061,7 @@ "# Update the dimension spacing_map to include the time dimension\n", "# These symbols will be replaced with the relevant scalars by the Operator\n", "spacing_map = grid.spacing_map\n", - "spacing_map.update({t.spacing : dt})\n", + "spacing_map.update({t.spacing: dt})\n", "print(\"spacing_map; \", spacing_map)\n", "\n", "# op = Operator([stencil] + src_term + rec_term)\n", @@ -1397,10 +1395,10 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Run the operator for the Q=25 model\n", - "print(\"m min/max; %+12.6e %+12.6e\" % (np.min(m.data), np.max(m.data)))\n", - "print(\"b min/max; %+12.6e %+12.6e\" % (np.min(b.data), np.max(b.data)))\n", - "print(\"wOverQ_025 min/max; %+12.6e %+12.6e\" % (np.min(wOverQ_025.data), np.max(wOverQ_025.data)))\n", - "print(\"wOverQ_100 min/max; %+12.6e %+12.6e\" % (np.min(wOverQ_100.data), np.max(wOverQ_100.data)))\n", + "print(f\"m min/max; {np.min(m.data):+12.6e} {np.max(m.data):+12.6e}\")\n", + "print(f\"b min/max; 
{np.min(b.data):+12.6e} {np.max(b.data):+12.6e}\")\n", + "print(f\"wOverQ_025 min/max; {np.min(wOverQ_025.data):+12.6e} {np.max(wOverQ_025.data):+12.6e}\")\n", + "print(f\"wOverQ_100 min/max; {np.min(wOverQ_100.data):+12.6e} {np.max(wOverQ_100.data):+12.6e}\")\n", "print(time_range)\n", "u.data[:] = 0\n", "op(time=time_range.num-1)\n", @@ -1414,10 +1412,8 @@ "u.data[:] = 0\n", "op(time=time_range.num-1, wOverQ_025=wOverQ_100)\n", "\n", - "print(\"Q= 25 receiver data min/max; %+12.6e %+12.6e\" %\\\n", - " (np.min(recQ25.data[:]), np.max(recQ25.data[:])))\n", - "print(\"Q=100 receiver data min/max; %+12.6e %+12.6e\" %\\\n", - " (np.min(rec.data[:]), np.max(rec.data[:])))" + "print(f\"Q= 25 receiver data min/max; {np.min(recQ25.data[:]):+12.6e} {np.max(recQ25.data[:]):+12.6e}\")\n", + "print(f\"Q=100 receiver data min/max; {np.min(rec.data[:]):+12.6e} {np.max(rec.data[:]):+12.6e}\")" ] }, { @@ -1469,34 +1465,34 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot the two wavefields, normalized to Q=100 (the larger amplitude)\n", - "amax_Q25 = 1.0 * np.max(np.abs(uQ25.data[1,:,:]))\n", - "amax_Q100 = 1.0 * np.max(np.abs(u.data[1,:,:]))\n", - "print(\"amax Q= 25; %12.6f\" % (amax_Q25))\n", - "print(\"amax Q=100; %12.6f\" % (amax_Q100))\n", + "amax_Q25 = 1.0 * np.max(np.abs(uQ25.data[1, :, :]))\n", + "amax_Q100 = 1.0 * np.max(np.abs(u.data[1, :, :]))\n", + "print(f\"amax Q= 25; {amax_Q25:12.6f}\")\n", + "print(f\"amax Q=100; {amax_Q100:12.6f}\")\n", "\n", - "plt.figure(figsize=(12,8))\n", + "plt.figure(figsize=(12, 8))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(np.transpose(uQ25.data[1,:,:] / amax_Q100), cmap=\"seismic\",\n", + "plt.imshow(np.transpose(uQ25.data[1, :, :] / amax_Q100), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'black', linewidth=4, 
linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", "plt.title(\"Data for $Q=25$ model\")\n", "\n", "plt.subplot(1, 2, 2)\n", - "plt.imshow(np.transpose(u.data[1,:,:] / amax_Q100), cmap=\"seismic\",\n", + "plt.imshow(np.transpose(u.data[1, :, :] / amax_Q100), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'black', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -1541,15 +1537,15 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot the two receiver gathers, normalized to Q=100 (the larger amplitude)\n", - "amax_Q25 = 0.1 * np.max(np.abs(recQ25.data[:]))\n", + "amax_Q25 = 0.1 * np.max(np.abs(recQ25.data[:]))\n", "amax_Q100 = 0.1 * np.max(np.abs(rec.data[:]))\n", - "print(\"amax Q= 25; %12.6f\" % (amax_Q25))\n", - "print(\"amax Q=100; %12.6f\" % (amax_Q100))\n", + "print(f\"amax Q= 25; {amax_Q25:12.6f}\")\n", + "print(f\"amax Q=100; {amax_Q100:12.6f}\")\n", "\n", - "plt.figure(figsize=(12,8))\n", + "plt.figure(figsize=(12, 8))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(recQ25.data[:,:] / amax_Q100, cmap=\"seismic\",\n", + "plt.imshow(recQ25.data[:, :] / amax_Q100, cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", 
"plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -1557,7 +1553,7 @@ "plt.title(\"Receiver gather for $Q=25$ model\")\n", "\n", "plt.subplot(1, 2, 2)\n", - "plt.imshow(rec.data[:,:] / amax_Q100, cmap=\"seismic\",\n", + "plt.imshow(rec.data[:, :] / amax_Q100, cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.xlabel(\"X Coordinate (m)\")\n", diff --git a/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb b/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb index 38e0b5b472..d290485757 100644 --- a/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb +++ b/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb @@ -315,19 +315,19 @@ ], "source": [ "# Define dimensions for the interior of the model\n", - "nx,nz = 301,301\n", - "dx,dz = 10.0,10.0 # Grid spacing in m\n", + "nx, nz = 301, 301\n", + "dx, dz = 10.0, 10.0 # Grid spacing in m\n", "shape = (nx, nz) # Number of grid points\n", - "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", + "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", "origin = (0., 0.) 
# Origin of coordinate system, specified in m.\n", "extent = tuple([s*(n-1) for s, n in zip(spacing, shape)])\n", "\n", "# Define dimensions for the model padded with absorbing boundaries\n", "npad = 50 # number of points in absorbing boundary region (all sides)\n", - "nxpad,nzpad = nx + 2 * npad, nz + 2 * npad\n", - "shape_pad = np.array(shape) + 2 * npad\n", - "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)])\n", - "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])\n", + "nxpad, nzpad = nx + 2 * npad, nz + 2 * npad\n", + "shape_pad = np.array(shape) + 2 * npad\n", + "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)])\n", + "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])\n", "\n", "# Define the dimensions\n", "# Note if you do not specify dimensions, you get in order x,y,z\n", @@ -405,7 +405,7 @@ "\n", "# Constant density\n", "b = Function(name='b', grid=grid, space_order=space_order)\n", - "b.data[:,:] = 1.0 / 1.0\n", + "b.data[:, :] = 1.0 / 1.0\n", "\n", "# Initialize the attenuation profile for Q=100 model\n", "fpeak = 0.010\n", @@ -460,7 +460,7 @@ " \"\"\"\n", " coeff = 0.38 if len(v.grid.shape) == 3 else 0.42\n", " dt = 0.75 * v.dtype(coeff * np.min(v.grid.spacing) / (np.max(v.data)))\n", - " return v.dtype(\"%.5e\" % dt)" + " return v.dtype(f\"{dt:.5e}\")" ] }, { @@ -491,20 +491,18 @@ "\n", "# Source at 1/4 X, 1/2 Z, Ricker with 10 Hz center frequency\n", "src_nl = RickerSource(name='src_nl', grid=grid, f0=fpeak, npoint=1, time_range=time_range)\n", - "src_nl.coordinates.data[0,0] = dx * 1 * nx//4\n", - "src_nl.coordinates.data[0,1] = dz * shape[1]//2\n", + "src_nl.coordinates.data[0, 0] = dx * 1 * nx//4\n", + "src_nl.coordinates.data[0, 1] = dz * shape[1]//2\n", "\n", "# Receivers at 3/4 X, line in Z\n", "rec_nl = Receiver(name='rec_nl', grid=grid, npoint=nz, time_range=time_range)\n", - "rec_nl.coordinates.data[:,0] = dx * 3 * nx//4\n", - "rec_nl.coordinates.data[:,1] = np.linspace(0.0, 
dz*(nz-1), nz)\n", - "\n", - "print(\"src_coordinate X; %+12.4f\" % (src_nl.coordinates.data[0,0]))\n", - "print(\"src_coordinate Z; %+12.4f\" % (src_nl.coordinates.data[0,1]))\n", - "print(\"rec_coordinates X min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(rec_nl.coordinates.data[:,0]), np.max(rec_nl.coordinates.data[:,0])))\n", - "print(\"rec_coordinates Z min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(rec_nl.coordinates.data[:,1]), np.max(rec_nl.coordinates.data[:,1])))" + "rec_nl.coordinates.data[:, 0] = dx * 3 * nx//4\n", + "rec_nl.coordinates.data[:, 1] = np.linspace(0.0, dz*(nz-1), nz)\n", + "\n", + "print(f\"src_coordinate X; {src_nl.coordinates.data[0, 0]:+12.4f}\")\n", + "print(f\"src_coordinate Z; {src_nl.coordinates.data[0, 1]:+12.4f}\")\n", + "print(f\"rec_coordinates X min/max; {np.min(rec_nl.coordinates.data[:, 0]):+12.4f} {np.max(rec_nl.coordinates.data[:, 0]):+12.4f}\")\n", + "print(f\"rec_coordinates Z min/max; {np.min(rec_nl.coordinates.data[:, 1]):+12.4f} {np.max(rec_nl.coordinates.data[:, 1]):+12.4f}\")" ] }, { @@ -558,18 +556,18 @@ "x2 = dx * nx\n", "z1 = 0.0\n", "z2 = dz * nz\n", - "abcX = [x1,x1,x2,x2,x1]\n", - "abcZ = [z1,z2,z2,z1,z1]\n", + "abcX = [x1, x1, x2, x2, x1]\n", + "abcZ = [z1, z2, z2, z1, z1]\n", "\n", - "plt.figure(figsize=(12,12))\n", + "plt.figure(figsize=(12, 12))\n", "\n", "plt.subplot(2, 2, 1)\n", "plt.imshow(np.transpose(m0.data), cmap=cm.jet,\n", " vmin=vmin, vmax=vmax, extent=plt_extent)\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', 
markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -580,9 +578,9 @@ "plt.imshow(np.transpose(1 / b.data), cmap=cm.jet,\n", " vmin=bmin, vmax=bmax, extent=plt_extent)\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='Density (kg/m^3)')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -593,9 +591,9 @@ "plt.imshow(np.transpose(dm.data), cmap=\"seismic\",\n", " vmin=pmin, vmax=pmax, extent=plt_extent)\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -606,9 +604,9 @@ "plt.imshow(np.transpose(np.log10(q.data)), cmap=cm.jet,\n", " vmin=np.log10(qmin), vmax=np.log10(qmax), extent=plt_extent)\n", "plt.plot(abcX, abcZ, 'white', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - 
"plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='log10 $Q_p$')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -643,7 +641,7 @@ "duAdj = TimeFunction(name=\"duAdj\", grid=grid, time_order=2, space_order=space_order, save=None)\n", "\n", "# Get the dimensions for t, x, z\n", - "t,x,z = u0.dimensions" + "t, x, z = u0.dimensions" ] }, { @@ -717,7 +715,7 @@ "# Update the dimension spacing_map to include the time dimension\n", "# Please refer to the first implementation notebook for more information\n", "spacing_map = grid.spacing_map\n", - "spacing_map.update({t.spacing : dt})\n", + "spacing_map.update({t.spacing: dt})\n", "print(\"spacing_map; \", spacing_map)\n", "\n", "# Source injection and Receiver extraction\n", @@ -802,7 +800,7 @@ "\n", "# Receiver container and receiver extraction for the linearized operator\n", "rec_ln = Receiver(name='rec_ln', grid=grid, npoint=nz, time_range=time_range)\n", - "rec_ln.coordinates.data[:,:] = rec_nl.coordinates.data[:,:]\n", + "rec_ln.coordinates.data[:, :] = rec_nl.coordinates.data[:, :]\n", "rec_term_ln_fwd = rec_ln.interpolate(expr=duFwd.forward)\n", "\n", "# Instantiate and run the operator for the linearized forward\n", @@ -861,37 +859,37 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot the two wavefields, each normalized to own maximum\n", - "kt = nt - 2\n", + "kt = nt - 2\n", "\n", - "amax_nl = 1.0 * np.max(np.abs(u0.data[kt,:,:]))\n", - "amax_ln = 0.1 * np.max(np.abs(duFwd.data[kt,:,:]))\n", + "amax_nl = 1.0 * np.max(np.abs(u0.data[kt, :, :]))\n", + "amax_ln = 0.1 * 
np.max(np.abs(duFwd.data[kt, :, :]))\n", "\n", - "print(\"amax nl; %12.6f\" % (amax_nl))\n", - "print(\"amax ln t=%.2fs; %12.6f\" % (dt * kt / 1000, amax_ln))\n", + "print(f\"amax nl; {amax_nl:12.6f}\")\n", + "print(f\"amax ln t={dt * kt / 1000:.2f}s; {amax_ln:12.6f}\")\n", "\n", - "plt.figure(figsize=(12,12))\n", + "plt.figure(figsize=(12, 12))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(np.transpose(u0.data[kt,:,:]), cmap=\"seismic\",\n", + "plt.imshow(np.transpose(u0.data[kt, :, :]), cmap=\"seismic\",\n", " vmin=-amax_nl, vmax=+amax_nl, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", "plt.title(\"Nonlinear wavefield at t=%.2fs\" % (dt * kt / 1000))\n", "\n", "plt.subplot(1, 2, 2)\n", - "plt.imshow(np.transpose(duFwd.data[kt,:,:]), cmap=\"seismic\",\n", + "plt.imshow(np.transpose(duFwd.data[kt, :, :]), cmap=\"seismic\",\n", " vmin=-amax_ln, vmax=+amax_ln, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - 
"plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -1061,21 +1059,21 @@ "amax1 = 0.5 * np.max(np.abs(dm.data[:]))\n", "amax2 = 0.5 * np.max(np.abs(dmAdj.data[:]))\n", "\n", - "print(\"amax dm; %12.6e\" % (amax1))\n", - "print(\"amax dmAdj %12.6e\" % (amax2))\n", + "print(f\"amax dm; {amax1:12.6e}\")\n", + "print(f\"amax dmAdj {amax2:12.6e}\")\n", "\n", "dm.data[:] = dm.data / amax1\n", "dmAdj.data[:] = dmAdj.data / amax2\n", "\n", - "plt.figure(figsize=(12,8))\n", + "plt.figure(figsize=(12, 8))\n", "\n", "plt.subplot(1, 2, 1)\n", "plt.imshow(np.transpose(dm.data), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -1086,9 +1084,9 @@ "plt.imshow(np.transpose(dmAdj.data), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', 
linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.xlabel(\"X Coordinate (m)\")\n", diff --git a/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb b/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb index 62c4fdde57..c626bf6926 100644 --- a/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb +++ b/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb @@ -260,7 +260,7 @@ "def analytic_response(fpeak, time_axis, src_coords, rec_coords, v):\n", " nt = time_axis.num\n", " dt = time_axis.step\n", - " v0 = v.data[0,0]\n", + " v0 = v.data[0, 0]\n", " sx, sz = src_coords[0, :]\n", " rx, rz = rec_coords[0, :]\n", " ntpad = 20 * (nt - 1) + 1\n", @@ -286,7 +286,7 @@ " for a in range(1, nf - 1):\n", " w = 2 * np.pi * faxis[a]\n", " r = np.sqrt((rx - sx)**2 + (rz - sz)**2)\n", - " U_a[a] = -1j * np.pi * hankel2(0.0, w * r / v0) * R[a]\n", + " U_a[a] = -1j * np.pi * hankel2(0.0, w * r / v0) * R[a]\n", "\n", " # Do inverse fft on 0:dt:T and you have analytical solution\n", " U_t = 1.0/(2.0 * np.pi) * np.real(np.fft.ifft(U_a[:], ntpad))\n", @@ -381,7 +381,7 @@ "arms = np.max(np.abs(recAna))\n", "drms = np.max(np.abs(diff))\n", "\n", - "print(\"\\nMaximum absolute numerical,analytic,diff; %+12.6e %+12.6e %+12.6e\" % (nrms, arms, drms))\n", + "print(f\"\\nMaximum absolute numerical,analytic,diff; {nrms:+12.6e} {arms:+12.6e} {drms:+12.6e}\")\n", "\n", "# This isnt a very strict tolerance ...\n", "tol = 0.1\n", @@ -391,8 +391,8 @@ "amin, amax = np.min(recAna), np.max(recAna)\n", "\n", "print(\"\")\n", - "print(\"Numerical min/max; %+12.6e %+12.6e\" % (nmin, nmax))\n", - "print(\"Analytic min/max; %+12.6e %+12.6e\" % (amin, amax))" + 
"print(f\"Numerical min/max; {nmin:+12.6e} {nmax:+12.6e}\")\n", + "print(f\"Analytic min/max; {amin:+12.6e} {amax:+12.6e}\")" ] }, { @@ -429,9 +429,9 @@ "\n", "# Plot\n", "x1 = origin[0] - model.nbl * model.spacing[0]\n", - "x2 = model.domain_size[0] + model.nbl * model.spacing[0]\n", + "x2 = model.domain_size[0] + model.nbl * model.spacing[0]\n", "z1 = origin[1] - model.nbl * model.spacing[1]\n", - "z2 = model.domain_size[1] + model.nbl * model.spacing[1]\n", + "z2 = model.domain_size[1] + model.nbl * model.spacing[1]\n", "\n", "xABC1 = origin[0]\n", "xABC2 = model.domain_size[0]\n", @@ -442,12 +442,12 @@ "abc_pairsX = [xABC1, xABC1, xABC2, xABC2, xABC1]\n", "abc_pairsZ = [zABC1, zABC2, zABC2, zABC1, zABC1]\n", "\n", - "plt.figure(figsize=(12.5,12.5))\n", + "plt.figure(figsize=(12.5, 12.5))\n", "\n", "# Plot wavefield\n", - "plt.subplot(2,2,1)\n", + "plt.subplot(2, 2, 1)\n", "amax = 1.1 * np.max(np.abs(recNum.data[:]))\n", - "plt.imshow(uNum.data[1,:,:], vmin=-amax, vmax=+amax, cmap=\"seismic\",\n", + "plt.imshow(uNum.data[1, :, :], vmin=-amax, vmax=+amax, cmap=\"seismic\",\n", " aspect=\"auto\", extent=plt_extent)\n", "plt.plot(src_coords[0, 0], src_coords[0, 1], 'r*', markersize=15, label='Source')\n", "plt.plot(rec_coords[0, 0], rec_coords[0, 1], 'k^', markersize=11, label='Receiver')\n", @@ -460,23 +460,23 @@ "plt.tight_layout()\n", "\n", "# Plot trace\n", - "plt.subplot(2,2,3)\n", + "plt.subplot(2, 2, 3)\n", "plt.plot(time, recNum.data[:, 0], '-b', label='Numeric')\n", "plt.plot(time, recAna[:], '--r', label='Analytic')\n", "plt.xlabel('Time (ms)')\n", "plt.ylabel('Amplitude')\n", "plt.title('Trace comparison of solutions')\n", "plt.legend(loc=\"upper right\")\n", - "plt.xlim([50,90])\n", + "plt.xlim([50, 90])\n", "plt.ylim([-0.7 * amax, +amax])\n", "\n", - "plt.subplot(2,2,4)\n", - "plt.plot(time, 10 * (recNum.data[:, 0] - recAna[:]), '-k', label='Difference x10')\n", + "plt.subplot(2, 2, 4)\n", + "plt.plot(time, 10 * (recNum.data[:, 0] - recAna[:]), '-k', 
label='Difference x10')\n", "plt.xlabel('Time (ms)')\n", "plt.ylabel('Amplitude')\n", "plt.title('Difference of solutions (x10)')\n", "plt.legend(loc=\"upper right\")\n", - "plt.xlim([50,90])\n", + "plt.xlim([50, 90])\n", "plt.ylim([-0.7 * amax, +amax])\n", "\n", "plt.tight_layout()\n", @@ -792,7 +792,7 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot linearization tests\n", - "plt.figure(figsize=(14,8))\n", + "plt.figure(figsize=(14, 8))\n", "\n", "expected1 = np.empty(nstep)\n", "expected2 = np.empty(nstep)\n", @@ -1124,13 +1124,13 @@ "diff = (f1g2+g1f2)/(f1g2-g1f2)\n", "\n", "tol = 100 * np.finfo(dtype).eps\n", - "print(\"f1g2, g1f2, diff, tol; %+.6e %+.6e %+.6e %+.6e\" % (f1g2, g1f2, diff, tol))\n", + "print(f\"f1g2, g1f2, diff, tol; {f1g2:+.6e} {g1f2:+.6e} {diff:+.6e} {tol:+.6e}\")\n", "\n", "# At last the unit test\n", "# Assert these dot products are float epsilon close in relative error\n", "assert diff < 100 * np.finfo(np.float32).eps\n", "\n", - "del f1,f2,g1,g2" + "del f1, f2, g1, g2" ] }, { diff --git a/examples/seismic/self_adjoint/test_wavesolver_iso.py b/examples/seismic/self_adjoint/test_wavesolver_iso.py index 7d55122608..1271174bfd 100644 --- a/examples/seismic/self_adjoint/test_wavesolver_iso.py +++ b/examples/seismic/self_adjoint/test_wavesolver_iso.py @@ -459,8 +459,7 @@ def analytic_response(): arms = np.max(np.abs(uAna)) drms = np.max(np.abs(diff)) - info("Maximum absolute numerical,analytic,diff; %+12.6e %+12.6e %+12.6e" % - (nrms, arms, drms)) + info(f"Maximum absolute numerical,analytic,diff; {nrms:+12.6e} {arms:+12.6e} {drms:+12.6e}") # This isnt a very strict tolerance ... tol = 0.1 diff --git a/examples/seismic/self_adjoint/utils.py b/examples/seismic/self_adjoint/utils.py index 6cdcf06c9d..4926564cd8 100644 --- a/examples/seismic/self_adjoint/utils.py +++ b/examples/seismic/self_adjoint/utils.py @@ -31,10 +31,10 @@ def setup_w_over_q(wOverQ, w, qmin, qmax, npad, sigma=0): sigma value for call to scipy gaussian smoother, default 5. 
""" # sanity checks - assert w > 0, "supplied w value [%f] must be positive" % (w) - assert qmin > 0, "supplied qmin value [%f] must be positive" % (qmin) - assert qmax > 0, "supplied qmax value [%f] must be positive" % (qmax) - assert npad > 0, "supplied npad value [%f] must be positive" % (npad) + assert w > 0, f"supplied w value [{w:f}] must be positive" + assert qmin > 0, f"supplied qmin value [{qmin:f}] must be positive" + assert qmax > 0, f"supplied qmax value [{qmax:f}] must be positive" + assert npad > 0, f"supplied npad value [{npad:f}] must be positive" for n in wOverQ.grid.shape: if n - 2*npad < 1: raise ValueError("2 * npad must not exceed dimension size!") @@ -47,12 +47,12 @@ def setup_w_over_q(wOverQ, w, qmin, qmax, npad, sigma=0): eqs = [Eq(wOverQ, 1)] for d in wOverQ.dimensions: # left - dim_l = SubDimension.left(name='abc_%s_l' % d.name, parent=d, + dim_l = SubDimension.left(name=f'abc_{d.name}_l', parent=d, thickness=npad) pos = Abs(dim_l - d.symbolic_min) / float(npad) eqs.append(Eq(wOverQ.subs({d: dim_l}), Min(wOverQ.subs({d: dim_l}), pos))) # right - dim_r = SubDimension.right(name='abc_%s_r' % d.name, parent=d, + dim_r = SubDimension.right(name=f'abc_{d.name}_r', parent=d, thickness=npad) pos = Abs(d.symbolic_max - dim_r) / float(npad) eqs.append(Eq(wOverQ.subs({d: dim_r}), Min(wOverQ.subs({d: dim_r}), pos))) diff --git a/examples/seismic/source.py b/examples/seismic/source.py index 12e420565d..743c91b327 100644 --- a/examples/seismic/source.py +++ b/examples/seismic/source.py @@ -74,8 +74,7 @@ def __init__(self, start=None, step=None, num=None, stop=None): self.num = int(num) def __str__(self): - return "TimeAxis: start=%g, stop=%g, step=%g, num=%g" % \ - (self.start, self.stop, self.step, self.num) + return f"TimeAxis: start={self.start:g}, stop={self.stop:g}, step={self.step:g}, num={self.num:g}" def _rebuild(self): return TimeAxis(start=self.start, stop=self.stop, num=self.num) diff --git a/examples/seismic/tti/operators.py 
b/examples/seismic/tti/operators.py index 3e79d974bd..7ce74f1663 100644 --- a/examples/seismic/tti/operators.py +++ b/examples/seismic/tti/operators.py @@ -151,10 +151,7 @@ def Gh_centered(model, field): ------- Sum of the 3D rotated second order derivative in the direction x and y. """ - if model.dim == 3: - Gzz = Gzz_centered(model, field) - else: - Gzz = Gzz_centered_2d(model, field) + Gzz = Gzz_centered(model, field) if model.dim == 3 else Gzz_centered_2d(model, field) b = getattr(model, 'b', None) if b is not None: _diff = lambda f, d: getattr(f, f'd{d.name}') diff --git a/examples/seismic/tti/tti_example.py b/examples/seismic/tti/tti_example.py index aebbac62c7..1452f8d671 100644 --- a/examples/seismic/tti/tti_example.py +++ b/examples/seismic/tti/tti_example.py @@ -94,15 +94,9 @@ def test_tti_stability(shape, kernel): args = parser.parse_args() if args.constant: - if args.azi: - preset = 'constant-tti-noazimuth' - else: - preset = 'constant-tti' + preset = 'constant-tti-noazimuth' if args.azi else 'constant-tti' else: - if args.azi: - preset = 'layers-tti-noazimuth' - else: - preset = 'layers-tti' + preset = 'layers-tti-noazimuth' if args.azi else 'layers-tti' # Preset parameters ndim = args.ndim diff --git a/examples/seismic/tti/wavesolver.py b/examples/seismic/tti/wavesolver.py index 3ca1c740d4..ae6c23eef5 100644 --- a/examples/seismic/tti/wavesolver.py +++ b/examples/seismic/tti/wavesolver.py @@ -39,12 +39,11 @@ def __init__(self, model, geometry, space_order=4, kernel='centered', raise ValueError("Free surface only supported for centered TTI kernel") if space_order % 2 != 0: - raise ValueError("space_order must be even but got %s" - % space_order) + raise ValueError(f"space_order must be even but got {space_order}") if space_order % 4 != 0: warning("It is recommended for space_order to be a multiple of 4" + - "but got %s" % space_order) + f"but got {space_order}") self.space_order = space_order diff --git a/examples/seismic/tutorials/01_modelling.ipynb 
b/examples/seismic/tutorials/01_modelling.ipynb index 5fe0e78a8c..14e4ab876c 100644 --- a/examples/seismic/tutorials/01_modelling.ipynb +++ b/examples/seismic/tutorials/01_modelling.ipynb @@ -73,7 +73,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Adding ignore due to (probably an np notebook magic) bug\n", "import numpy as np\n", "%matplotlib inline" @@ -117,7 +117,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import Model, plot_velocity\n", "\n", "# Define a physical size\n", @@ -193,7 +193,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import RickerSource\n", "\n", "f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)\n", @@ -234,7 +234,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import Receiver\n", "\n", "# Create symbol for 101 receivers\n", @@ -414,7 +414,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator\n", "\n", "op = Operator([stencil] + src_term + rec_term, subs=model.spacing_map)" @@ -456,7 +456,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(time=time_range.num-1, dt=model.critical_dt)" ] }, @@ -484,7 +484,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import plot_shotrecord\n", "\n", "plot_shotrecord(rec.data, model, t0, tn)" diff --git a/examples/seismic/tutorials/02_rtm.ipynb b/examples/seismic/tutorials/02_rtm.ipynb index 7ed3736463..11072cedbc 100644 --- a/examples/seismic/tutorials/02_rtm.ipynb +++ b/examples/seismic/tutorials/02_rtm.ipynb @@ -140,7 +140,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import plot_velocity, plot_perturbation\n", "from devito import gaussian_smooth\n", "\n", @@ 
-184,7 +184,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Define acquisition geometry: source\n", "from examples.seismic import AcquisitionGeometry\n", "\n", @@ -229,7 +229,7 @@ "from examples.seismic.acoustic import AcousticWaveSolver\n", "\n", "solver = AcousticWaveSolver(model, geometry, space_order=4)\n", - "true_d , _, _ = solver.forward(vp=model.vp)" + "true_d, _, _ = solver.forward(vp=model.vp)" ] }, { @@ -279,7 +279,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Plot shot record for true and smooth velocity model and the difference\n", "from examples.seismic import plot_shotrecord\n", "\n", @@ -346,6 +346,7 @@ "from devito import TimeFunction, Operator, Eq, solve\n", "from examples.seismic import PointSource\n", "\n", + "\n", "def ImagingOperator(model, image):\n", " # Define the wavefield with the size of the model and the time dimension\n", " v = TimeFunction(name='v', grid=model.grid, time_order=2, space_order=4)\n", @@ -405,7 +406,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Prepare the varying source locations\n", "source_locations = np.empty((nshots, 2), dtype=np.float32)\n", @@ -534,7 +535,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import plot_image\n", "\n", "# Plot the inverted image\n", diff --git a/examples/seismic/tutorials/03_fwi.ipynb b/examples/seismic/tutorials/03_fwi.ipynb index c309542c12..cf66ab32b8 100644 --- a/examples/seismic/tutorials/03_fwi.ipynb +++ b/examples/seismic/tutorials/03_fwi.ipynb @@ -104,7 +104,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import demo_model, plot_velocity, plot_perturbation\n", "\n", "# Define true and initial model\n", @@ -117,7 +117,7 @@ "\n", "model0 = demo_model('circle-isotropic', vp_circle=2.5, vp_background=2.5,\n", " origin=origin, shape=shape, spacing=spacing, nbl=40,\n", 
- " grid = model.grid)\n", + " grid=model.grid)\n", "\n", "plot_velocity(model)\n", "plot_velocity(model0)\n", @@ -160,7 +160,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Define acquisition geometry: source\n", "from examples.seismic import AcquisitionGeometry\n", "\n", @@ -204,7 +204,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Plot acquisition geometry\n", "plot_velocity(model, source=geometry.src_positions,\n", " receiver=geometry.rec_positions[::4, :])" @@ -279,7 +279,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import plot_shotrecord\n", "\n", "# Plot shot record for true and smooth velocity model and the difference\n", @@ -357,7 +357,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Prepare the varying source locations sources\n", "source_locations = np.empty((nshots, 2), dtype=np.float32)\n", @@ -375,6 +375,7 @@ "source": [ "from devito import Eq, Operator\n", "\n", + "\n", "# Computes the residual between observed and synthetic data into the residual\n", "def compute_residual(residual, dobs, dsyn):\n", " if residual.grid.distributor.is_parallel:\n", @@ -383,7 +384,7 @@ " # same position\n", " assert np.allclose(dobs.coordinates.data[:], dsyn.coordinates.data)\n", " assert np.allclose(residual.coordinates.data[:], dsyn.coordinates.data)\n", - " # Create a difference operator\n", + " # Create a difference operator\n", " diff_eq = Eq(residual, dsyn.subs({dsyn.dimensions[-1]: residual.dimensions[-1]}) -\n", " dobs.subs({dobs.dimensions[-1]: residual.dimensions[-1]}))\n", " Operator(diff_eq)()\n", @@ -404,6 +405,7 @@ "from devito import Function, norm\n", "from examples.seismic import Receiver\n", "\n", + "\n", "def fwi_gradient(vp_in):\n", " # Create symbols to hold the gradient\n", " grad = Function(name=\"grad\", grid=model.grid)\n", @@ -492,7 +494,7 @@ } ], "source": [ - 
"#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import mmax\n", "from examples.seismic import plot_image\n", "\n", @@ -522,6 +524,8 @@ "outputs": [], "source": [ "from devito import Min, Max\n", + "\n", + "\n", "# Define bounding box constraints on the solution.\n", "def update_with_box(vp, alpha, dm, vmin=2.0, vmax=3.5):\n", " \"\"\"\n", @@ -579,7 +583,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "from devito import mmax\n", "\n", @@ -600,7 +604,7 @@ " alpha = .05 / mmax(direction)\n", "\n", " # Update the model estimate and enforce minimum/maximum values\n", - " update_with_box(model0.vp , alpha , direction)\n", + " update_with_box(model0.vp, alpha, direction)\n", "\n", " # Log the progress made\n", " print('Objective value is %f at iteration %d' % (phi, i+1))" @@ -623,7 +627,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot inverted velocity model\n", "plot_velocity(model0)" @@ -646,7 +650,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "import matplotlib.pyplot as plt\n", "\n", "# Plot objective function decrease\n", diff --git a/examples/seismic/tutorials/04_dask.ipynb b/examples/seismic/tutorials/04_dask.ipynb index 5677323a22..4ca73c02ed 100644 --- a/examples/seismic/tutorials/04_dask.ipynb +++ b/examples/seismic/tutorials/04_dask.ipynb @@ -105,7 +105,7 @@ "\n", "# Initial model\n", "model0 = demo_model('circle-isotropic', vp_circle=2.5, vp_background=2.5,\n", - " origin=origin, shape=shape, spacing=spacing, nbl=nbl, grid = model1.grid)" + " origin=origin, shape=shape, spacing=spacing, nbl=nbl, grid=model1.grid)" ] }, { @@ -176,6 +176,7 @@ "source": [ "from examples.seismic.acoustic import AcousticWaveSolver\n", "\n", + "\n", "# Serial modeling function\n", "def forward_modeling_single_shot(model, geometry, save=False, dt=4.0):\n", " solver = AcousticWaveSolver(model, geometry, space_order=4)\n", @@ -203,7 +204,7 @@ " for i in range(geometry.nsrc):\n", "\n", " # 
Geometry for current shot\n", - " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i,:],\n", + " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i, :],\n", " geometry.t0, geometry.tn, f0=geometry.f0, src_type=geometry.src_type)\n", "\n", " # Call serial modeling function for each index\n", @@ -332,6 +333,7 @@ "from devito import Function\n", "from examples.seismic import Receiver\n", "\n", + "\n", "# Serial FWI objective function\n", "def fwi_objective_single_shot(model, geometry, d_obs):\n", "\n", @@ -376,7 +378,7 @@ " for i in range(geometry.nsrc):\n", "\n", " # Geometry for current shot\n", - " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i,:],\n", + " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i, :],\n", " geometry.t0, geometry.tn, f0=geometry.f0, src_type=geometry.src_type)\n", "\n", " # Call serial FWI objective function for each shot location\n", @@ -546,6 +548,8 @@ "source": [ "# Callback to track model error\n", "model_error = []\n", + "\n", + "\n", "def fwi_callback(xk):\n", " vp = model1.vp.data[model1.nbl:-model1.nbl, model1.nbl:-model1.nbl]\n", " m = 1.0 / (vp.reshape(-1).astype(np.float64))**2\n", @@ -889,7 +893,7 @@ "ftol = 0.1\n", "maxiter = 5\n", "result = optimize.minimize(loss, m0, args=(model0, geometry0, d_obs), method='L-BFGS-B', jac=True,\n", - " callback=fwi_callback, bounds=bounds, options={'ftol':ftol, 'maxiter':maxiter, 'disp':True})" + " callback=fwi_callback, bounds=bounds, options={'ftol': ftol, 'maxiter': maxiter, 'disp': True})" ] }, { diff --git a/examples/seismic/tutorials/04_dask_pickling.ipynb b/examples/seismic/tutorials/04_dask_pickling.ipynb index 1fe51dd70e..bfdd4e136a 100644 --- a/examples/seismic/tutorials/04_dask_pickling.ipynb +++ b/examples/seismic/tutorials/04_dask_pickling.ipynb @@ -78,7 +78,7 @@ "metadata": {}, "outputs": [], "source": [ - 
"#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Set up inversion parameters.\n", "param = {'t0': 0.,\n", @@ -115,6 +115,7 @@ " origin=param['origin'], shape=param['shape'],\n", " spacing=param['spacing'], nbl=param['nbl'])\n", "\n", + "\n", "def get_initial_model():\n", " '''The initial guess for the subsurface model.\n", " '''\n", @@ -125,6 +126,7 @@ " spacing=param['spacing'], nbl=param['nbl'],\n", " grid=grid)\n", "\n", + "\n", "def wrap_model(x, astype=None):\n", " '''Wrap a flat array as a subsurface model.\n", " '''\n", @@ -137,6 +139,7 @@ " model.update('vp', v_curr.reshape(model.shape))\n", " return model\n", "\n", + "\n", "def load_model(filename):\n", " \"\"\" Returns the current model. This is used by the\n", " worker to get the current model.\n", @@ -145,22 +148,26 @@ "\n", " return pkl['model']\n", "\n", + "\n", "def dump_model(filename, model):\n", " ''' Dump model to disk.\n", " '''\n", - " pickle.dump({'model':model}, open(filename, \"wb\"))\n", + " pickle.dump({'model': model}, open(filename, \"wb\"))\n", + "\n", "\n", "def load_shot_data(shot_id, dt):\n", " ''' Load shot data from disk, resampling to the model time step.\n", " '''\n", - " pkl = pickle.load(open(\"shot_%d.p\"%shot_id, \"rb\"))\n", + " pkl = pickle.load(open(\"shot_%d.p\" % shot_id, \"rb\"))\n", "\n", " return pkl['geometry'], pkl['rec'].resample(dt)\n", "\n", + "\n", "def dump_shot_data(shot_id, rec, geometry):\n", " ''' Dump shot data to disk.\n", " '''\n", - " pickle.dump({'rec':rec, 'geometry': geometry}, open('shot_%d.p'%shot_id, \"wb\"))\n", + " pickle.dump({'rec': rec, 'geometry': geometry}, open('shot_%d.p' % shot_id, \"wb\"))\n", + "\n", "\n", "def generate_shotdata_i(param):\n", " \"\"\" Inversion crime alert! 
Here the worker is creating the\n", @@ -174,12 +181,13 @@ " solver = cp['solver']\n", "\n", " # source position changes according to the index\n", - " shot_id=param['shot_id']\n", + " shot_id = param['shot_id']\n", "\n", - " solver.geometry.src_positions[0,:]=[20, shot_id*1000./(param['nshots']-1)]\n", + " solver.geometry.src_positions[0, :] = [20, shot_id*1000./(param['nshots']-1)]\n", " true_d = solver.forward()[0]\n", " dump_shot_data(shot_id, true_d.resample(4.0), solver.geometry.src_positions)\n", "\n", + "\n", "def generate_shotdata(solver):\n", " # Pick devito objects (save on disk)\n", " cp = {'solver': solver}\n", @@ -188,7 +196,7 @@ "\n", " work = [dict(param) for i in range(param['nshots'])]\n", " # synthetic data is generated here twice: serial(loop below) and parallel (via dask map functionality)\n", - " for i in range(param['nshots']):\n", + " for i in range(param['nshots']):\n", " work[i]['shot_id'] = i\n", " generate_shotdata_i(work[i])\n", "\n", @@ -278,7 +286,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Client setup\n", "cluster = LocalCluster(n_workers=2, death_timeout=600)\n", @@ -293,7 +301,7 @@ "# Set up receiver data and geometry.\n", "rec_coordinates = np.empty((nreceivers, len(param['shape'])))\n", "rec_coordinates[:, 1] = np.linspace(param['spacing'][0], true_model.domain_size[0] - param['spacing'][0], num=nreceivers)\n", - "rec_coordinates[:, 0] = 980. # 20m from the right end\n", + "rec_coordinates[:, 0] = 980. 
# 20m from the right end\n", "# Geometry\n", "geometry = AcquisitionGeometry(true_model, rec_coordinates, src_coordinates,\n", " param['t0'], param['tn'], src_type='Ricker',\n", @@ -360,9 +368,10 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Function\n", "\n", + "\n", "# Create FWI gradient kernel for a single shot\n", "def fwi_gradient_i(param):\n", "\n", @@ -386,7 +395,7 @@ " solver = cp['solver']\n", "\n", " # Set attributes to solver\n", - " solver.geometry.src_positions=src_positions\n", + " solver.geometry.src_positions = src_positions\n", " solver.geometry.resample(dt)\n", "\n", " # Compute simulated data and full forward wavefield u0\n", @@ -397,7 +406,7 @@ " time_range=solver.geometry.time_axis,\n", " coordinates=solver.geometry.rec_positions)\n", "\n", - " #residual.data[:] = d.data[:residual.shape[0], :] - rec.data[:residual.shape[0], :]\n", + " # residual.data[:] = d.data[:residual.shape[0], :] - rec.data[:residual.shape[0], :]\n", " residual.data[:] = d.data[:] - rec.data[0:d.data.shape[0], :]\n", " f = .5*np.linalg.norm(residual.data.flatten())**2\n", "\n", @@ -437,7 +446,7 @@ "\n", " # Define work list\n", " work = [dict(param) for i in range(param['nshots'])]\n", - " for i in range(param['nshots']):\n", + " for i in range(param['nshots']):\n", " work[i]['shot_id'] = i\n", "\n", " # Distribute worklist to workers.\n", @@ -469,16 +478,20 @@ "# function that can operate on the solution after every iteration. 
Here\n", "# we use this to monitor the true relative solution error.\n", "relative_error = []\n", + "\n", + "\n", "def fwi_callbacks(x):\n", " # Calculate true relative error\n", " true_vp = get_true_model().vp.data[param['nbl']:-param['nbl'], param['nbl']:-param['nbl']]\n", " true_m = 1.0 / (true_vp.reshape(-1).astype(np.float64))**2\n", " relative_error.append(np.linalg.norm((x-true_m)/true_m))\n", "\n", + "\n", "# FWI with L-BFGS\n", "ftol = 0.1\n", "maxiter = 5\n", "\n", + "\n", "def fwi(model, param, ftol=ftol, maxiter=maxiter):\n", " # Initial guess\n", " v0 = model.vp.data[param['nbl']:-param['nbl'], param['nbl']:-param['nbl']]\n", @@ -492,9 +505,9 @@ " result = optimize.minimize(fwi_gradient,\n", " m0, args=(param, ), method='L-BFGS-B', jac=True,\n", " bounds=bounds, callback=fwi_callbacks,\n", - " options={'ftol':ftol,\n", - " 'maxiter':maxiter,\n", - " 'disp':True})\n", + " options={'ftol': ftol,\n", + " 'maxiter': maxiter,\n", + " 'disp': True})\n", "\n", " return result" ] @@ -832,7 +845,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "model0 = get_initial_model()\n", "\n", @@ -870,11 +883,11 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Plot FWI result\n", "\n", - "slices = tuple(slice(param['nbl'],-param['nbl']) for _ in range(2))\n", + "slices = tuple(slice(param['nbl'], -param['nbl']) for _ in range(2))\n", "vp = 1.0/np.sqrt(result['x'].reshape(true_model.shape))\n", "plot_image(true_model.vp.data[slices], vmin=2.4, vmax=2.8, cmap=\"cividis\")\n", "plot_image(vp, vmin=2.4, vmax=2.8, cmap=\"cividis\")" @@ -897,7 +910,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "import matplotlib.pyplot as plt\n", "\n", "# Plot model error\n", diff --git a/examples/seismic/tutorials/05_staggered_acoustic.ipynb b/examples/seismic/tutorials/05_staggered_acoustic.ipynb index 03f6c62b45..e342d2aacc 100644 --- a/examples/seismic/tutorials/05_staggered_acoustic.ipynb +++ 
b/examples/seismic/tutorials/05_staggered_acoustic.ipynb @@ -68,7 +68,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "src.show()" ] @@ -191,7 +191,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Propagate the source\n", "op_2(time=src.time_range.num-1, dt=dt)" @@ -234,7 +234,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Let's see what we got....\n", "plot_image(v[0].data[0])\n", @@ -293,7 +293,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op_4 = Operator([u_v_4, u_p_4] + src_p)\n", "# Propagate the source\n", "op_4(time=src.time_range.num-1, dt=dt)" @@ -336,7 +336,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Let's see what we got....\n", "plot_image(v4[0].data[-1])\n", diff --git a/examples/seismic/tutorials/06_elastic.ipynb b/examples/seismic/tutorials/06_elastic.ipynb index 4ee235fc8f..8c6a80acdd 100644 --- a/examples/seismic/tutorials/06_elastic.ipynb +++ b/examples/seismic/tutorials/06_elastic.ipynb @@ -62,6 +62,7 @@ " a = 0.004\n", " return -2.*a*(t - 1/f0) * np.exp(-a * (t - 1/f0)**2)\n", "\n", + "\n", "# Timestep size from Eq. 7 with V_p=6000. and dx=100\n", "t0, tn = 0., 300.\n", "dt = (10. 
/ np.sqrt(2.)) / 6.\n", @@ -88,7 +89,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "src.show()" ] @@ -122,8 +123,8 @@ "density = 1.8\n", "\n", "# The source injection term\n", - "src_xx = src.inject(field=tau.forward[0,0], expr=src)\n", - "src_zz = src.inject(field=tau.forward[1,1], expr=src)\n", + "src_xx = src.inject(field=tau.forward[0, 0], expr=src)\n", + "src_zz = src.inject(field=tau.forward[1, 1], expr=src)\n", "\n", "# Thorbecke's parameter notation\n", "cp2 = V_p*V_p\n", @@ -140,7 +141,7 @@ "u_v = Eq(v.forward, solve(pde_v, v.forward))\n", "u_t = Eq(tau.forward, solve(pde_tau, tau.forward))\n", "\n", - "op = Operator([u_v] + [u_t] + src_xx + src_zz)" + "op = Operator([u_v] + [u_t] + src_xx + src_zz)" ] }, { @@ -193,7 +194,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "op(dt=dt)" ] @@ -255,14 +256,14 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Let's see what we got....\n", "plot_image(v[0].data[0], vmin=-.5*1e-1, vmax=.5*1e-1, cmap=\"seismic\")\n", "plot_image(v[1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", "plot_image(tau[0, 0].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", - "plot_image(tau[1,1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", - "plot_image(tau[0,1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")" + "plot_image(tau[1, 1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", + "plot_image(tau[0, 1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")" ] }, { @@ -271,7 +272,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "assert np.isclose(norm(v[0]), 0.6285093, atol=1e-4, rtol=0)" ] @@ -287,8 +288,8 @@ "v = VectorTimeFunction(name='v', grid=grid, space_order=so, time_order=1)\n", "tau = TensorTimeFunction(name='t', grid=grid, space_order=so, time_order=1)\n", "# The source injection term\n", - "src_xx = src.inject(field=tau.forward[0,0], 
expr=src)\n", - "src_zz = src.inject(field=tau.forward[1,1], expr=src)\n", + "src_xx = src.inject(field=tau.forward[0, 0], expr=src)\n", + "src_zz = src.inject(field=tau.forward[1, 1], expr=src)\n", "\n", "# First order elastic wave equation\n", "pde_v = v.dt - ro * div(tau)\n", @@ -297,7 +298,7 @@ "u_v = Eq(v.forward, solve(pde_v, v.forward))\n", "u_t = Eq(tau.forward, solve(pde_tau, tau.forward))\n", "\n", - "op = Operator([u_v]+ [u_t] + src_xx + src_zz )" + "op = Operator([u_v]+ [u_t] + src_xx + src_zz)" ] }, { @@ -327,12 +328,12 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "v[0].data.fill(0.)\n", "v[1].data.fill(0.)\n", - "tau[0,0].data.fill(0.)\n", - "tau[0,1].data.fill(0.)\n", - "tau[1,1].data.fill(0.)\n", + "tau[0, 0].data.fill(0.)\n", + "tau[0, 1].data.fill(0.)\n", + "tau[1, 1].data.fill(0.)\n", "\n", "op(dt=dt)" ] @@ -394,14 +395,14 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Let's see what we got....\n", "plot_image(v[0].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", "plot_image(v[1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", "plot_image(tau[0, 0].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", - "plot_image(tau[1,1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", - "plot_image(tau[0,1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")" + "plot_image(tau[1, 1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", + "plot_image(tau[0, 1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")" ] }, { @@ -410,7 +411,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "assert np.isclose(norm(v[0]), 0.62521476, atol=1e-4, rtol=0)" ] diff --git a/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb b/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb index 1663635e8a..e53394a3dc 100644 --- a/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb +++ 
b/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb @@ -53,7 +53,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Initial grid: 3km x 3km, with spacing 10m\n", "nlayers = 5\n", "so = 8\n", @@ -78,7 +78,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "aspect_ratio = model.shape[0]/model.shape[1]\n", "\n", "plt_options_model = {'cmap': 'jet', 'extent': [model.origin[0], model.origin[0] + model.domain_size[0],\n", @@ -146,7 +146,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "src.show()" ] @@ -257,7 +257,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "from examples.seismic import plot_velocity\n", "plot_velocity(model, source=src.coordinates.data,\n", " receiver=rec.coordinates.data[::10, :])\n", @@ -280,7 +280,7 @@ "pde_tau = tau.dt - l * diag(div(v.forward)) - mu * (grad(v.forward) + grad(v.forward).transpose(inner=False))\n", "# Time update\n", "u_v = Eq(v.forward, model.damp * solve(pde_v, v.forward))\n", - "u_t = Eq(tau.forward, model.damp * solve(pde_tau, tau.forward))\n", + "u_t = Eq(tau.forward, model.damp * solve(pde_tau, tau.forward))\n", "\n", "op = Operator([u_v] + [u_t] + src_xx + src_zz + rec_term)" ] @@ -371,7 +371,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Partial ru for 1.2sec to plot the wavefield\n", "op(dt=model.critical_dt, time_M=int(1000/model.critical_dt))" ] @@ -393,10 +393,10 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "scale = .5*1e-3\n", "\n", - "plt_options_model = {'extent': [model.origin[0] , model.origin[0] + model.domain_size[0],\n", + "plt_options_model = {'extent': [model.origin[0], model.origin[0] + model.domain_size[0],\n", " model.origin[1] + model.domain_size[1], model.origin[1]]}\n", "\n", "\n", @@ -416,7 +416,7 @@ "ax[0, 1].set_xlabel('X (m)', fontsize=20)\n", "ax[0, 1].set_ylabel('Depth (m)', fontsize=20)\n", "\n", - "ax[1, 
0].imshow(np.transpose(tau[0,0].data[0][slices]+tau[1,1].data[0][slices]),\n", + "ax[1, 0].imshow(np.transpose(tau[0, 0].data[0][slices]+tau[1, 1].data[0][slices]),\n", " vmin=-10*scale, vmax=10*scale, cmap=\"RdGy\", **plt_options_model)\n", "ax[1, 0].imshow(np.transpose(model.lam.data[slices]), vmin=2.5, vmax=15.0, cmap=\"jet\",\n", " alpha=.5, **plt_options_model)\n", @@ -426,7 +426,7 @@ "ax[1, 0].set_ylabel('Depth (m)', fontsize=20)\n", "\n", "\n", - "ax[1, 1].imshow(np.transpose(tau[0,1].data[0][slices]), vmin=-scale, vmax=scale, cmap=\"RdGy\", **plt_options_model)\n", + "ax[1, 1].imshow(np.transpose(tau[0, 1].data[0][slices]), vmin=-scale, vmax=scale, cmap=\"RdGy\", **plt_options_model)\n", "ax[1, 1].imshow(np.transpose(model.lam.data[slices]), vmin=2.5, vmax=15.0, cmap=\"jet\", alpha=.5, **plt_options_model)\n", "ax[1, 1].set_aspect('auto')\n", "ax[1, 1].set_title('τ_xy', fontsize=20)\n", @@ -490,7 +490,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Full run for the data\n", "op(dt=model.critical_dt, time_m=int(1000/model.critical_dt))" ] @@ -543,7 +543,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "# Pressure (txx + tzz) data at sea surface\n", "extent = [rec_plot.coordinates.data[0, 0], rec_plot.coordinates.data[-1, 0], 1e-3*tn, t0]\n", "aspect = rec_plot.coordinates.data[-1, 0]/(1e-3*tn)/.5\n", @@ -582,7 +582,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "# OBC data of vx/vz\n", "plt.figure(figsize=(15, 15))\n", "plt.subplot(121)\n", @@ -676,7 +676,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Partial ru for 1.2sec to plot the wavefield\n", "op(dt=model.critical_dt, time_M=int(1000/model.critical_dt))" ] @@ -698,10 +698,10 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "scale = 1e-4\n", "\n", - "plt_options_model = {'extent': [model.origin[0] , model.origin[0] + model.domain_size[0],\n", + "plt_options_model = {'extent': [model.origin[0], 
model.origin[0] + model.domain_size[0],\n", " model.origin[1] + model.domain_size[1], model.origin[1]]}\n", "\n", "\n", @@ -721,7 +721,7 @@ "ax[0, 1].set_xlabel('X (m)', fontsize=20)\n", "ax[0, 1].set_ylabel('Depth (m)', fontsize=20)\n", "\n", - "ax[1, 0].imshow(np.transpose(tau0[0,0].data[slices]+tau0[1,1].data[slices]),\n", + "ax[1, 0].imshow(np.transpose(tau0[0, 0].data[slices]+tau0[1, 1].data[slices]),\n", " vmin=-10*scale, vmax=10*scale, cmap=\"RdGy\", **plt_options_model)\n", "ax[1, 0].imshow(np.transpose(model.lam.data[slices]), vmin=2.5, vmax=15.0, cmap=\"jet\",\n", " alpha=.5, **plt_options_model)\n", @@ -731,7 +731,7 @@ "ax[1, 0].set_ylabel('Depth (m)', fontsize=20)\n", "\n", "\n", - "ax[1, 1].imshow(np.transpose(tau0[0,1].data[slices]), vmin=-scale, vmax=scale, cmap=\"RdGy\", **plt_options_model)\n", + "ax[1, 1].imshow(np.transpose(tau0[0, 1].data[slices]), vmin=-scale, vmax=scale, cmap=\"RdGy\", **plt_options_model)\n", "ax[1, 1].imshow(np.transpose(model.lam.data[slices]), vmin=2.5, vmax=15.0, cmap=\"jet\", alpha=.5, **plt_options_model)\n", "ax[1, 1].set_aspect('auto')\n", "ax[1, 1].set_title('τ_xy', fontsize=20)\n", @@ -773,7 +773,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(dt=model.critical_dt, time_m=int(1000/model.critical_dt))" ] }, @@ -814,7 +814,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "# OBC data of vx/vz\n", "plt.figure(figsize=(15, 15))\n", "plt.subplot(121)\n", @@ -952,7 +952,7 @@ "pde_tau = tau_rsfd.dt - l * diag(div45(v_rsfd.forward)) - mu * (grad45(v_rsfd.forward) + grad45(v_rsfd.forward).transpose(inner=False))\n", "# Time update\n", "u_v = Eq(v_rsfd.forward, model.damp * solve(pde_v, v_rsfd.forward))\n", - "u_t = Eq(tau_rsfd.forward, model.damp * solve(pde_tau, tau_rsfd.forward))\n", + "u_t = Eq(tau_rsfd.forward, model.damp * solve(pde_tau, tau_rsfd.forward))\n", "\n", "# Receiver\n", "rec_term = rec.interpolate(expr=tau_rsfd[0, 0] + tau_rsfd[1, 1])\n", @@ -995,7 +995,7 @@ } ], 
"source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(dt=model.critical_dt)" ] }, @@ -1037,7 +1037,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "# Pressure (txx + tzz) data at sea surface\n", "extent = [rec_plot.coordinates.data[0, 0], rec_plot.coordinates.data[-1, 0], 1e-3*tn, t0]\n", "aspect = rec_plot.coordinates.data[-1, 0]/(1e-3*tn)/.5\n", @@ -1076,7 +1076,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "# OBC data of vx/vz\n", "plt.figure(figsize=(15, 15))\n", "plt.subplot(121)\n", diff --git a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb index 70b5e5c51b..b914c43c33 100644 --- a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb +++ b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb @@ -196,13 +196,14 @@ "fmax = 100\n", "\n", "# Spatial parameters\n", - "extent = 1000 # 1km\n", + "extent = 1000 # 1km\n", "npoints = 140\n", "h = extent/npoints\n", "\n", "# Time parameters\n", "dt = 0.0008\n", "\n", + "\n", "def critical_dt(weights, h=1000/140, vmax=5500):\n", " return float(h*np.sqrt(2/np.sum([np.abs(a) for a in weights]))/vmax)" ] @@ -387,7 +388,7 @@ " else:\n", " m = len(weights)\n", " cosines = np.array(\n", - " np.cos(np.arange(1, m)*k*h*np.cos(alpha)) + \\\n", + " np.cos(np.arange(1, m)*k*h*np.cos(alpha)) +\n", " np.cos(np.arange(1, m)*k*h*np.sin(alpha)) - 2\n", " )\n", " total = np.sum(np.array(weights)[1:]*cosines)\n", @@ -557,9 +558,10 @@ "\n", " return ax, (alpha_slider, beta_slider)\n", "\n", + "\n", "fig, ax = plt.subplots(1, 2)\n", "widget_handle1 = plot_dispersion(fornberg, h, dt, velocity=vrange, ax=ax)\n", - "fig.set_size_inches(12,6)\n", + "fig.set_size_inches(12, 6)\n", "plt.show()" ] }, @@ -731,7 +733,7 @@ } ], "source": [ - "u , data , r = acoustic(weights=fornberg, h=h, dt=dt, v=1500)\n", + "u, data, r = acoustic(weights=fornberg, h=h, dt=dt, v=1500)\n", "um, datam, rm = acoustic(weights=fornberg, h=h, 
dt=dt/2, v=1500)\n", "up, datap, rp = acoustic(weights=fornberg, h=h, dt=3*dt, v=1500)" ] @@ -770,9 +772,10 @@ " 'g--', lw=2\n", " )\n", "\n", + "\n", "def plot_shot(data, ax, clip=0.1, extents=(0, 1000, 0, 0.6), vline=None, r=None, first_arrival=True):\n", " ax.imshow(\n", - " data[::-1,:],\n", + " data[::-1, :],\n", " extent=extents,\n", " vmin=-clip,\n", " vmax=clip,\n", @@ -788,7 +791,7 @@ " time = np.linspace(extents[2], extents[3], data.shape[0])\n", " space = np.linspace(extents[0], extents[1], data.shape[1])\n", " if not isinstance(first_arrival, np.ndarray):\n", - " arrival = time[np.argmax(np.abs(data)>0.01, axis=0)]\n", + " arrival = time[np.argmax(np.abs(data) > 0.01, axis=0)]\n", " ax.plot(space, arrival, c='red', lw=1)\n", " ax.annotate('first arrival',\n", " xy=((extents[1] - extents[0])/2, arrival[arrival.size//2]), xycoords='data',\n", @@ -808,6 +811,7 @@ " )\n", " return arrival\n", "\n", + "\n", "def plot_profile(array, ax, clip=1, extent=(0, 1), axis_labels=('x', 'A'), first_arrival=None):\n", " ax.plot(np.linspace(extent[0], extent[1], array.size), array)\n", " ax.set_xlim(extent)\n", @@ -870,13 +874,13 @@ "source": [ "fig, ax = plt.subplots(2, 3)\n", "plot_wave(um, ax[0, 0], hline=(0, 500, 500, 500), r=rm)\n", - "plot_wave( u, ax[0, 1], hline=(0, 500, 500, 500), r=r)\n", + "plot_wave(u, ax[0, 1], hline=(0, 500, 500, 500), r=r)\n", "plot_wave(up, ax[0, 2], hline=(0, 500, 500, 500), r=rp)\n", "\n", "shape = u.shape\n", - "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1,0], extent=(0, 500))\n", - "plot_profile( u[shape[0]//2, :shape[1]//2], ax[1,1], extent=(0, 500))\n", - "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1,2], extent=(0, 500))\n", + "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1, 0], extent=(0, 500))\n", + "plot_profile(u[shape[0]//2, :shape[1]//2], ax[1, 1], extent=(0, 500))\n", + "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1, 2], extent=(0, 500))\n", "\n", "fig.set_size_inches(12, 6)\n", "plt.show()" @@ -932,12 +936,12 @@ 
"fig, ax = plt.subplots(2, 3)\n", "\n", "arrival = plot_shot(datam, ax[0, 0], vline=(500, 0, 500, 0.6), r=rm)\n", - "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", + "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", "plot_shot(datap, ax[0, 2], vline=(500, 0, 500, 0.6), r=rp, first_arrival=arrival)\n", "\n", "width = data.shape[1]\n", "plot_profile(datam[:, width//2], ax[1, 0], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", - "plot_profile( data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", + "plot_profile(data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "plot_profile(datap[:, width//2], ax[1, 2], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "\n", "fig.set_size_inches(12, 6)\n", @@ -1050,6 +1054,7 @@ " y = x**2 + a[0] + 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(a[1:], m)], axis=0)\n", " return sp.integrate.trapezoid(y**2, x=x)\n", "\n", + "\n", "print(f'Value of objective function at initial guess: {objective(initial_guess)}')\n", "opt1 = sp.optimize.minimize(objective, initial_guess, method='SLSQP', constraints=constraints, options=dict(ftol=1e-15, maxiter=500))\n", "print(opt1)" @@ -1203,7 +1208,7 @@ "source": [ "fig, ax = plt.subplots(1, 2)\n", "widget_handle2 = plot_dispersion(drp_stencil1, h, dt, velocity=vrange, ax=ax)\n", - "fig.set_size_inches(12,6)\n", + "fig.set_size_inches(12, 6)\n", "plt.show()" ] }, @@ -1246,7 +1251,7 @@ } ], "source": [ - "u , data , r = acoustic(weights=drp_stencil1, h=h, dt=dt, v=1500)\n", + "u, data, r = acoustic(weights=drp_stencil1, h=h, dt=dt, v=1500)\n", "um, datam, rm = acoustic(weights=drp_stencil1, h=h, dt=dt/2, v=1500)\n", "up, datap, rp = acoustic(weights=drp_stencil1, h=h, dt=3*dt, v=1500)" ] @@ -1289,13 +1294,13 @@ "source": [ "fig, ax = plt.subplots(2, 3)\n", 
"plot_wave(um, ax[0, 0], hline=(0, 500, 500, 500), r=rm)\n", - "plot_wave( u, ax[0, 1], hline=(0, 500, 500, 500), r=r)\n", + "plot_wave(u, ax[0, 1], hline=(0, 500, 500, 500), r=r)\n", "plot_wave(up, ax[0, 2], hline=(0, 500, 500, 500), r=rp)\n", "\n", "shape = u.shape\n", - "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1,0], extent=(0, 500))\n", - "plot_profile( u[shape[0]//2, :shape[1]//2], ax[1,1], extent=(0, 500))\n", - "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1,2], extent=(0, 500))\n", + "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1, 0], extent=(0, 500))\n", + "plot_profile(u[shape[0]//2, :shape[1]//2], ax[1, 1], extent=(0, 500))\n", + "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1, 2], extent=(0, 500))\n", "\n", "fig.set_size_inches(12, 6)\n", "plt.show()" @@ -1347,12 +1352,12 @@ "fig, ax = plt.subplots(2, 3)\n", "\n", "arrival = plot_shot(datam, ax[0, 0], vline=(500, 0, 500, 0.6), r=rm)\n", - "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", + "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", "plot_shot(datap, ax[0, 2], vline=(500, 0, 500, 0.6), r=rp, first_arrival=arrival)\n", "\n", "width = data.shape[1]\n", "plot_profile(datam[:, width//2], ax[1, 0], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", - "plot_profile( data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", + "plot_profile(data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "plot_profile(datap[:, width//2], ax[1, 2], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "\n", "fig.set_size_inches(12, 6)\n", @@ -1843,7 +1848,7 @@ } ], "source": [ - "u , data , r = acoustic(weights=drp_stencil2, h=h, dt=dt, v=1500)\n", + "u, data, r = acoustic(weights=drp_stencil2, h=h, dt=dt, v=1500)\n", "um, datam, rm = acoustic(weights=drp_stencil2, h=h, dt=dt/2, 
v=1500)\n", "up, datap, rp = acoustic(weights=drp_stencil2, h=h, dt=2*dt, v=1500)" ] @@ -1886,13 +1891,13 @@ "source": [ "fig, ax = plt.subplots(2, 3)\n", "plot_wave(um, ax[0, 0], hline=(0, 500, 500, 500), r=rm)\n", - "plot_wave( u, ax[0, 1], hline=(0, 500, 500, 500), r=r)\n", + "plot_wave(u, ax[0, 1], hline=(0, 500, 500, 500), r=r)\n", "plot_wave(up, ax[0, 2], hline=(0, 500, 500, 500), r=rp)\n", "\n", "shape = u.shape\n", - "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1,0], extent=(0, 500))\n", - "plot_profile( u[shape[0]//2, :shape[1]//2], ax[1,1], extent=(0, 500))\n", - "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1,2], extent=(0, 500))\n", + "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1, 0], extent=(0, 500))\n", + "plot_profile(u[shape[0]//2, :shape[1]//2], ax[1, 1], extent=(0, 500))\n", + "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1, 2], extent=(0, 500))\n", "\n", "fig.set_size_inches(12, 6)\n", "plt.show()" @@ -1944,12 +1949,12 @@ "fig, ax = plt.subplots(2, 3)\n", "\n", "arrival = plot_shot(datam, ax[0, 0], vline=(500, 0, 500, 0.6), r=rm)\n", - "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", + "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", "plot_shot(datap, ax[0, 2], vline=(500, 0, 500, 0.6), r=rp, first_arrival=arrival)\n", "\n", "width = data.shape[1]\n", "plot_profile(datam[:, width//2], ax[1, 0], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", - "plot_profile( data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", + "plot_profile(data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "plot_profile(datap[:, width//2], ax[1, 2], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "\n", "fig.set_size_inches(12, 6)\n", diff --git a/examples/seismic/tutorials/07_DRP_schemes.ipynb 
b/examples/seismic/tutorials/07_DRP_schemes.ipynb index e6bcf5fe10..80f7c79b78 100644 --- a/examples/seismic/tutorials/07_DRP_schemes.ipynb +++ b/examples/seismic/tutorials/07_DRP_schemes.ipynb @@ -28,7 +28,7 @@ "Ny = Nx\n", "dx = Lx/(Nx-1)\n", "dy = dx\n", - "grid = Grid(shape=(Nx,Ny), extent=(Lx,Ly))\n", + "grid = Grid(shape=(Nx, Ny), extent=(Lx, Ly))\n", "\n", "# Define u(x,y,t) on this grid\n", "u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2)\n", @@ -143,7 +143,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import Model, plot_velocity\n", "%matplotlib inline\n", "\n", @@ -207,7 +207,7 @@ "\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", "\n", - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import RickerSource\n", "\n", "f0 = 0.025 # Source peak frequency is 25Hz (0.025 kHz)\n", @@ -304,7 +304,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(time=time_range.num-1, dt=dt)" ] }, @@ -333,7 +333,7 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "#import matplotlib\n", + "# import matplotlib\n", "import matplotlib.pyplot as plt\n", "from matplotlib import cm\n", "\n", @@ -351,7 +351,7 @@ "\n", "fig = plt.figure(figsize=(14, 7))\n", "ax1 = fig.add_subplot(111)\n", - "cont = ax1.imshow(u.data[0,:,:].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", + "cont = ax1.imshow(u.data[0, :, :].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", "fig.colorbar(cont)\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$z$')\n", @@ -378,9 +378,11 @@ "source": [ "from devito import SubDomain\n", "\n", + "\n", "# Define our 'upper' and 'lower' SubDomains:\n", "class Upper(SubDomain):\n", " name = 'upper'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " # We want our upper layer to span the entire x-dimension and all\n", @@ -388,8 +390,10 @@ " # the following notation:\n", " 
return {x: x, z: ('left', 80+nbl)}\n", "\n", + "\n", "class Lower(SubDomain):\n", " name = 'lower'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " # We want our lower layer to span the entire x-dimension and all\n", @@ -422,7 +426,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Create our model passing it our 'upper' and 'lower' subdomains:\n", "model = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n", @@ -482,15 +486,15 @@ "# Define our custom FD coefficients:\n", "x, z = model.grid.dimensions\n", "# Upper layer\n", - "weights_u = np.array([ 2.00462e-03, -1.63274e-02, 7.72781e-02,\n", - " -3.15476e-01, 1.77768e+00, -3.05033e+00,\n", - " 1.77768e+00, -3.15476e-01, 7.72781e-02,\n", - " -1.63274e-02, 2.00462e-03])\n", + "weights_u = np.array([2.00462e-03, -1.63274e-02, 7.72781e-02,\n", + " -3.15476e-01, 1.77768e+00, -3.05033e+00,\n", + " 1.77768e+00, -3.15476e-01, 7.72781e-02,\n", + " -1.63274e-02, 2.00462e-03])\n", "# Lower layer\n", - "weights_l = np.array([ 0. , 0. , 0.0274017,\n", - " -0.223818, 1.64875 , -2.90467,\n", - " 1.64875 , -0.223818, 0.0274017,\n", - " 0. , 0. 
])\n", + "weights_l = np.array([0., 0., 0.0274017,\n", + " -0.223818, 1.64875, -2.90467,\n", + " 1.64875, -0.223818, 0.0274017,\n", + " 0., 0.])\n", "# Create the Devito Coefficient objects:\n", "ux_u_coeffs = weights_u/x.spacing**2\n", "uz_u_coeffs = weights_u/z.spacing**2\n", @@ -547,7 +551,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(time=time_range.num-1, dt=dt)" ] }, @@ -578,7 +582,7 @@ "# NBVAL_IGNORE_OUTPUT\n", "fig = plt.figure(figsize=(14, 7))\n", "ax1 = fig.add_subplot(111)\n", - "cont = ax1.imshow(u_DRP.data[0,:,:].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", + "cont = ax1.imshow(u_DRP.data[0, :, :].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", "fig.colorbar(cont)\n", "ax1.axis([0, Lx, 0, Lz])\n", "ax1.set_xlabel('$x$')\n", @@ -616,7 +620,7 @@ "# NBVAL_IGNORE_OUTPUT\n", "fig = plt.figure(figsize=(14, 7))\n", "ax1 = fig.add_subplot(111)\n", - "cont = ax1.imshow(u_DRP.data[0,:,:].T-u.data[0,:,:].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", + "cont = ax1.imshow(u_DRP.data[0, :, :].T-u.data[0, :, :].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", "fig.colorbar(cont)\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$z$')\n", @@ -631,7 +635,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Wavefield norm checks\n", "assert np.isclose(np.linalg.norm(u.data[-1]), 82.170, atol=0, rtol=1e-4)\n", diff --git a/examples/seismic/tutorials/08_snapshotting.ipynb b/examples/seismic/tutorials/08_snapshotting.ipynb index 5474ff2a72..3cf255b8df 100644 --- a/examples/seismic/tutorials/08_snapshotting.ipynb +++ b/examples/seismic/tutorials/08_snapshotting.ipynb @@ -24,7 +24,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "%reset -f\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", @@ -87,8 +87,8 @@ "source": [ 
"# This cell sets up the problem that is already explained in the first TLE tutorial.\n", "\n", - "#NBVAL_IGNORE_OUTPUT\n", - "#%%flake8\n", + "# NBVAL_IGNORE_OUTPUT\n", + "# %%flake8\n", "from examples.seismic import Receiver\n", "from examples.seismic import RickerSource\n", "from examples.seismic import Model, plot_velocity, TimeAxis\n", @@ -141,7 +141,7 @@ "plot_velocity(model, source=src.coordinates.data,\n", " receiver=rec.coordinates.data[::4, :])\n", "\n", - "#Used for reshaping\n", + "# Used for reshaping\n", "vnx = nx+20\n", "vnz = nz+20\n", "\n", @@ -211,11 +211,11 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.rcParams['figure.figsize'] = (20, 20) # Increases figure size\n", "\n", - "imcnt = 1 # Image counter for plotting\n", - "plot_num = 5 # Number of images to plot\n", + "imcnt = 1 # Image counter for plotting\n", + "plot_num = 5 # Number of images to plot\n", "\n", "for i in range(0, nsnaps, int(nsnaps/plot_num)):\n", " plt.subplot(1, plot_num+1, imcnt+1)\n", @@ -249,21 +249,21 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "fobj = open(\"naivsnaps.bin\", \"rb\")\n", - "snaps = np.fromfile(fobj, dtype = np.float32)\n", - "snaps = np.reshape(snaps, (nsnaps, vnx, vnz)) #reshape vec2mtx, devito format. nx first\n", + "snaps = np.fromfile(fobj, dtype=np.float32)\n", + "snaps = np.reshape(snaps, (nsnaps, vnx, vnz)) # reshape vec2mtx, devito format. 
nx first\n", "fobj.close()\n", "\n", - "plt.rcParams['figure.figsize'] = (20,20) # Increases figure size\n", + "plt.rcParams['figure.figsize'] = (20, 20) # Increases figure size\n", "\n", - "imcnt = 1 # Image counter for plotting\n", - "plot_num = 5 # Number of images to plot\n", + "imcnt = 1 # Image counter for plotting\n", + "plot_num = 5 # Number of images to plot\n", "\n", "for i in range(0, nsnaps, int(nsnaps/plot_num)):\n", " plt.subplot(1, plot_num+1, imcnt+1)\n", " imcnt = imcnt + 1\n", - " plt.imshow(np.transpose(snaps[i,:,:]), vmin=-1, vmax=1, cmap=\"seismic\")\n", + " plt.imshow(np.transpose(snaps[i, :, :]), vmin=-1, vmax=1, cmap=\"seismic\")\n", "\n", "plt.show()" ] @@ -325,7 +325,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import ConditionalDimension\n", "\n", "nsnaps = 103 # desired number of equally spaced snaps\n", @@ -333,7 +333,7 @@ "\n", "print(f\"factor is {factor}\")\n", "\n", - "#Part 1 #############\n", + "# Part 1 #############\n", "time_subsampled = ConditionalDimension(\n", " 't_sub', parent=model.grid.time_dim, factor=factor)\n", "usave = TimeFunction(name='usave', grid=model.grid, time_order=2, space_order=2,\n", @@ -349,7 +349,7 @@ " expr=src * dt**2 / model.m)\n", "rec_term = rec.interpolate(expr=u)\n", "\n", - "#Part 2 #############\n", + "# Part 2 #############\n", "op1 = Operator([stencil] + src_term + rec_term,\n", " subs=model.spacing_map) # usual operator\n", "op2 = Operator([stencil] + src_term + [Eq(usave, u)] + rec_term,\n", @@ -360,7 +360,7 @@ "op2(time=nt - 2, dt=model.critical_dt)\n", "#####################\n", "\n", - "#Part 3 #############\n", + "# Part 3 #############\n", "print(\"Saving snaps file\")\n", "print(f\"Dimensions: nz = {nz + 2 * nb:d}, nx = {nx + 2 * nb:d}\")\n", "filename = \"snaps2.bin\"\n", @@ -392,7 +392,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "fobj = open(\"snaps2.bin\", \"rb\")\n", "snaps = np.fromfile(fobj, 
dtype=np.float32)\n", "snaps = np.reshape(snaps, (nsnaps, vnx, vnz))\n", @@ -400,13 +400,13 @@ "\n", "plt.rcParams['figure.figsize'] = (20, 20) # Increases figure size\n", "\n", - "imcnt = 1 # Image counter for plotting\n", - "plot_num = 5 # Number of images to plot\n", + "imcnt = 1 # Image counter for plotting\n", + "plot_num = 5 # Number of images to plot\n", "for i in range(0, plot_num):\n", " plt.subplot(1, plot_num, i+1)\n", " imcnt = imcnt + 1\n", " ind = i * int(nsnaps/plot_num)\n", - " plt.imshow(np.transpose(snaps[ind,:,:]), vmin=-1, vmax=1, cmap=\"seismic\")\n", + " plt.imshow(np.transpose(snaps[ind, :, :]), vmin=-1, vmax=1, cmap=\"seismic\")\n", "\n", "plt.show()" ] @@ -3340,8 +3340,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "#NBVAL_SKIP\n", + "# NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_SKIP\n", "from IPython.display import HTML\n", "import matplotlib.pyplot as plt\n", "import matplotlib.animation as animation\n", @@ -3362,10 +3362,12 @@ "plt.ylabel('z')\n", "plt.title('Modelling one shot over a 2-layer velocity model with Devito.')\n", "\n", + "\n", "def update(i):\n", " matrice.set_array(snapsObj[i, :, :].T)\n", " return matrice,\n", "\n", + "\n", "# Animation\n", "ani = animation.FuncAnimation(fig, update, frames=nsnaps, interval=50, blit=True)\n", "\n", diff --git a/examples/seismic/tutorials/09_viscoelastic.ipynb b/examples/seismic/tutorials/09_viscoelastic.ipynb index 76ab0fa2f7..799777fb26 100644 --- a/examples/seismic/tutorials/09_viscoelastic.ipynb +++ b/examples/seismic/tutorials/09_viscoelastic.ipynb @@ -40,8 +40,8 @@ "outputs": [], "source": [ "# Domain size:\n", - "extent = (200., 100., 100.) # 200 x 100 x 100 m domain\n", - "h = 1.0 # Desired grid spacing\n", + "extent = (200., 100., 100.) 
# 200 x 100 x 100 m domain\n", + "h = 1.0 # Desired grid spacing\n", "shape = (int(extent[0]/h+1), int(extent[1]/h+1), int(extent[2]/h+1))\n", "\n", "# Model physical parameters:\n", @@ -52,23 +52,23 @@ "rho = np.zeros(shape)\n", "\n", "# Set up three horizontally separated layers:\n", - "vp[:,:,:int(0.5*shape[2])+1] = 1.52\n", - "qp[:,:,:int(0.5*shape[2])+1] = 10000.\n", - "vs[:,:,:int(0.5*shape[2])+1] = 0.\n", - "qs[:,:,:int(0.5*shape[2])+1] = 0.\n", - "rho[:,:,:int(0.5*shape[2])+1] = 1.05\n", + "vp[:, :, :int(0.5*shape[2])+1] = 1.52\n", + "qp[:, :, :int(0.5*shape[2])+1] = 10000.\n", + "vs[:, :, :int(0.5*shape[2])+1] = 0.\n", + "qs[:, :, :int(0.5*shape[2])+1] = 0.\n", + "rho[:, :, :int(0.5*shape[2])+1] = 1.05\n", "\n", - "vp[:,:,int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 1.6\n", - "qp[:,:,int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 40.\n", - "vs[:,:,int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 0.4\n", - "qs[:,:,int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 30.\n", - "rho[:,:,int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 1.3\n", + "vp[:, :, int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 1.6\n", + "qp[:, :, int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 40.\n", + "vs[:, :, int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 0.4\n", + "qs[:, :, int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 30.\n", + "rho[:, :, int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 1.3\n", "\n", - "vp[:,:,int(0.5*shape[2])+1+int(4/h):] = 2.2\n", - "qp[:,:,int(0.5*shape[2])+1+int(4/h):] = 100.\n", - "vs[:,:,int(0.5*shape[2])+1+int(4/h):] = 1.2\n", - "qs[:,:,int(0.5*shape[2])+1+int(4/h):] = 70.\n", - "rho[:,:,int(0.5*shape[2])+1+int(4/h):] = 2." + "vp[:, :, int(0.5*shape[2])+1+int(4/h):] = 2.2\n", + "qp[:, :, int(0.5*shape[2])+1+int(4/h):] = 100.\n", + "vs[:, :, int(0.5*shape[2])+1+int(4/h):] = 1.2\n", + "qs[:, :, int(0.5*shape[2])+1+int(4/h):] = 70.\n", + "rho[:, :, int(0.5*shape[2])+1+int(4/h):] = 2." 
] }, { @@ -99,8 +99,8 @@ "# Create model\n", "origin = (0, 0, 0)\n", "spacing = (h, h, h)\n", - "so = 4 # FD space order (Note that the time order is by default 1).\n", - "nbl = 20 # Number of absorbing boundary layers cells\n", + "so = 4 # FD space order (Note that the time order is by default 1).\n", + "nbl = 20 # Number of absorbing boundary layers cells\n", "model = ModelViscoelastic(space_order=so, vp=vp, qp=qp, vs=vs, qs=qs,\n", " b=1/rho, origin=origin, shape=shape, spacing=spacing,\n", " nbl=nbl)" @@ -114,7 +114,7 @@ "source": [ "# As pointed out in Thorbecke's implementation and documentation, the viscoelastic wave equation is\n", "# not always stable with the standard elastic CFL condition. We enforce a smaller critical dt here\n", - "# to ensure the stability.\n", + "# to ensure the stability.\n", "model.dt_scale = .9" ] }, @@ -187,7 +187,7 @@ "# Memory variable:\n", "r = TensorTimeFunction(name='r', grid=model.grid, space_order=so, time_order=1)\n", "\n", - "s = model.grid.stepping_dim.spacing # Symbolic representation of the model grid spacing" + "s = model.grid.stepping_dim.spacing # Symbolic representation of the model grid spacing" ] }, { @@ -219,7 +219,7 @@ "e = grad(v.forward) + grad(v.forward).transpose(inner=False)\n", "\n", "# Stress equations\n", - "pde_tau = tau.dt - r.forward - l * t_ep / t_s * diag(div(v.forward)) - mu * t_es / t_s * e\n", + "pde_tau = tau.dt - r.forward - l * t_ep / t_s * diag(div(v.forward)) - mu * t_es / t_s * e\n", "u_t = Eq(tau.forward, model.damp * solve(pde_tau, tau.forward))\n", "\n", "# Memory variable equations:\n", @@ -276,7 +276,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Execute the operator:\n", "op(dt=dt)" @@ -362,7 +362,7 @@ } ], "source": [ - "np.mod(time_range.num,2)" + "np.mod(time_range.num, 2)" ] }, { @@ -419,7 +419,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Mid-points:\n", "mid_x = int(0.5*(v[0].data.shape[1]-1))+1\n", @@ -440,7 
+440,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "assert np.isclose(norm(v[0]), 0.102959, atol=1e-4, rtol=0)" ] diff --git a/examples/seismic/tutorials/10_nmo_correction.ipynb b/examples/seismic/tutorials/10_nmo_correction.ipynb index fb67563c40..107c0955f4 100644 --- a/examples/seismic/tutorials/10_nmo_correction.ipynb +++ b/examples/seismic/tutorials/10_nmo_correction.ipynb @@ -81,7 +81,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import Model, plot_velocity\n", "\n", "shape = (301, 501) # Number of grid point (nx, ny, nz)\n", @@ -90,9 +90,9 @@ "\n", "# Define a velocity profile. The velocity is in km/s\n", "v = np.empty(shape, dtype=np.float32)\n", - "v[:,:100] = 1.5\n", - "v[:,100:350] = 2.5\n", - "v[:,350:] = 4.5\n", + "v[:, :100] = 1.5\n", + "v[:, 100:350] = 2.5\n", + "v[:, 350:] = 4.5\n", "\n", "# With the velocity and model size defined, we can create the seismic model that\n", "# encapsulates these properties. We also define the size of the absorbing layer as 10 grid points\n", @@ -122,7 +122,7 @@ "\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", "\n", - "nrcv = 250 # Number of Receivers" + "nrcv = 250 # Number of Receivers" ] }, { @@ -131,7 +131,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import RickerSource\n", "\n", "f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)\n", @@ -145,7 +145,7 @@ "pde = model.m * u.dt2 - u.laplace + model.damp * u.dt\n", "stencil = Eq(u.forward, solve(pde, u.forward))\n", "\n", - "src.coordinates.data[:, 0] = 400 # Source coordinates\n", + "src.coordinates.data[:, 0] = 400 # Source coordinates\n", "src.coordinates.data[:, -1] = 20. 
# Depth is 20m" ] }, @@ -178,12 +178,12 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import Receiver\n", "\n", "rec = Receiver(name='rec', grid=model.grid, npoint=nrcv, time_range=time_range)\n", - "rec.coordinates.data[:,0] = np.linspace(src.coordinates.data[0, 0], model.domain_size[0], num=nrcv)\n", - "rec.coordinates.data[:,-1] = 20. # Depth is 20m\n", + "rec.coordinates.data[:, 0] = np.linspace(src.coordinates.data[0, 0], model.domain_size[0], num=nrcv)\n", + "rec.coordinates.data[:, -1] = 20. # Depth is 20m\n", "\n", "# Finally we define the source injection and receiver read function to generate the corresponding code\n", "src_term = src.inject(field=u.forward, expr=src * dt**2 / model.m)\n", @@ -213,7 +213,7 @@ "for i, coord in enumerate(rec.coordinates.data):\n", " off = (src.coordinates.data[0, 0] - coord[0])\n", " offset.append(off)\n", - " data.append(rec.data[:,i])" + " data.append(rec.data[:, i])" ] }, { @@ -229,7 +229,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "import matplotlib as mpl\n", "import matplotlib.pyplot as plt\n", "from matplotlib import cm\n", @@ -238,6 +238,7 @@ "mpl.rc('font', size=16)\n", "mpl.rc('figure', figsize=(8, 6))\n", "\n", + "\n", "def plot_traces(rec, xb, xe, t0, tn, colorbar=True):\n", " scale = np.max(rec)/100\n", " extent = [xb, xe, 1e-3*tn, t0]\n", @@ -285,7 +286,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_traces(np.transpose(data), rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)" ] }, @@ -323,8 +324,8 @@ "metadata": {}, "outputs": [], "source": [ - "ns = time_range.num # Number of samples in each trace\n", - "grid = Grid(shape=(ns, nrcv)) # Construction of grid with samples X traces dimension" + "ns = time_range.num # Number of samples in each trace\n", + "grid = Grid(shape=(ns, nrcv)) # Construction of grid with samples X traces 
dimension" ] }, { @@ -435,11 +436,11 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", - "dtms = model.critical_dt/1000 # Time discretization in ms\n", + "dtms = model.critical_dt/1000 # Time discretization in ms\n", "E1 = Eq(t_0, sample*dtms)\n", - "E2 = Eq(tt, sp.sqrt(t_0**2 + (off[trace]**2)/(vguide[sample]**2) ))\n", + "E2 = Eq(tt, sp.sqrt(t_0**2 + (off[trace]**2)/(vguide[sample]**2)))\n", "E3 = Eq(s, sp.floor(tt/dtms))\n", "op1 = Operator([E1, E2, E3])\n", "op1()" @@ -476,7 +477,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "s.data[s.data >= time_range.num] = 0\n", "E4 = Eq(snmo, amps[s[sample, trace], trace])\n", @@ -484,7 +485,7 @@ "op2 = Operator([E4])\n", "op2()\n", "\n", - "stack = snmo.data.sum(axis=1) # We can stack traces and create a ZO section!!!\n", + "stack = snmo.data.sum(axis=1) # We can stack traces and create a ZO section!!!\n", "\n", "plot_traces(snmo.data, rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)" ] diff --git a/examples/seismic/tutorials/11_viscoacoustic.ipynb b/examples/seismic/tutorials/11_viscoacoustic.ipynb index 0f6317dfe1..132dad6888 100644 --- a/examples/seismic/tutorials/11_viscoacoustic.ipynb +++ b/examples/seismic/tutorials/11_viscoacoustic.ipynb @@ -155,9 +155,9 @@ "for i in range(1, nlayers):\n", " v[..., i*int(shape[-1] / nlayers):] = vp_i[i] # Bottom velocity\n", "\n", - "qp[:] = 3.516*((v[:]*1000.)**2.2)*10**(-6) # Li's empirical formula\n", + "qp[:] = 3.516*((v[:]*1000.)**2.2)*10**(-6) # Li's empirical formula\n", "\n", - "rho[:] = 0.31*(v[:]*1000.)**0.25 # Gardner's relation" + "rho[:] = 0.31*(v[:]*1000.)**0.25 # Gardner's relation" ] }, { @@ -178,7 +178,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "model = ModelViscoacoustic(space_order=space_order, vp=v, qp=qp, b=1/rho,\n", " origin=origin, shape=shape, spacing=spacing,\n", " nbl=nbl)" @@ -201,7 +201,7 @@ } ], "source": [ - 
"#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "aspect_ratio = model.shape[0]/model.shape[1]\n", "\n", "plt_options_model = {'cmap': 'jet', 'extent': [model.origin[0], model.origin[0] + model.domain_size[0],\n", @@ -240,7 +240,7 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.005 # peak/dominant frequency\n", + "f0 = 0.005 # peak/dominant frequency\n", "b = model.b\n", "rho = 1./b\n", "\n", @@ -276,6 +276,7 @@ "source": [ "from examples.seismic import Receiver\n", "\n", + "\n", "def src_rec(p, model):\n", " src = RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range)\n", " src.coordinates.data[0, :] = np.array(model.domain_size) * .5\n", @@ -332,7 +333,7 @@ " slices = [slice(model.nbl, -model.nbl), slice(model.nbl, -model.nbl)]\n", " scale = .5*1e-3\n", "\n", - " plt_options_model = {'extent': [model.origin[0] , model.origin[0] + model.domain_size[0],\n", + " plt_options_model = {'extent': [model.origin[0], model.origin[0] + model.domain_size[0],\n", " model.origin[1] + model.domain_size[1], model.origin[1]]}\n", "\n", " fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 7))\n", @@ -459,7 +460,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "rec, v, p = modelling_SLS(model)" ] }, @@ -480,7 +481,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_receiver(rec)" ] }, @@ -510,7 +511,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_v_and_p(model, v, p)" ] }, @@ -622,7 +623,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "rec, v, p = modelling_KV(model)" ] }, @@ -643,7 +644,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_receiver(rec)" ] }, @@ -673,7 +674,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_v_and_p(model, v, p)" ] }, @@ -782,7 +783,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "rec, v, p = 
modelling_Maxwell(model)" ] }, @@ -803,7 +804,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_receiver(rec)" ] }, @@ -833,7 +834,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_v_and_p(model, v, p)" ] }, diff --git a/examples/seismic/tutorials/12_time_blocking.ipynb b/examples/seismic/tutorials/12_time_blocking.ipynb index 192532bf5a..d8cb5f93de 100644 --- a/examples/seismic/tutorials/12_time_blocking.ipynb +++ b/examples/seismic/tutorials/12_time_blocking.ipynb @@ -403,11 +403,11 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Define dimensions for the interior of the model\n", - "nx,nz = 101,101\n", + "nx, nz = 101, 101\n", "npad = 10\n", - "dx,dz = 20.0,20.0 # Grid spacing in m\n", + "dx, dz = 20.0, 20.0 # Grid spacing in m\n", "shape = (nx, nz) # Number of grid points\n", - "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", + "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", "origin = (0., 0.) # Origin of coordinate system, specified in m.\n", "extent = tuple([s*(n-1) for s, n in zip(spacing, shape)])\n", "\n", @@ -434,7 +434,7 @@ "m0 = Function(name='m0', grid=grid, space_order=space_order)\n", "b = Function(name='b', grid=grid, space_order=space_order)\n", "m0.data[:] = 1.5\n", - "b.data[:,:] = 1.0 / 1.0\n", + "b.data[:, :] = 1.0 / 1.0\n", "\n", "# Perturbation to velocity: a square offset from the center of the model\n", "dm = Function(name='dm', grid=grid, space_order=space_order)\n", @@ -462,7 +462,7 @@ "\n", "# Source 10 Hz center frequency\n", "src = RickerSource(name='src', grid=grid, f0=fpeak, npoint=1, time_range=time_range)\n", - "src.coordinates.data[0,:] = [dx * ((nx-1) / 2 - 10), dz * (nz-1) / 2]\n", + "src.coordinates.data[0, :] = [dx * ((nx-1) / 2 - 10), dz * (nz-1) / 2]\n", "\n", "# Receivers: for nonlinear forward and linearized forward\n", "# one copy each for save all and time blocking implementations\n", @@ -474,18 +474,16 @@ "nl_rec2 = Receiver(name='nl_rec2', 
grid=grid, npoint=nr, time_range=time_range)\n", "ln_rec1 = Receiver(name='ln_rec1', grid=grid, npoint=nr, time_range=time_range)\n", "ln_rec2 = Receiver(name='ln_rec2', grid=grid, npoint=nr, time_range=time_range)\n", - "nl_rec1.coordinates.data[:,0] = nl_rec2.coordinates.data[:,0] = \\\n", - " ln_rec1.coordinates.data[:,0] = ln_rec2.coordinates.data[:,0] = dx * ((nx-1) / 2 + 10)\n", - "nl_rec1.coordinates.data[:,1] = nl_rec2.coordinates.data[:,1] = \\\n", - " ln_rec1.coordinates.data[:,1] = ln_rec2.coordinates.data[:,1] = np.linspace(z1, z2, nr)\n", + "nl_rec1.coordinates.data[:, 0] = nl_rec2.coordinates.data[:, 0] = \\\n", + " ln_rec1.coordinates.data[:, 0] = ln_rec2.coordinates.data[:, 0] = dx * ((nx-1) / 2 + 10)\n", + "nl_rec1.coordinates.data[:, 1] = nl_rec2.coordinates.data[:, 1] = \\\n", + " ln_rec1.coordinates.data[:, 1] = ln_rec2.coordinates.data[:, 1] = np.linspace(z1, z2, nr)\n", "\n", "print(\"\")\n", - "print(\"src_coordinate X; %+12.4f\" % (src.coordinates.data[0,0]))\n", - "print(\"src_coordinate Z; %+12.4f\" % (src.coordinates.data[0,1]))\n", - "print(\"rec_coordinates X min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(nl_rec1.coordinates.data[:,0]), np.max(nl_rec1.coordinates.data[:,0])))\n", - "print(\"rec_coordinates Z min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(nl_rec1.coordinates.data[:,1]), np.max(nl_rec1.coordinates.data[:,1])))" + "print(f\"src_coordinate X; {src.coordinates.data[0, 0]:+12.4f}\")\n", + "print(f\"src_coordinate Z; {src.coordinates.data[0, 1]:+12.4f}\")\n", + "print(f\"rec_coordinates X min/max; {np.min(nl_rec1.coordinates.data[:, 0]):+12.4f} {np.max(nl_rec1.coordinates.data[:, 0]):+12.4f}\")\n", + "print(f\"rec_coordinates Z min/max; {np.min(nl_rec1.coordinates.data[:, 1]):+12.4f} {np.max(nl_rec1.coordinates.data[:, 1]):+12.4f}\")" ] }, { @@ -514,7 +512,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# note: flip sense of second dimension to make the plot positive downwards\n", "plt_extent 
= [origin[0], origin[0] + extent[0], origin[1] + extent[1], origin[1]]\n", "\n", @@ -522,16 +520,16 @@ "pmin, pmax = -1, +1\n", "dmin, dmax = 0.9, 1.1\n", "\n", - "plt.figure(figsize=(12,14))\n", + "plt.figure(figsize=(12, 14))\n", "\n", "# plot velocity\n", "plt.subplot(2, 2, 1)\n", "plt.imshow(np.transpose(m0.data), cmap=cm.jet,\n", " vmin=vmin, vmax=vmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", - "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1], \\\n", + "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -542,9 +540,9 @@ "plt.imshow(np.transpose(1 / b.data), cmap=cm.jet,\n", " vmin=dmin, vmax=dmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Density (m^3/kg)')\n", - "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1], \\\n", + "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -555,9 +553,9 @@ "plt.imshow(np.transpose(dm.data), cmap=\"seismic\",\n", " vmin=pmin, vmax=pmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", - "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1], \\\n", + "plt.plot(nl_rec1.coordinates.data[:, 0], 
nl_rec1.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -570,9 +568,9 @@ "plt.subplot(2, 2, 4)\n", "plt.imshow(np.transpose(q.data), cmap=cm.jet, vmin=lmin, vmax=lmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='log10(Q)')\n", - "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1], \\\n", + "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -661,7 +659,7 @@ "v2 = TimeFunction(name=\"v2\", grid=grid, time_order=2, space_order=space_order, save=Buffer(M))\n", "\n", "# get time and space dimensions\n", - "t,x,z = u1.dimensions\n", + "t, x, z = u1.dimensions\n", "\n", "# Source terms (see notebooks linked above for more detail)\n", "src1_term = src.inject(field=u1.forward, expr=src * t.spacing**2 * m0**2 / b)\n", @@ -671,13 +669,13 @@ "\n", "# The nonlinear forward time update equation\n", "update1 = (t.spacing**2 * m0**2 / b) * \\\n", - " ((b * u1.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \\\n", + " ((b * u1.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", " (b * u1.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2)) + \\\n", " (2 - t.spacing * wOverQ) * u1 + \\\n", " (t.spacing * wOverQ - 1) * u1.backward\n", "\n", "update2 = (t.spacing**2 * m0**2 / b) * \\\n", - " ((b * u2.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \\\n", + " 
((b * u2.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", " (b * u2.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2)) + \\\n", " (2 - t.spacing * wOverQ) * u2 + \\\n", " (t.spacing * wOverQ - 1) * u2.backward\n", @@ -691,7 +689,7 @@ "\n", "# Update spacing_map (see notebooks linked above for more detail)\n", "spacing_map = grid.spacing_map\n", - "spacing_map.update({t.spacing : dt})\n", + "spacing_map.update({t.spacing: dt})\n", "\n", "# Build the Operators\n", "nl_op1 = Operator([stencil1, src1_term, nl_rec1_term, v1_term], subs=spacing_map)\n", @@ -732,9 +730,9 @@ "\n", "# Continuous integration hooks for the save all timesteps implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", - "print(\"%.3e\" % norm(u1))\n", - "print(\"%.3e\" % norm(nl_rec1))\n", - "print(\"%.3e\" % norm(v1))\n", + "print(f\"{norm(u1):.3e}\")\n", + "print(f\"{norm(nl_rec1):.3e}\")\n", + "print(f\"{norm(v1):.3e}\")\n", "assert np.isclose(norm(u1), 4.145e+01, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(nl_rec1), 2.669e-03, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(v1), 1.381e-02, atol=0, rtol=1e-3)" @@ -925,31 +923,31 @@ "file_length = np.zeros(nt, dtype=np.int64)\n", "\n", "# The length of the data type, 4 bytes for float32\n", - "itemsize = v2.data[0,:,:].dtype.itemsize\n", + "itemsize = v2.data[0, :, :].dtype.itemsize\n", "\n", "# The length of a an uncompressed wavefield, used to compute compression ratio below\n", - "len0 = 4.0 * np.prod(v2._data[0,:,:].shape)\n", + "len0 = 4.0 * np.prod(v2._data[0, :, :].shape)\n", "\n", "# Loop over time blocks\n", "v2_all[:] = 0\n", "u2.data[:] = 0\n", "v2.data[:] = 0\n", "nl_rec2.data[:] = 0\n", - "for kN in range(0,N,1):\n", + "for kN in range(0, N, 1):\n", " kt1 = max((kN + 0) * M, 1)\n", " kt2 = min((kN + 1) * M - 1, nt-2)\n", " nl_op2(time_m=kt1, time_M=kt2)\n", "\n", " # Copy computed Born term for correctness testing\n", - " for kt in range(kt1,kt2+1):\n", + " for kt in range(kt1, kt2+1):\n", "\n", " # 
assign\n", - " v2_all[kt,:,:] = v2.data[(kt%M),:,:]\n", + " v2_all[kt, :, :] = v2.data[(kt % M), :, :]\n", "\n", " # compression\n", - " c = blosc.compress_ptr(v2._data[(kt%M),:,:].__array_interface__['data'][0],\n", - " np.prod(v2._data[(kt%M),:,:].shape),\n", - " v2._data[(kt%M),:,:].dtype.itemsize, 9, True, 'zstd')\n", + " c = blosc.compress_ptr(v2._data[(kt % M), :, :].__array_interface__['data'][0],\n", + " np.prod(v2._data[(kt % M), :, :].shape),\n", + " v2._data[(kt % M), :, :].dtype.itemsize, 9, True, 'zstd')\n", "\n", " # compression ratio\n", " cratio = len0 / (1.0 * len(c))\n", @@ -990,8 +988,8 @@ "# Continuous integration hooks for the time blocking implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", "# Note these are exactly the same norm values as the save all timesteps check above\n", - "print(\"%.3e\" % norm(nl_rec1))\n", - "print(\"%.3e\" % np.linalg.norm(v2_all))\n", + "print(f\"{norm(nl_rec1):.3e}\")\n", + "print(f\"{np.linalg.norm(v2_all):.3e}\")\n", "assert np.isclose(norm(nl_rec1), 2.669e-03, atol=0, rtol=1e-3)\n", "assert np.isclose(np.linalg.norm(v2_all), 1.381e-02, atol=0, rtol=1e-3)" ] @@ -1067,14 +1065,14 @@ "\n", "# The Jacobian linearized forward time update equation\n", "update1 = (t.spacing**2 * m0**2 / b) * \\\n", - " ((b * duFwd1.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \\\n", - " (b * duFwd1.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) + \\\n", + " ((b * duFwd1.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", + " (b * duFwd1.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) +\n", " (dm * v1)) + (2 - t.spacing * wOverQ) * duFwd1 + \\\n", " (t.spacing * wOverQ - 1) * duFwd1.backward\n", "\n", "update2 = (t.spacing**2 * m0**2 / b) * \\\n", - " ((b * duFwd2.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \\\n", - " (b * duFwd2.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) + \\\n", + " ((b * duFwd2.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", + " (b * duFwd2.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) +\n", " 
(dm * v2)) + (2 - t.spacing * wOverQ) * duFwd2 + \\\n", " (t.spacing * wOverQ - 1) * duFwd2.backward\n", "\n", @@ -1112,8 +1110,8 @@ "\n", "# Continuous integration hooks for the save all timesteps implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", - "print(\"%.3e\" % norm(duFwd1))\n", - "print(\"%.3e\" % norm(ln_rec1))\n", + "print(f\"{norm(duFwd1):.3e}\")\n", + "print(f\"{norm(ln_rec1):.3e}\")\n", "assert np.isclose(norm(duFwd1), 6.438e+00, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(ln_rec1), 2.681e-02, atol=0, rtol=1e-3)" ] @@ -1290,7 +1288,7 @@ "f = open(filename, \"rb\")\n", "\n", "# Temporary nd array for decompression\n", - "d = copy.copy(v2._data[0,:,:])\n", + "d = copy.copy(v2._data[0, :, :])\n", "\n", "# Array to hold compression ratio\n", "cratio = np.zeros(nt, dtype=dtype)\n", @@ -1298,17 +1296,17 @@ "# Loop over time blocks\n", "duFwd2.data[:] = 0\n", "ln_rec2.data[:] = 0\n", - "for kN in range(0,N,1):\n", + "for kN in range(0, N, 1):\n", " kt1 = max((kN + 0) * M, 1)\n", " kt2 = min((kN + 1) * M - 1, nt-2)\n", "\n", " # 1. Seek to file_offset[kt]\n", " # 2. Read file_length[kt1] bytes from file\n", " # 3. 
Decompress wavefield and assign to v2 Buffer\n", - " for kt in range(kt1,kt2+1):\n", + " for kt in range(kt1, kt2+1):\n", " f.seek(file_offset[kt], 0)\n", " c = f.read(file_length[kt])\n", - " blosc.decompress_ptr(c, v2._data[(kt%M),:,:].__array_interface__['data'][0])\n", + " blosc.decompress_ptr(c, v2._data[(kt % M), :, :].__array_interface__['data'][0])\n", " cratio[kt] = len0 / (1.0 * len(c))\n", "\n", " # Run the operator for this time block\n", @@ -1343,8 +1341,8 @@ "# Continuous integration hooks for the save all timesteps implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", "# Note these are exactly the same norm values as the save all timesteps check above\n", - "print(\"%.3e\" % norm(duFwd2))\n", - "print(\"%.3e\" % norm(ln_rec2))\n", + "print(f\"{norm(duFwd2):.3e}\")\n", + "print(f\"{norm(ln_rec2):.3e}\")\n", "assert np.isclose(norm(duFwd2), 6.438e+00, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(ln_rec2), 2.681e-02, atol=0, rtol=1e-3)" ] @@ -1474,8 +1472,8 @@ "\n", "# Continuous integration hooks for the save all timesteps implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", - "print(\"%.3e\" % norm(duAdj1))\n", - "print(\"%.3e\" % norm(dm1))\n", + "print(f\"{norm(duAdj1):.3e}\")\n", + "print(f\"{norm(dm1):.3e}\")\n", "assert np.isclose(norm(duAdj1), 4.626e+01, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(dm1), 1.426e-04, atol=0, rtol=1e-3)" ] @@ -1649,7 +1647,7 @@ "f = open(filename, \"rb\")\n", "\n", "# Temporary nd array for decompression\n", - "d = copy.copy(v2._data[0,:,:])\n", + "d = copy.copy(v2._data[0, :, :])\n", "\n", "# Array to hold compression ratio\n", "cratio = np.zeros(nt, dtype=dtype)\n", @@ -1657,17 +1655,17 @@ "# Loop over time blocks\n", "duAdj2.data[:] = 0\n", "dm2.data[:] = 0\n", - "for kN in range(N-1,-1,-1):\n", + "for kN in range(N-1, -1, -1):\n", " kt1 = max((kN + 0) * M, 1)\n", " kt2 = min((kN + 1) * M - 1, nt-2)\n", "\n", " # 1. 
Seek to file_offset[kt]\n", " # 2. Read file_length[kt1] bytes from file\n", " # 3. Decompress wavefield and assign to v2 Buffer\n", - " for kt in range(kt1,kt2+1,+1):\n", + " for kt in range(kt1, kt2+1, +1):\n", " f.seek(file_offset[kt], 0)\n", " c = f.read(file_length[kt])\n", - " blosc.decompress_ptr(c, v2._data[(kt%M),:,:].__array_interface__['data'][0])\n", + " blosc.decompress_ptr(c, v2._data[(kt % M), :, :].__array_interface__['data'][0])\n", " cratio[kt] = len0 / (1.0 * len(c))\n", "\n", " # Run the operator for this time block\n", @@ -1702,8 +1700,8 @@ "# Continuous integration hooks for the save all timesteps implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", "# Note these are exactly the same norm values as the save all timesteps check above\n", - "print(\"%.3e\" % norm(duAdj2))\n", - "print(\"%.3e\" % norm(dm2))\n", + "print(f\"{norm(duAdj2):.3e}\")\n", + "print(f\"{norm(dm2):.3e}\")\n", "assert np.isclose(norm(duAdj2), 4.626e+01, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(dm2), 1.426e-04, atol=0, rtol=1e-3)" ] @@ -1739,8 +1737,7 @@ "norm_dm1 = np.linalg.norm(dm1.data.reshape(-1))\n", "norm_dm12 = np.linalg.norm(dm1.data.reshape(-1) - dm2.data.reshape(-1))\n", "\n", - "print(\"Relative norm of difference wavefield,gradient; %+.4e %+.4e\" %\n", - " (norm_du12 / norm_du1, norm_dm12 /norm_dm1))\n", + "print(f\"Relative norm of difference wavefield,gradient; {norm_du12 / norm_du1:+.4e} {norm_dm12 /norm_dm1:+.4e}\")\n", "\n", "assert norm_du12 / norm_du1 < 1e-7\n", "assert norm_dm12 / norm_dm1 < 1e-7" diff --git a/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb b/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb index 9e3f0ee186..ab0342e271 100644 --- a/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb +++ b/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb @@ -126,7 +126,7 @@ "%matplotlib inline\n", "import numpy as np\n", "\n", - "from devito import Operator,Eq,norm\n", + "from devito import Operator, Eq, 
norm\n", "from devito import Function\n", "from devito import gaussian_smooth\n", "from devito import mmax\n", @@ -135,7 +135,7 @@ "from examples.seismic import Model\n", "from examples.seismic import plot_velocity\n", "from examples.seismic import Receiver\n", - "from examples.seismic import plot_image,AcquisitionGeometry\n", + "from examples.seismic import plot_image, AcquisitionGeometry\n", "from examples.seismic import TimeAxis\n", "\n", "from examples.seismic.self_adjoint import (setup_w_over_q)\n", @@ -172,40 +172,40 @@ "spacing = (10., 10.) # Grid spacing in m. The domain size is now 1km by 1km\n", "origin = (0., 0.) # What is the location of the top left corner. This is necessary to define\n", "\n", - "fpeak = 0.025# Source peak frequency is 25Hz (0.025 kHz)\n", + "fpeak = 0.025 # Source peak frequency is 25Hz (0.025 kHz)\n", "t0w = 1.0 / fpeak\n", "omega = 2.0 * np.pi * fpeak\n", "qmin = 0.1\n", "qmax = 100000\n", - "npad=50\n", + "npad = 50\n", "dtype = np.float32\n", "\n", "nshots = 21\n", "nreceivers = 101\n", "t0 = 0.\n", "tn = 1000. # Simulation last 1 second (1000 ms)\n", - "filter_sigma = (5, 5) # Filter's length\n", + "filter_sigma = (5, 5) # Filter's length\n", "\n", "v = np.empty(shape, dtype=dtype)\n", "\n", "# Define a velocity profile. 
The velocity is in km/s\n", "vp_top = 1.5\n", "\n", - "v[:] = vp_top # Top velocity\n", - "v[:, 30:65]= vp_top +0.5\n", - "v[:, 65:101]= vp_top +1.5\n", - "v[40:60, 35:55]= vp_top+1\n", + "v[:] = vp_top # Top velocity\n", + "v[:, 30:65] = vp_top +0.5\n", + "v[:, 65:101] = vp_top +1.5\n", + "v[40:60, 35:55] = vp_top+1\n", "\n", "init_damp = lambda func, nbl: setup_w_over_q(func, omega, qmin, qmax, npad, sigma=0)\n", "model = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n", - " space_order=8, bcs=init_damp,nbl=npad,dtype=dtype)\n", + " space_order=8, bcs=init_damp, nbl=npad, dtype=dtype)\n", "model0 = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n", - " space_order=8, bcs=init_damp,nbl=npad,dtype=dtype)\n", + " space_order=8, bcs=init_damp, nbl=npad, dtype=dtype)\n", "\n", "dt = model.critical_dt\n", "s = model.grid.stepping_dim.spacing\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", - "nt=time_range.num" + "nt = time_range.num" ] }, { @@ -235,7 +235,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Create initial model and smooth the boundaries\n", "gaussian_smooth(model0.vp, sigma=filter_sigma)\n", "# Plot the true and initial model\n", @@ -282,7 +282,7 @@ "source": [ "source_locations = np.empty((nshots, 2), dtype=dtype)\n", "source_locations[:, 0] = np.linspace(0., 1000, num=nshots)\n", - "source_locations[:, 1] = 30. # Depth is 30m" + "source_locations[:, 1] = 30. 
# Depth is 30m" ] }, { @@ -303,44 +303,44 @@ " residual = Receiver(name='residual', grid=model.grid, time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", "\n", - " d_obs = Receiver(name='d_obs', grid=model.grid,time_range=geometry.time_axis,\n", + " d_obs = Receiver(name='d_obs', grid=model.grid, time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", "\n", - " d_syn = Receiver(name='d_syn', grid=model.grid,time_range=geometry.time_axis,\n", + " d_syn = Receiver(name='d_syn', grid=model.grid, time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", "\n", " grad_full = Function(name='grad_full', grid=model.grid)\n", "\n", " grad_illum = Function(name='grad_illum', grid=model.grid)\n", "\n", - " src_illum = Function (name =\"src_illum\", grid = model.grid)\n", + " src_illum = Function(name=\"src_illum\", grid=model.grid)\n", "\n", " # Using devito's reference of virtual source\n", - " dm_true = (solver.model.vp.data**(-2) - model0.vp.data**(-2))\n", + " dm_true = (solver.model.vp.data**(-2) - model0.vp.data**(-2))\n", "\n", " objective = 0.\n", " u0 = None\n", " for i in range(nshots):\n", "\n", - " #Observed Data using Born's operator\n", + " # Observed Data using Born's operator\n", " geometry.src_positions[0, :] = source_locations[i, :]\n", "\n", " _, u0, _ = solver.forward(vp=model0.vp, save=True, u=u0)\n", "\n", - " _, _, _,_ = solver.jacobian(dm_true, vp=model0.vp, rec = d_obs)\n", + " _, _, _, _ = solver.jacobian(dm_true, vp=model0.vp, rec=d_obs)\n", "\n", - " #Calculated Data using Born's operator\n", - " solver.jacobian(dm, vp=model0.vp, rec = d_syn)\n", + " # Calculated Data using Born's operator\n", + " solver.jacobian(dm, vp=model0.vp, rec=d_syn)\n", "\n", " residual.data[:] = d_syn.data[:]- d_obs.data[:]\n", "\n", - " grad_shot,_ = solver.gradient(rec=residual, u=u0, vp=model0.vp)\n", + " grad_shot, _ = solver.gradient(rec=residual, u=u0, vp=model0.vp)\n", "\n", - " src_illum_upd = 
Eq(src_illum, src_illum + u0**2)\n", + " src_illum_upd = Eq(src_illum, src_illum + u0**2)\n", " op_src = Operator([src_illum_upd])\n", " op_src.apply()\n", "\n", - " grad_sum = Eq(grad_full, grad_full + grad_shot)\n", + " grad_sum = Eq(grad_full, grad_full + grad_shot)\n", " op_grad = Operator([grad_sum])\n", " op_grad.apply()\n", "\n", @@ -350,7 +350,7 @@ " op_gradf = Operator([grad_f])\n", " op_gradf.apply()\n", "\n", - " return objective,grad_illum,d_obs,d_syn" + " return objective, grad_illum, d_obs, d_syn" ] }, { @@ -383,8 +383,7 @@ "metadata": {}, "outputs": [], "source": [ - "def get_alfa(grad_iter,image_iter,niter_lsrtm):\n", - "\n", + "def get_alfa(grad_iter, image_iter, niter_lsrtm):\n", "\n", " term1 = np.dot(image_iter.reshape(-1), image_iter.reshape(-1))\n", "\n", @@ -403,10 +402,7 @@ "\n", " abb3 = abb2 / abb1\n", "\n", - " if abb3 > 0 and abb3 < 1:\n", - " alfa = abb2\n", - " else:\n", - " alfa = abb1\n", + " alfa = abb2 if abb3 > 0 and abb3 < 1 else abb1\n", "\n", " return alfa" ] @@ -565,30 +561,30 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "image_up_dev = np.zeros((model0.vp.shape[0], model0.vp.shape[1]),dtype)\n", + "# NBVAL_IGNORE_OUTPUT\n", + "image_up_dev = np.zeros((model0.vp.shape[0], model0.vp.shape[1]), dtype)\n", "\n", "image = np.zeros((model0.vp.shape[0], model0.vp.shape[1]))\n", "\n", - "nrec=101\n", - "niter=20 # number of iterations of the LSRTM\n", - "history = np.zeros((niter, 1)) #objective function\n", + "nrec = 101\n", + "niter = 20 # number of iterations of the LSRTM\n", + "history = np.zeros((niter, 1)) # objective function\n", "\n", - "image_prev = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", + "image_prev = np.zeros((model0.vp.shape[0], model0.vp.shape[1]))\n", "\n", - "grad_prev = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", + "grad_prev = np.zeros((model0.vp.shape[0], model0.vp.shape[1]))\n", "\n", - "yk = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", + "yk = np.zeros((model0.vp.shape[0], 
model0.vp.shape[1]))\n", "\n", - "sk = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", + "sk = np.zeros((model0.vp.shape[0], model0.vp.shape[1]))\n", "\n", - "for k in range(niter) :\n", + "for k in range(niter):\n", "\n", - " dm = image_up_dev # Reflectivity for Calculated data via Born\n", + " dm = image_up_dev # Reflectivity for Calculated data via Born\n", "\n", - " print('LSRTM Iteration',k+1)\n", + " print('LSRTM Iteration', k+1)\n", "\n", - " objective,grad_full,d_obs,d_syn = lsrtm_gradient(dm)\n", + " objective, grad_full, d_obs, d_syn = lsrtm_gradient(dm)\n", "\n", " history[k] = objective\n", "\n", @@ -596,7 +592,7 @@ "\n", " sk = image_up_dev - image_prev\n", "\n", - " alfa = get_alfa(yk,sk,k)\n", + " alfa = get_alfa(yk, sk, k)\n", "\n", " grad_prev = grad_full.data\n", "\n", @@ -604,7 +600,7 @@ "\n", " image_up_dev = image_up_dev - alfa*grad_full.data\n", "\n", - " if k == 0: # Saving the first migration using Born operator.\n", + " if k == 0: # Saving the first migration using Born operator.\n", "\n", " image = image_up_dev" ] @@ -626,7 +622,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "plt.figure()\n", "plt.plot(history)\n", "plt.xlabel('Iteration number')\n", @@ -659,7 +655,7 @@ " plot = plt.imshow(np.transpose(data),\n", " vmin=-.05,\n", " vmax=.05,\n", - " cmap=cmap,extent=extent)\n", + " cmap=cmap, extent=extent)\n", "\n", " plt.xlabel('X position (km)')\n", " plt.ylabel('Depth (km)')\n", @@ -697,8 +693,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "slices=tuple(slice(model.nbl,-model.nbl) for _ in range(2))\n", + "# NBVAL_IGNORE_OUTPUT\n", + "slices = tuple(slice(model.nbl, -model.nbl) for _ in range(2))\n", "rtm = image[slices]\n", "plot_image(np.diff(rtm, axis=1))" ] @@ -727,8 +723,8 @@ } ], "source": [ - "#NBVAL_SKIP\n", - "slices=tuple(slice(model.nbl,-model.nbl) for _ in range(2))\n", + "# NBVAL_SKIP\n", + "slices = tuple(slice(model.nbl, -model.nbl) for _ in range(2))\n", "lsrtm = image_up_dev[slices]\n", 
"plot_image(np.diff(lsrtm, axis=1))" ] @@ -757,8 +753,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "slices=tuple(slice(model.nbl,-model.nbl) for _ in range(2))\n", + "# NBVAL_IGNORE_OUTPUT\n", + "slices = tuple(slice(model.nbl, -model.nbl) for _ in range(2))\n", "dm_true = (solver.model.vp.data**(-2) - model0.vp.data**(-2))[slices]\n", "plot_image(np.diff(dm_true, axis=1))" ] @@ -780,14 +776,14 @@ } ], "source": [ - "#NBVAL_SKIP\n", - "plt.figure(figsize=(8,9))\n", - "x = np.linspace(0,1,101)\n", - "plt.plot(rtm[50,:],x,color=plt.gray(),linewidth=2)\n", - "plt.plot(lsrtm[50,:],x,'r',linewidth=2)\n", - "plt.plot(dm_true[50,:],x, 'k--',linewidth=2)\n", - "\n", - "plt.legend(['Initial reflectivity', 'Reflectivity via LSRTM','True Reflectivity'],fontsize=15)\n", + "# NBVAL_SKIP\n", + "plt.figure(figsize=(8, 9))\n", + "x = np.linspace(0, 1, 101)\n", + "plt.plot(rtm[50, :], x, color=plt.gray(), linewidth=2)\n", + "plt.plot(lsrtm[50, :], x, 'r', linewidth=2)\n", + "plt.plot(dm_true[50, :], x, 'k--', linewidth=2)\n", + "\n", + "plt.legend(['Initial reflectivity', 'Reflectivity via LSRTM', 'True Reflectivity'], fontsize=15)\n", "plt.ylabel('Depth (Km)')\n", "plt.xlabel('Amplitude')\n", "plt.gca().invert_yaxis()\n", @@ -811,14 +807,14 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "time = np.linspace(t0, tn, nt)\n", - "plt.figure(figsize=(8,9))\n", + "plt.figure(figsize=(8, 9))\n", "plt.ylabel('Time (ms)')\n", "plt.xlabel('Amplitude')\n", - "plt.plot(d_syn.data[:, 20],time, 'y', label='Calculated data (Last Iteration)')\n", - "plt.plot(d_obs.data[:, 20],time, 'm', label='Observed data')\n", - "plt.legend(loc=\"upper left\",fontsize=12)\n", + "plt.plot(d_syn.data[:, 20], time, 'y', label='Calculated data (Last Iteration)')\n", + "plt.plot(d_obs.data[:, 20], time, 'm', label='Observed data')\n", + "plt.legend(loc=\"upper left\", fontsize=12)\n", "ax = plt.gca()\n", "ax.invert_yaxis()\n", "plt.show()" diff --git 
a/examples/seismic/tutorials/14_creating_synthetics.ipynb b/examples/seismic/tutorials/14_creating_synthetics.ipynb index 86dc692d1d..a6dbb244bc 100644 --- a/examples/seismic/tutorials/14_creating_synthetics.ipynb +++ b/examples/seismic/tutorials/14_creating_synthetics.ipynb @@ -336,6 +336,7 @@ " kwargs = {**dict(zip(xyz, point)), 'surface': surface}\n", " model.add_surface_points(**kwargs)\n", "\n", + "\n", "# The points defining the base of the sand layer\n", "sand_points = [(322, 135, -783), (635, 702, -791), (221, 668, -772), (732, 235, -801), (442, 454, -702)]\n", "\n", diff --git a/examples/seismic/tutorials/15_tti_qp_pure.ipynb b/examples/seismic/tutorials/15_tti_qp_pure.ipynb index c936aae461..1ce1c7c253 100644 --- a/examples/seismic/tutorials/15_tti_qp_pure.ipynb +++ b/examples/seismic/tutorials/15_tti_qp_pure.ipynb @@ -69,19 +69,19 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "shape = (101,101) # 101x101 grid\n", - "spacing = (10.,10.) # spacing of 10 meters\n", - "origin = (0.,0.)\n", + "shape = (101, 101) # 101x101 grid\n", + "spacing = (10., 10.) 
# spacing of 10 meters\n", + "origin = (0., 0.)\n", "nbl = 0 # number of pad points\n", "\n", "model = demo_model('layers-tti', spacing=spacing, space_order=8,\n", " shape=shape, nbl=nbl, nlayers=1)\n", "\n", "# initialize Thomsem parameters to those used in Mu et al., (2020)\n", - "model.update('vp', np.ones(shape)*3.6) # km/s\n", + "model.update('vp', np.ones(shape)*3.6) # km/s\n", "model.update('epsilon', np.ones(shape)*0.23)\n", "model.update('delta', np.ones(shape)*0.17)\n", - "model.update('theta', np.ones(shape)*(45.*(np.pi/180.))) # radians" + "model.update('theta', np.ones(shape)*(45.*(np.pi/180.))) # radians" ] }, { @@ -106,8 +106,8 @@ "m = model.m\n", "\n", "# Use trigonometric functions from Devito\n", - "costheta = cos(theta)\n", - "sintheta = sin(theta)\n", + "costheta = cos(theta)\n", + "sintheta = sin(theta)\n", "cos2theta = cos(2*theta)\n", "sin2theta = sin(2*theta)\n", "sin4theta = sin(4*theta)" @@ -168,10 +168,10 @@ ], "source": [ "# Compute the dt and set time range\n", - "t0 = 0. # Simulation time start\n", - "tn = 150. # Simulation time end (0.15 second = 150 msec)\n", - "dt = (dvalue/(np.pi*vmax))*np.sqrt(1/(1+etamax*(max_cos_sin)**2)) # eq. above (cell 3)\n", - "time_range = TimeAxis(start=t0,stop=tn,step=dt)\n", + "t0 = 0. # Simulation time start\n", + "tn = 150. # Simulation time end (0.15 second = 150 msec)\n", + "dt = (dvalue/(np.pi*vmax))*np.sqrt(1/(1+etamax*(max_cos_sin)**2)) # eq. 
above (cell 3)\n", + "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", "print(\"time_range; \", time_range)" ] }, @@ -200,7 +200,7 @@ "term1_p = (1 + 2*delta*(sintheta**2)*(costheta**2) + 2*epsilon*costheta**4)*q.dx4\n", "term2_p = (1 + 2*delta*(sintheta**2)*(costheta**2) + 2*epsilon*sintheta**4)*q.dy4\n", "term3_p = (2-delta*(sin2theta)**2 + 3*epsilon*(sin2theta)**2 + 2*delta*(cos2theta)**2)*((q.dy2).dx2)\n", - "term4_p = ( delta*sin4theta - 4*epsilon*sin2theta*costheta**2)*((q.dy).dx3)\n", + "term4_p = (delta*sin4theta - 4*epsilon*sin2theta*costheta**2)*((q.dy).dx3)\n", "term5_p = (-delta*sin4theta - 4*epsilon*sin2theta*sintheta**2)*((q.dy3).dx)\n", "\n", "stencil_p = solve(m*p.dt2 - (term1_p + term2_p + term3_p + term4_p + term5_p), p.forward)\n", @@ -214,30 +214,30 @@ "x, z = model.grid.dimensions\n", "t = model.grid.stepping_dim\n", "\n", - "update_q = Eq( pp[t+1,x,z],((pp[t,x+1,z] + pp[t,x-1,z])*z.spacing**2 + (pp[t,x,z+1] + pp[t,x,z-1])*x.spacing**2 -\n", - " b[x,z]*x.spacing**2*z.spacing**2) / (2*(x.spacing**2 + z.spacing**2)))\n", + "update_q = Eq(pp[t+1, x, z], ((pp[t, x+1, z] + pp[t, x-1, z])*z.spacing**2 + (pp[t, x, z+1] + pp[t, x, z-1])*x.spacing**2 -\n", + " b[x, z]*x.spacing**2*z.spacing**2) / (2*(x.spacing**2 + z.spacing**2)))\n", "\n", - "bc = [Eq(pp[t+1,x, 0], 0.)]\n", - "bc += [Eq(pp[t+1,x, shape[1]+2*nbl-1], 0.)]\n", - "bc += [Eq(pp[t+1,0, z], 0.)]\n", - "bc += [Eq(pp[t+1,shape[0]-1+2*nbl, z], 0.)]\n", + "bc = [Eq(pp[t+1, x, 0], 0.)]\n", + "bc += [Eq(pp[t+1, x, shape[1]+2*nbl-1], 0.)]\n", + "bc += [Eq(pp[t+1, 0, z], 0.)]\n", + "bc += [Eq(pp[t+1, shape[0]-1+2*nbl, z], 0.)]\n", "\n", "# set source and receivers\n", - "src = RickerSource(name='src',grid=model.grid,f0=0.02,npoint=1,time_range=time_range)\n", - "src.coordinates.data[:,0] = model.domain_size[0]* .5\n", - "src.coordinates.data[:,1] = model.domain_size[0]* .5\n", + "src = RickerSource(name='src', grid=model.grid, f0=0.02, npoint=1, time_range=time_range)\n", + 
"src.coordinates.data[:, 0] = model.domain_size[0]* .5\n", + "src.coordinates.data[:, 1] = model.domain_size[0]* .5\n", "# Define the source injection\n", - "src_term = src.inject(field=p.forward,expr=src * dt**2 / m)\n", + "src_term = src.inject(field=p.forward, expr=src * dt**2 / m)\n", "\n", - "rec = Receiver(name='rec',grid=model.grid,npoint=shape[0],time_range=time_range)\n", - "rec.coordinates.data[:, 0] = np.linspace(model.origin[0],model.domain_size[0], num=model.shape[0])\n", + "rec = Receiver(name='rec', grid=model.grid, npoint=shape[0], time_range=time_range)\n", + "rec.coordinates.data[:, 0] = np.linspace(model.origin[0], model.domain_size[0], num=model.shape[0])\n", "rec.coordinates.data[:, 1] = 2*spacing[1]\n", "# Create interpolation expression for receivers\n", "rec_term = rec.interpolate(expr=p.forward)\n", "\n", "# Operators\n", - "optime=Operator([update_p] + src_term + rec_term)\n", - "oppres=Operator([update_q] + bc)\n", + "optime = Operator([update_p] + src_term + rec_term)\n", + "oppres = Operator([update_q] + bc)\n", "\n", "# you can print the generated code for both operators by typing print(optime) and print(oppres)" ] @@ -2625,17 +2625,17 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "psave =np.empty ((time_range.num,model.grid.shape[0],model.grid.shape[1]))\n", + "psave = np.empty((time_range.num, model.grid.shape[0], model.grid.shape[1]))\n", "niter_poisson = 1200\n", "\n", "# This is the time loop.\n", - "for step in range(0,time_range.num-2):\n", - " q.data[:,:]=pp.data[(niter_poisson+1)%2,:,:]\n", + "for step in range(0, time_range.num-2):\n", + " q.data[:, :] = pp.data[(niter_poisson+1) % 2, :, :]\n", " optime(time_m=step, time_M=step, dt=dt)\n", - " pp.data[:,:]=0.\n", - " b.data[:,:]=p.data[(step+1)%3,:,:]\n", - " oppres(time_M = niter_poisson)\n", - " psave[step,:,:]=p.data[(step+1)%3,:,:]" + " pp.data[:, :] = 0.\n", + " b.data[:, :] = p.data[(step+1) % 3, :, :]\n", + " oppres(time_M=niter_poisson)\n", + " psave[step, :, :] = 
p.data[(step+1) % 3, :, :]" ] }, { @@ -2646,10 +2646,10 @@ "outputs": [], "source": [ "# Some useful definitions for plotting if nbl is set to any other value than zero\n", - "nxpad,nzpad = shape[0] + 2 * nbl, shape[1] + 2 * nbl\n", - "shape_pad = np.array(shape) + 2 * nbl\n", - "origin_pad = tuple([o - s*nbl for o, s in zip(origin, spacing)])\n", - "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])" + "nxpad, nzpad = shape[0] + 2 * nbl, shape[1] + 2 * nbl\n", + "shape_pad = np.array(shape) + 2 * nbl\n", + "origin_pad = tuple([o - s*nbl for o, s in zip(origin, spacing)])\n", + "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])" ] }, { @@ -2686,7 +2686,7 @@ "\n", "# Plot the wavefields, each normalized to scaled maximum of last time step\n", "kt = (time_range.num - 2) - 1\n", - "amax = 0.05 * np.max(np.abs(psave[kt,:,:]))\n", + "amax = 0.05 * np.max(np.abs(psave[kt, :, :]))\n", "\n", "nsnaps = 10\n", "factor = round(time_range.num/nsnaps)\n", @@ -2695,17 +2695,17 @@ "fig.suptitle(\"Snapshots\", size=14)\n", "for count, ax in enumerate(axes.ravel()):\n", " snapshot = factor*count\n", - " ax.imshow(np.transpose(psave[snapshot,:,:]), cmap=\"seismic\",\n", + " ax.imshow(np.transpose(psave[snapshot, :, :]), cmap=\"seismic\",\n", " vmin=-amax, vmax=+amax, extent=plt_extent)\n", - " ax.plot(model.domain_size[0]* .5, model.domain_size[1]* .5, \\\n", + " ax.plot(model.domain_size[0]* .5, model.domain_size[1]* .5,\n", " 'red', linestyle='None', marker='*', markersize=8, label=\"Source\")\n", " ax.grid()\n", - " ax.tick_params('both', length=2, width=0.5, which='major',labelsize=10)\n", - " ax.set_title(\"Wavefield at t=%.2fms\" % (factor*count*dt),fontsize=10)\n", + " ax.tick_params('both', length=2, width=0.5, which='major', labelsize=10)\n", + " ax.set_title(\"Wavefield at t=%.2fms\" % (factor*count*dt), fontsize=10)\n", "for ax in axes[1, :]:\n", - " ax.set_xlabel(\"X Coordinate (m)\",fontsize=10)\n", + " ax.set_xlabel(\"X Coordinate (m)\", 
fontsize=10)\n", "for ax in axes[:, 0]:\n", - " ax.set_ylabel(\"Z Coordinate (m)\",fontsize=10)" + " ax.set_ylabel(\"Z Coordinate (m)\", fontsize=10)" ] }, { diff --git a/examples/seismic/tutorials/16_ader_fd.ipynb b/examples/seismic/tutorials/16_ader_fd.ipynb index c98c83d15f..697ee9f8e3 100644 --- a/examples/seismic/tutorials/16_ader_fd.ipynb +++ b/examples/seismic/tutorials/16_ader_fd.ipynb @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Necessary imports\n", "import devito as dv\n", "import sympy as sp\n", @@ -132,20 +132,26 @@ " return sp.Matrix([[f[0].dx2 + f[1].dxdy],\n", " [f[0].dxdy + f[1].dy2]])\n", "\n", + "\n", "def lapdiv(f):\n", " return f[0].dx3 + f[0].dxdy2 + f[1].dx2dy + f[1].dy3\n", "\n", + "\n", "def gradlap(f):\n", " return sp.Matrix([[f.dx3 + f.dxdy2],\n", " [f.dx2dy + f.dy3]])\n", "\n", + "\n", "def gradlapdiv(f):\n", " return sp.Matrix([[f[0].dx4 + f[0].dx2dy2 + f[1].dx3dy + f[1].dxdy3],\n", " [f[0].dx3dy + f[0].dxdy3 + f[1].dx2dy2 + f[1].dy4]])\\\n", "\n", + "\n", + "\n", "def biharmonic(f):\n", " return f.dx4 + 2*f.dx2dy2 + f.dy4\n", "\n", + "\n", "# First time derivatives\n", "pdt = rho*c2*dv.div(v)\n", "vdt = b*dv.grad(p)\n", @@ -230,7 +236,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "t0 = 0. # Simulation starts a t=0\n", "tn = 450. 
# Simulation last 0.45 seconds (450 ms)\n", "\n", @@ -283,7 +289,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "src_term = src.inject(field=p.forward, expr=src)\n", "\n", "op = dv.Operator([eq_p, eq_v] + src_term)\n", @@ -308,7 +314,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "extent = [0, 1000, 1000, 0]\n", "vmax = np.abs(np.amax(p.data[-1]))\n", "plt.imshow(c.data.T, cmap='Greys', extent=extent)\n", @@ -355,7 +361,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "ps = dv.TimeFunction(name='ps', grid=grid, space_order=16, staggered=dv.NODE)\n", "vs = dv.VectorTimeFunction(name='vs', grid=grid, space_order=16)\n", "\n", @@ -390,7 +396,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "vmax = np.abs(np.amax(ps.data[-1]))\n", "plt.imshow(c.data.T, cmap='Greys', extent=extent)\n", "plt.imshow(ps.data[-1].T, cmap='seismic', alpha=0.75, extent=extent, vmin=-vmax, vmax=vmax)\n", @@ -472,7 +478,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Reset the fields\n", "p.data[:] = 0\n", "ps.data[:] = 0\n", @@ -506,7 +512,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "vmax = np.amax(np.abs(p.data[-1]))\n", "\n", "fig, ax = plt.subplots(1, 3, figsize=(15, 10), tight_layout=True, sharey=True)\n", diff --git a/examples/seismic/tutorials/17_fourier_mode.ipynb b/examples/seismic/tutorials/17_fourier_mode.ipynb index 8db48cd4a9..6a239d7532 100644 --- a/examples/seismic/tutorials/17_fourier_mode.ipynb +++ b/examples/seismic/tutorials/17_fourier_mode.ipynb @@ -82,7 +82,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "model = demo_model('layers-isotropic', vp=3.0, origin=(0., 0.), shape=(101, 101), spacing=(10., 10.), nbl=40, nlayers=4)" ] }, @@ -103,7 +103,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_velocity(model)" ] }, @@ 
-113,7 +113,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Define acquisition geometry: source\n", "\n", "# First, position source centrally in all dimensions, then set depth\n", @@ -712,7 +712,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "Code(str(op.ccode), language='C')" ] }, @@ -745,7 +745,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(dt=model.critical_dt)" ] }, @@ -766,7 +766,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(12, 6))\n", "plt.subplot(1, 2, 1)\n", "plt.imshow(np.real(freq_mode.data.T), cmap='seismic', vmin=-1e2, vmax=1e2)\n", @@ -1459,7 +1459,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "Code(str(op.ccode), language='C')" ] }, @@ -1496,7 +1496,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u.data.fill(0)\n", "op(dt=model.critical_dt)" ] @@ -1518,7 +1518,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(12, 30))\n", "for i in range(5):\n", " plt.subplot(5, 2, 2*i+1)\n", diff --git a/examples/seismic/utils.py b/examples/seismic/utils.py index cacce8a3ee..e90fc1b028 100644 --- a/examples/seismic/utils.py +++ b/examples/seismic/utils.py @@ -84,7 +84,7 @@ def __init__(self, model, rec_positions, src_positions, t0, tn, **kwargs): self._t0w = kwargs.get('t0w') if self._src_type is not None and self._f0 is None: error("Peak frequency must be provided in KHz" + - " for source of type %s" % self._src_type) + f" for source of type {self._src_type}") self._grid = model.grid self._model = model @@ -228,15 +228,14 @@ def __call__(self, parser, args, values, option_string=None): # E.g., `('advanced', {'par-tile': True})` values = eval(values) if not isinstance(values, tuple) and len(values) >= 1: - raise ArgumentError(self, ("Invalid choice `%s` (`opt` must be " - 
"either str or tuple)" % str(values))) + raise ArgumentError(self, (f"Invalid choice `{str(values)}` (`opt` must be " + "either str or tuple)")) opt = values[0] except NameError: # E.g. `'advanced'` opt = values if opt not in configuration._accepted['opt']: - raise ArgumentError(self, ("Invalid choice `%s` (choose from %s)" - % (opt, str(configuration._accepted['opt'])))) + raise ArgumentError(self, ("Invalid choice `{}` (choose from {})".format(opt, str(configuration._accepted['opt'])))) setattr(args, self.dest, values) parser = ArgumentParser(description=description) diff --git a/examples/timestepping/superstep.ipynb b/examples/timestepping/superstep.ipynb index d71a9cd7cc..b1e8f34553 100644 --- a/examples/timestepping/superstep.ipynb +++ b/examples/timestepping/superstep.ipynb @@ -91,15 +91,15 @@ "# Spatial Domain\n", "shape = (301, 301)\n", "origin = (0., 0.)\n", - "extent = (3000, 3000) # 3kmx3km\n", + "extent = (3000, 3000) # 3kmx3km\n", "\n", "# Velocity\n", "background_velocity = 3500\n", "\n", "# Time Domain\n", "t0 = 0\n", - "t1 = 0.07 # (length of pulse)\n", - "t2 = 1.0 # (time for pulse to be reflected)\n", + "t1 = 0.07 # (length of pulse)\n", + "t2 = 1.0 # (time for pulse to be reflected)\n", "dt = 0.0020203\n", "superstep_size = 5\n", "\n", @@ -180,6 +180,7 @@ " trm = (np.pi * f * (t - 1 / f)) ** 2\n", " return A * (1 - 2 * trm) * np.exp(-trm)\n", "\n", + "\n", "nt1 = int(np.ceil((t1 - t0)/dt))\n", "t = np.linspace(t0, t1, nt1)\n", "rick = ricker(t, f=peak_freq)\n", @@ -459,7 +460,7 @@ "import shutil\n", "\n", "# Fetch and setup the Marmousi velocity field\n", - "url = 'https://github.com/devitocodes/data/raw/refs/heads/master/Simple2D/vp_marmousi_bi' # noqa: E501\n", + "url = 'https://github.com/devitocodes/data/raw/refs/heads/master/Simple2D/vp_marmousi_bi' # noqa: E501\n", "filename = Path('marmousi.np')\n", "shape = (1601, 401)\n", "if not filename.exists():\n", diff --git a/examples/userapi/02_apply.ipynb b/examples/userapi/02_apply.ipynb index 
9a6c809309..9a391ad3c8 100644 --- a/examples/userapi/02_apply.ipynb +++ b/examples/userapi/02_apply.ipynb @@ -49,7 +49,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "summary = op.apply()" ] }, @@ -152,7 +152,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op.arguments()" ] }, @@ -179,7 +179,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u.data[:] = 0. # Explicit reset to initial value\n", "summary = op.apply(x_m=2, y_m=2, time_M=0)" ] @@ -280,7 +280,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u2 = TimeFunction(name='u', grid=grid, save=5)\n", "summary = op.apply(u=u2)" ] @@ -373,7 +373,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "summary = op2.apply(time_M=4)" ] }, @@ -430,7 +430,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "summary" ] }, @@ -466,7 +466,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import configuration\n", "configuration['profiling'] = 'advanced'\n", "\n", diff --git a/examples/userapi/03_subdomains.ipynb b/examples/userapi/03_subdomains.ipynb index cf0697f238..341f5edee8 100644 --- a/examples/userapi/03_subdomains.ipynb +++ b/examples/userapi/03_subdomains.ipynb @@ -247,6 +247,7 @@ "source": [ "class FullDomain(SubDomain):\n", " name = 'mydomain'\n", + "\n", " def define(self, dimensions):\n", " x, y, z = dimensions\n", " return {x: x, y: y, z: z}" @@ -301,6 +302,7 @@ "source": [ "class InnerDomain(SubDomain):\n", " name = 'inner'\n", + "\n", " def define(self, dimensions):\n", " d = dimensions\n", " return {d: ('middle', 1, 1) for d in dimensions}" @@ -384,6 +386,7 @@ "source": [ "class Middle(SubDomain):\n", " name = 'middle'\n", + "\n", " def define(self, dimensions):\n", " x, y = dimensions\n", " return {x: ('middle', 3, 4), y: ('middle', 4, 3)}" @@ -410,7 +413,7 @@ } ], "source": [ - 
"#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Function, Eq, Operator\n", "\n", "grid = Grid(shape=(10, 10))\n", @@ -474,6 +477,7 @@ "source": [ "class Left(SubDomain):\n", " name = 'left'\n", + "\n", " def define(self, dimensions):\n", " x, y = dimensions\n", " return {x: ('left', 2), y: y}" @@ -494,6 +498,7 @@ "source": [ "class Right(SubDomain):\n", " name = 'right'\n", + "\n", " def define(self, dimensions):\n", " x, y = dimensions\n", " return {x: x, y: ('right', 2)}" @@ -543,7 +548,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "Operator([eq1, eq2, eq3])()" ] }, @@ -621,8 +626,8 @@ "metadata": {}, "outputs": [], "source": [ - "extent = (200., 100., 100.) # 200 x 100 x 100 m domain\n", - "h = 1.0 # Desired grid spacing\n", + "extent = (200., 100., 100.) # 200 x 100 x 100 m domain\n", + "h = 1.0 # Desired grid spacing\n", "# Set the grid to have a shape (201, 101, 101) for h=1:\n", "shape = (int(extent[0]/h+1), int(extent[1]/h+1), int(extent[2]/h+1))\n", "\n", @@ -632,23 +637,23 @@ "rho = np.zeros(shape)\n", "\n", "# Set up three horizontally separated layers:\n", - "l1 = int(0.5*shape[2])+1 # End of the water layer at 50m depth\n", - "l2 = int(0.5*shape[2])+1+int(4/h) # End of the soft rock section at 54m depth\n", + "l1 = int(0.5*shape[2])+1 # End of the water layer at 50m depth\n", + "l2 = int(0.5*shape[2])+1+int(4/h) # End of the soft rock section at 54m depth\n", "\n", "# Water layer model\n", - "vp[:,:,:l1] = 1.52\n", - "vs[:,:,:l1] = 0.\n", - "rho[:,:,:l1] = 1.05\n", + "vp[:, :, :l1] = 1.52\n", + "vs[:, :, :l1] = 0.\n", + "rho[:, :, :l1] = 1.05\n", "\n", "# Soft-rock layer model\n", - "vp[:,:,l1:l2] = 1.6\n", - "vs[:,:,l1:l2] = 0.4\n", - "rho[:,:,l1:l2] = 1.3\n", + "vp[:, :, l1:l2] = 1.6\n", + "vs[:, :, l1:l2] = 0.4\n", + "rho[:, :, l1:l2] = 1.3\n", "\n", "# Hard-rock layer model\n", - "vp[:,:,l2:] = 2.2\n", - "vs[:,:,l2:] = 1.2\n", - "rho[:,:,l2:] = 2.\n", + "vp[:, :, l2:] = 2.2\n", + "vs[:, :, 
l2:] = 1.2\n", + "rho[:, :, l2:] = 2.\n", "\n", "origin = (0, 0, 0)\n", "spacing = (h, h, h)" @@ -667,7 +672,7 @@ "metadata": {}, "outputs": [], "source": [ - "nbl = 20 # Number of absorbing boundary layers cells" + "nbl = 20 # Number of absorbing boundary layers cells" ] }, { @@ -686,12 +691,15 @@ "# Define our 'upper' and 'lower' SubDomains:\n", "class Upper(SubDomain):\n", " name = 'upper'\n", + "\n", " def define(self, dimensions):\n", " x, y, z = dimensions\n", " return {x: x, y: y, z: ('left', l1+nbl)}\n", "\n", + "\n", "class Lower(SubDomain):\n", " name = 'lower'\n", + "\n", " def define(self, dimensions):\n", " x, y, z = dimensions\n", " return {x: x, y: y, z: ('right', shape[2]+nbl-l1)}" @@ -718,8 +726,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "so = 4 # FD space order (Note that the time order is by default 1).\n", + "# NBVAL_IGNORE_OUTPUT\n", + "so = 4 # FD space order (Note that the time order is by default 1).\n", "\n", "model = ModelElastic(space_order=so, vp=vp, vs=vs, b=1/rho, origin=origin, shape=shape,\n", " spacing=spacing, nbl=nbl)\n", @@ -827,7 +835,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op = Operator([u_v_u, u_v_l, u_t_u, u_t_l] + src_xx + src_yy + src_zz, subs=model.spacing_map)\n", "op(dt=dt)" ] @@ -886,7 +894,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plots\n", "%matplotlib inline\n", @@ -952,7 +960,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "left = Left()\n", "right = Right()\n", "mid = Middle()\n", @@ -1031,12 +1039,14 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import SubDomainSet\n", "\n", + "\n", "class MySubDomains(SubDomainSet):\n", " name = 'mydomains'\n", "\n", + "\n", "sds_grid = Grid(shape=(10, 10))\n", "\n", "# Bounds for the various subdomains as (x_ltkn, x_rtkn, y_ltkn, y_rtkn)\n", @@ -1116,7 +1126,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# 
NBVAL_IGNORE_OUTPUT\n", "from devito import Border\n", "\n", "# Reset the data\n", @@ -1189,7 +1199,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Reset the data\n", "sds_f.data[:] = 0\n", @@ -1263,7 +1273,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Reset the data\n", "sds_f.data[:] = 0\n", diff --git a/examples/userapi/04_boundary_conditions.ipynb b/examples/userapi/04_boundary_conditions.ipynb index d154dc64ba..3eb917bfbd 100644 --- a/examples/userapi/04_boundary_conditions.ipynb +++ b/examples/userapi/04_boundary_conditions.ipynb @@ -54,8 +54,10 @@ "\n", "so = 6 # Space order\n", "\n", + "\n", "class MainDomain(SubDomain): # Main section with no damping\n", " name = 'main'\n", + "\n", " def __init__(self, pmls, so, grid=None):\n", " # NOTE: These attributes are used in `define`, and thus must be\n", " # set up before `super().__init__` is called.\n", @@ -71,6 +73,7 @@ "\n", "class Left(SubDomain): # Left PML region\n", " name = 'left'\n", + "\n", " def __init__(self, pmls, grid=None):\n", " self.pmls = pmls\n", " super().__init__(grid=grid)\n", @@ -82,6 +85,7 @@ "\n", "class Right(SubDomain): # Right PML region\n", " name = 'right'\n", + "\n", " def __init__(self, pmls, grid=None):\n", " self.pmls = pmls\n", " super().__init__(grid=grid)\n", @@ -90,8 +94,10 @@ " x, y = dimensions\n", " return {x: ('right', self.pmls), y: y}\n", "\n", + "\n", "class Base(SubDomain): # Base PML region\n", " name = 'base'\n", + "\n", " def __init__(self, pmls, grid=None):\n", " self.pmls = pmls\n", " super().__init__(grid=grid)\n", @@ -100,8 +106,10 @@ " x, y = dimensions\n", " return {x: ('middle', self.pmls, self.pmls), y: ('right', self.pmls)}\n", "\n", + "\n", "class FreeSurface(SubDomain): # Free surface region\n", " name = 'freesurface'\n", + "\n", " def __init__(self, pmls, so, grid=None):\n", " self.pmls = pmls\n", " self.so = so\n", @@ -410,6 +418,7 @@ "from devito import sign, norm\n", 
"from devito.symbolics import retrieve_functions, INT\n", "\n", + "\n", "def free_surface_top(eq, subdomain, update):\n", " \"\"\"\n", " Modify a stencil such that it is folded back on\n", @@ -458,6 +467,7 @@ " mapper.update({f: f.subs({yind: INT(abs(yind))})})\n", " return Eq(lhs, rhs.subs(mapper), subdomain=subdomain)\n", "\n", + "\n", "fs_p = free_surface_top(eq_p, freesurface, 'pressure')\n", "fs_v = free_surface_top(eq_v, freesurface, 'velocity')" ] diff --git a/examples/userapi/05_conditional_dimension.ipynb b/examples/userapi/05_conditional_dimension.ipynb index cd4c4cca60..5ae8451cd5 100644 --- a/examples/userapi/05_conditional_dimension.ipynb +++ b/examples/userapi/05_conditional_dimension.ipynb @@ -45,7 +45,7 @@ "\n", "# We define a 10x10 grid, dimensions are x, y\n", "shape = (10, 10)\n", - "grid = Grid(shape = shape)\n", + "grid = Grid(shape=shape)\n", "x, y = grid.dimensions\n", "\n", "# Define function 𝑓. We will initialize f's data with ones on its diagonal.\n", @@ -95,7 +95,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Eq, Operator\n", "op0 = Operator(Eq(f, f + 1))\n", "op0.apply()\n", @@ -115,7 +115,7 @@ "metadata": {}, "outputs": [], "source": [ - "#print(op0.ccode) # Print the generated code" + "# print(op0.ccode) # Print the generated code" ] }, { @@ -280,7 +280,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Gt\n", "\n", "f.data[:] = np.eye(10)\n", @@ -356,7 +356,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "f.data[:] = np.eye(10)\n", "\n", @@ -364,7 +364,7 @@ "print(op2.body.body[-1])\n", "op2.apply()\n", "\n", - "assert (np.count_nonzero(f.data - np.diag(np.diagonal(f.data)))==0)\n", + "assert (np.count_nonzero(f.data - np.diag(np.diagonal(f.data))) == 0)\n", "assert (np.count_nonzero(f.data) == 10)\n", "\n", "f.data" @@ -432,7 +432,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# 
NBVAL_IGNORE_OUTPUT\n", "\n", "from sympy import And\n", "from devito import Ne, Lt\n", @@ -450,10 +450,10 @@ "\n", "print(op3.body.body[-1])\n", "\n", - "assert (np.count_nonzero(f.data - np.diag(np.diagonal(f.data)))==0)\n", + "assert (np.count_nonzero(f.data - np.diag(np.diagonal(f.data))) == 0)\n", "assert (np.count_nonzero(f.data) == 10)\n", - "assert np.all(f.data[np.nonzero(f.data[:5,:5])] == 2)\n", - "assert np.all(f.data[5:,5:] == np.eye(5))\n", + "assert np.all(f.data[np.nonzero(f.data[:5, :5])] == 2)\n", + "assert np.all(f.data[5:, 5:] == np.eye(5))\n", "\n", "f.data" ] @@ -523,7 +523,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "h = Function(name='h', shape=grid.shape, dimensions=(x, ci))\n", "\n", @@ -581,14 +581,14 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "size, factor = 16, 4\n", "i = Dimension(name='i')\n", "ci = ConditionalDimension(name='ci', parent=i, factor=factor)\n", "\n", "g = Function(name='g', shape=(size,), dimensions=(i,))\n", "# Initialize g\n", - "g.data[:,]= list(range(size))\n", + "g.data[:,] = list(range(size))\n", "f = Function(name='f', shape=(int(size/factor),), dimensions=(ci,))\n", "\n", "op5 = Operator([Eq(f, g)])\n", @@ -703,7 +703,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# ConditionalDimension with Ne(f, 0) condition\n", "ci = ConditionalDimension(name='ci', parent=f.dimensions[-1],\n", " condition=Ne(f, 0))\n", @@ -768,8 +768,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "count = g.data[0] # Number of nonzeros\n", + "# NBVAL_IGNORE_OUTPUT\n", + "count = g.data[0] # Number of nonzeros\n", "\n", "# Dimension used only to nest different size of Functions under the same dim\n", "id_dim = Dimension(name='id_dim')\n", diff --git a/examples/userapi/06_sparse_operations.ipynb b/examples/userapi/06_sparse_operations.ipynb index c71d9d5be8..37f0888d19 100644 --- a/examples/userapi/06_sparse_operations.ipynb +++ 
b/examples/userapi/06_sparse_operations.ipynb @@ -75,7 +75,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "npoint = 5\n", "coords = np.random.rand(npoint, 2)/2 + .25\n", "base = np.floor(coords / grid.spacing)*grid.spacing\n", @@ -85,13 +85,13 @@ "ax.set_xlim([0, 1])\n", "ax.set_ylim([0, 1])\n", "ax.scatter(coords[:, 0], coords[:, 1], s=10, label=\"Sparse positions\")\n", - "ax.grid(which = \"major\")\n", - "ax.grid(which = \"minor\", alpha = 0.2)\n", + "ax.grid(which=\"major\")\n", + "ax.grid(which=\"minor\", alpha=0.2)\n", "ax.xaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.yaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.set_title(\"Off the grid sparse positions\")\n", "for i in range(npoint):\n", - " ax.annotate(\"(%.3f, %.3f)\" % (coords[i, 0], coords[i, 1]), coords[i, :])\n", + " ax.annotate(f\"({coords[i, 0]:.3f}, {coords[i, 1]:.3f})\", coords[i, :])\n", "ax.legend()\n", "plt.show()" ] @@ -244,19 +244,19 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "fig, ax = plt.subplots(figsize=(10, 10))\n", "ax.set_xlim([0, 1])\n", "ax.set_ylim([0, 1])\n", "ax.scatter(coords[:, 0], coords[:, 1], s=10, label=\"Sparse positions\")\n", "ax.scatter(interp_points[:, 0], interp_points[:, 1], s=10, label=\"Interpolation support\")\n", - "ax.grid(which = \"major\")\n", - "ax.grid(which = \"minor\", alpha = 0.2)\n", + "ax.grid(which=\"major\")\n", + "ax.grid(which=\"minor\", alpha=0.2)\n", "ax.xaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.yaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.set_title(\"Off the grid sparse positions\")\n", "for i in range(npoint):\n", - " ax.annotate(\"(%.3f, %.3f)\" % (coords[i, 0], coords[i, 1]), coords[i, :])\n", + " ax.annotate(f\"({coords[i, 0]:.3f}, {coords[i, 1]:.3f})\", coords[i, :])\n", "ax.legend()\n", "plt.show()" ] @@ -335,7 +335,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# 
NBVAL_IGNORE_OUTPUT\n", "op()\n", "s.data" ] @@ -365,7 +365,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op = Operator(s.inject(u, expr=s))\n", "op()" ] @@ -387,9 +387,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(u.data[1], vmin=0, vmax=1, cmap=\"jet\", extent=[0,1,0,1])\n", + "plt.imshow(u.data[1], vmin=0, vmax=1, cmap=\"jet\", extent=[0, 1, 0, 1])\n", "plt.colorbar(fraction=0.046, pad=0.04)\n", "plt.title(\"Linear weights\")\n", "plt.show()" @@ -458,19 +458,19 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "fig, ax = plt.subplots(figsize=(10, 10))\n", "ax.set_xlim([0, 1])\n", "ax.set_ylim([0, 1])\n", "ax.scatter(coords[:, 0], coords[:, 1], s=10, label=\"Sparse positions\")\n", "ax.scatter(interp_points[:, 0], interp_points[:, 1], s=10, label=\"Interpolation support\")\n", - "ax.grid(which = \"major\")\n", - "ax.grid(which = \"minor\", alpha = 0.2)\n", + "ax.grid(which=\"major\")\n", + "ax.grid(which=\"minor\", alpha=0.2)\n", "ax.xaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.yaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.set_title(\"Off the grid sparse positions\")\n", "for i in range(npoint):\n", - " ax.annotate(\"(%.3f, %.3f)\" % (coords[i, 0], coords[i, 1]), coords[i, :])\n", + " ax.annotate(f\"({coords[i, 0]:.3f}, {coords[i, 1]:.3f})\", coords[i, :])\n", "ax.legend()\n", "plt.show()" ] @@ -541,7 +541,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op()\n", "s.data" ] @@ -571,7 +571,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op = Operator(s.inject(u, expr=s))\n", "op()" ] @@ -593,9 +593,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(u.data[1], vmin=0, vmax=1, cmap=\"jet\", extent=[0,1,0,1])\n", + "plt.imshow(u.data[1], vmin=0, 
vmax=1, cmap=\"jet\", extent=[0, 1, 0, 1])\n", "plt.colorbar(fraction=0.046, pad=0.04)\n", "plt.title(\"Sinc weights\")\n", "plt.show()" @@ -698,8 +698,8 @@ " coordinates=coords, r=2)\n", "\n", "\n", - "pos = tuple(product((-grid.spacing[1], 0, grid.spacing[1],2*grid.spacing[1]),\n", - " (-grid.spacing[1], 0, grid.spacing[1],2*grid.spacing[1])))\n", + "pos = tuple(product((-grid.spacing[1], 0, grid.spacing[1], 2*grid.spacing[1]),\n", + " (-grid.spacing[1], 0, grid.spacing[1], 2*grid.spacing[1])))\n", "interp_points = np.concatenate([base+p for p in pos])" ] }, @@ -720,19 +720,19 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "fig, ax = plt.subplots(figsize=(10, 10))\n", "ax.set_xlim([0, 1])\n", "ax.set_ylim([0, 1])\n", "ax.scatter(coords[:, 0], coords[:, 1], s=10, label=\"Sparse positions\")\n", "ax.scatter(interp_points[:, 0], interp_points[:, 1], s=10, label=\"Interpolation support\")\n", - "ax.grid(which = \"major\")\n", - "ax.grid(which = \"minor\", alpha = 0.2)\n", + "ax.grid(which=\"major\")\n", + "ax.grid(which=\"minor\", alpha=0.2)\n", "ax.xaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.yaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.set_title(\"Off the grid sparse positions\")\n", "for i in range(npoint):\n", - " ax.annotate(\"(%.3f, %.3f)\" % (coords[i, 0], coords[i, 1]), coords[i, :])\n", + " ax.annotate(f\"({coords[i, 0]:.3f}, {coords[i, 1]:.3f})\", coords[i, :])\n", "ax.legend()\n", "plt.show()" ] @@ -781,7 +781,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op()\n", "s.data" ] @@ -811,7 +811,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u.data.fill(0)\n", "op = Operator(s.inject(u, expr=s))\n", "op()" @@ -834,9 +834,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(u.data[1], vmin=0, vmax=2, cmap=\"jet\", extent=[0,1,0,1])\n", + 
"plt.imshow(u.data[1], vmin=0, vmax=2, cmap=\"jet\", extent=[0, 1, 0, 1])\n", "plt.colorbar(fraction=0.046, pad=0.04)\n", "plt.title(\"Precomputed weights\")\n", "plt.show()" diff --git a/examples/userapi/07_functions_on_subdomains.ipynb b/examples/userapi/07_functions_on_subdomains.ipynb index 7724809163..a0df920fbc 100644 --- a/examples/userapi/07_functions_on_subdomains.ipynb +++ b/examples/userapi/07_functions_on_subdomains.ipynb @@ -227,7 +227,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "eq_f = Eq(f, 1)\n", "eq_g = Eq(g, 1, subdomain=middle)\n", "\n", @@ -299,7 +299,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(f.data.T, vmin=0, vmax=3, origin='lower', extent=(1.5, 8.5, 1.5, 8.5))\n", "plt.colorbar()\n", "plt.title(\"f.data\")\n", @@ -351,7 +351,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "eq_fg = Eq(g, g + f)\n", "Operator(eq_fg)()" ] @@ -382,7 +382,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(g.data.T, vmin=0, vmax=3, origin='lower', extent=(-0.5, 10.5, -0.5, 10.5))\n", "plt.colorbar()\n", "plt.title(\"g.data\")\n", @@ -494,7 +494,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Equations operating on Functions defined on SubDomains must be applied over\n", "# the SubDomain, or a SubDomain representing some subset thereof.\n", "\n", @@ -542,7 +542,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(g.data.T, vmin=0, vmax=3, origin='lower', extent=(-0.5, 10.5, -0.5, 10.5))\n", "plt.colorbar()\n", "plt.title(\"g.data\")\n", @@ -594,7 +594,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "eq_gf = Eq(f, g)\n", "Operator(eq_gf)()" ] @@ -625,7 +625,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(f.data.T, vmin=0, vmax=3, origin='lower', extent=(1.5, 8.5, 1.5, 
8.5))\n", "plt.colorbar()\n", "plt.title(\"f.data\")\n", @@ -693,7 +693,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "rec = src_rec.interpolate(expr=f)\n", "Operator(rec)()" ] @@ -761,7 +761,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "rec = src_rec.interpolate(expr=f+g)\n", "Operator(rec)()" ] @@ -829,7 +829,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "src = src_rec.inject(field=h, expr=1)\n", "Operator(src)()" ] @@ -860,7 +860,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(h.data.T, vmin=0, vmax=3, origin='lower', extent=(-0.5, 5.5, -0.5, 10.5))\n", "plt.colorbar()\n", "plt.title(\"h.data\")\n", @@ -936,7 +936,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "model = Model(vp=vp, origin=origin, shape=shape, spacing=spacing,\n", " space_order=2, nbl=20, bcs=\"damp\")\n", "\n", @@ -1011,7 +1011,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Make a Function and set it equal to one to check the alignment of subdomain\n", "test_func = Function(name='testfunc', grid=model.grid)\n", "Operator(Eq(test_func, 1, subdomain=snapshotdomain))()\n", @@ -1153,7 +1153,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u = TimeFunction(name=\"u\", grid=model.grid, time_order=2, space_order=8)\n", "\n", "pde = model.m * u.dt2 - u.laplace + model.damp * u.dt\n", @@ -2433,7 +2433,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "def animate_wavefield(f, fg, v, interval=100):\n", " \"\"\"\n", " Create an animation of the wavefield.\n", @@ -2464,7 +2464,7 @@ " # Initialize with the first frame (frame index 0)\n", " im_fg = ax.imshow(fg.data[0].T, vmin=vmin, vmax=vmax, cmap=\"Greys\",\n", " extent=(-100, 1100, 1100, -100), zorder=1)\n", - " im_f = ax.imshow(f.data[0].T, vmin=vmin, vmax=vmax, cmap='seismic',\n", + " im_f = 
ax.imshow(f.data[0].T, vmin=vmin, vmax=vmax, cmap='seismic',\n", " extent=(0, 1000, 1000, 245), zorder=2)\n", "\n", " # Set axis limits\n", @@ -2490,6 +2490,7 @@ "\n", " return ani\n", "\n", + "\n", "ani = animate_wavefield(u_save, u_save_grid, model.vp)\n", "\n", "HTML(ani.to_html5_video())" @@ -2540,7 +2541,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.plot((-100., 1100., 1100., -100., -100.),\n", " (-100., -100., 520., 520., -100.),\n", " 'k--', label='Acoustic')\n", @@ -2680,7 +2681,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Make a trivial operator to check location of fields\n", "test_func = Function(name='testfunc', grid=grid1)\n", "Operator([Eq(test_func, 1, subdomain=upper),\n", @@ -2892,7 +2893,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import switchconfig\n", "\n", "# Note: switchconfig(safe_math=True) is only required here to get consistent norms for testing purposes\n", @@ -2931,7 +2932,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "vmax_p = np.amax(np.abs(p.data[-1]))\n", "vmax_tau = np.amax(np.abs((tau[0, 0].data[-1] + tau[1, 1].data[-1])/2))\n", "vmax = max(vmax_p, vmax_tau)\n", @@ -2988,7 +2989,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "vmax = 0.05*np.amax(np.abs(rec.data))\n", "plt.imshow(rec.data, cmap='Greys', aspect='auto', vmax=vmax, vmin=-vmax)\n", "plt.xlabel(\"Receiver number\")\n", diff --git a/scripts/gen_sympy_funcs.py b/scripts/gen_sympy_funcs.py index dab404a567..44341be656 100644 --- a/scripts/gen_sympy_funcs.py +++ b/scripts/gen_sympy_funcs.py @@ -6,12 +6,12 @@ for fn in funcs: try: - strc = """class %s(DifferentiableOp, sympy.%s): - __sympy_class__ = sympy.%s - __new__ = DifferentiableOp.__new__\n\n""" % (fn, fn, fn) + strc = f"""class {fn}(DifferentiableOp, sympy.{fn}): + __sympy_class__ = sympy.{fn} + __new__ = 
DifferentiableOp.__new__\n\n""" exec(strc) print(strc) except: # Some are not classes such as sqrt - print("""def %s(x): - diffify(sympy.%s(x))\n\n""" % (fn, fn)) + print(f"""def {fn}(x): + diffify(sympy.{fn}(x))\n\n""") diff --git a/tests/test_adjoint.py b/tests/test_adjoint.py index d7c9bbf736..0dec46f6f1 100644 --- a/tests/test_adjoint.py +++ b/tests/test_adjoint.py @@ -114,8 +114,7 @@ def test_adjoint_F(self, mkey, shape, kernel, space_order, time_order, setup_fun # Adjoint test: Verify matches closely term1 = inner(srca, solver.geometry.src) term2 = norm(rec) ** 2 - info(': %f, : %f, difference: %4.4e, ratio: %f' - % (term1, term2, (term1 - term2)/term1, term1 / term2)) + info(f': {term1:f}, : {term2:f}, difference: {(term1 - term2)/term1:4.4e}, ratio: {term1 / term2:f}') assert np.isclose((term1 - term2)/term1, 0., atol=1.e-11) @pytest.mark.parametrize('mkey, shape, kernel, space_order, time_order, setup_func', [ @@ -192,8 +191,7 @@ def test_adjoint_J(self, mkey, shape, kernel, space_order, time_order, setup_fun # Adjoint test: Verify matches closely term1 = np.dot(im.data.reshape(-1), dm.reshape(-1)) term2 = norm(du)**2 - info(': %f, : %f, difference: %4.4e, ratio: %f' - % (term1, term2, (term1 - term2)/term1, term1 / term2)) + info(f': {term1:f}, : {term2:f}, difference: {(term1 - term2)/term1:4.4e}, ratio: {term1 / term2:f}') assert np.isclose((term1 - term2)/term1, 0., atol=1.e-12) @pytest.mark.parametrize('shape, coords', [ diff --git a/tests/test_benchmark.py b/tests/test_benchmark.py index 0de45e4799..83915f57a8 100644 --- a/tests/test_benchmark.py +++ b/tests/test_benchmark.py @@ -34,7 +34,7 @@ def test_bench(mode, problem, op): pyversion = sys.executable baseline = os.path.realpath(__file__).split("tests/test_benchmark.py")[0] - benchpath = '%sbenchmarks/user/benchmark.py' % baseline + benchpath = f'{baseline}benchmarks/user/benchmark.py' command_bench = [pyversion, benchpath, mode, '-P', problem, '-d', '%d' % nx, '%d' % ny, '%d' % nz, '--tn', @@ 
-64,7 +64,7 @@ def test_bench(mode, problem, op): bench_corename = os.path.join('_'.join([base_filename, arch, shape, nbl, t, so, to, opt, at, nt, mpi, np, rank])) - bench_filename = "%s%s%s" % (dir_name, bench_corename, filename_suffix) + bench_filename = f"{dir_name}{bench_corename}{filename_suffix}" assert os.path.isfile(bench_filename) else: assert True diff --git a/tests/test_cinterface.py b/tests/test_cinterface.py index a2ce4b6eb5..1c9d1968cb 100644 --- a/tests/test_cinterface.py +++ b/tests/test_cinterface.py @@ -26,7 +26,7 @@ def test_basic(): ccode = str(ccode) hcode = str(hcode) - assert 'include "%s.h"' % name in ccode + assert f'include "{name}.h"' in ccode # The public `struct dataobj` only appears in the header file assert 'struct dataobj\n{' not in ccode diff --git a/tests/test_derivatives.py b/tests/test_derivatives.py index 4600305fb9..abbd3dd74b 100644 --- a/tests/test_derivatives.py +++ b/tests/test_derivatives.py @@ -540,11 +540,11 @@ def test_all_shortcuts(self, so): assert getattr(g, fd) for d in grid.dimensions: - assert 'd%s' % d.name in f._fd - assert 'd%s' % d.name in g._fd + assert f'd{d.name}' in f._fd + assert f'd{d.name}' in g._fd for o in range(2, min(7, so+1)): - assert 'd%s%s' % (d.name, o) in f._fd - assert 'd%s%s' % (d.name, o) in g._fd + assert f'd{d.name}{o}' in f._fd + assert f'd{d.name}{o}' in g._fd def test_shortcuts_mixed(self): grid = Grid(shape=(10,)) @@ -611,7 +611,7 @@ def test_shifted_div(self, shift, ndim): for i, d in enumerate(grid.dimensions): x0 = (None if shift is None else d + shift[i] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - ref += getattr(f, 'd%s' % d.name)(x0=x0, fd_order=order) + ref += getattr(f, f'd{d.name}')(x0=x0, fd_order=order) assert df == ref.evaluate @pytest.mark.parametrize('shift, ndim', [(None, 2), (.5, 2), (.5, 3), @@ -624,7 +624,7 @@ def test_shifted_grad(self, shift, ndim): for i, (d, gi) in enumerate(zip(grid.dimensions, g)): x0 = (None if shift is None else d + 
shift[i] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - gk = getattr(f, 'd%s' % d.name)(x0=x0, fd_order=order).evaluate + gk = getattr(f, f'd{d.name}')(x0=x0, fd_order=order).evaluate assert gi == gk @pytest.mark.parametrize('side', [left, right, centered]) diff --git a/tests/test_dle.py b/tests/test_dle.py index c3330ad26a..88c63a1c4b 100644 --- a/tests/test_dle.py +++ b/tests/test_dle.py @@ -252,10 +252,10 @@ def test_leftright_subdims(self): eqns = [Eq(damp, 0.)] for d in damp.dimensions: # Left - dl = SubDimension.left(name='%sl' % d.name, parent=d, thickness=nbl) + dl = SubDimension.left(name=f'{d.name}l', parent=d, thickness=nbl) eqns.extend([Inc(damp.subs({d: dl}), 1.)]) # right - dr = SubDimension.right(name='%sr' % d.name, parent=d, thickness=nbl) + dr = SubDimension.right(name=f'{d.name}r', parent=d, thickness=nbl) eqns.extend([Inc(damp.subs({d: dr}), 1.)]) op = Operator(eqns, opt=('fission', 'blocking', {'blockrelax': 'device-aware'})) diff --git a/tests/test_docstrings.py b/tests/test_docstrings.py index dae7abb149..bb8416d245 100644 --- a/tests/test_docstrings.py +++ b/tests/test_docstrings.py @@ -25,5 +25,5 @@ 'symbolics.inspection', 'tools.utils', 'tools.data_structures' ]) def test_docstrings(modname): - module = import_module('devito.%s' % modname) + module = import_module(f'devito.{modname}') assert doctest.testmod(module).failed == 0 diff --git a/tests/test_dse.py b/tests/test_dse.py index b79605c099..ca0bb46d47 100644 --- a/tests/test_dse.py +++ b/tests/test_dse.py @@ -2326,10 +2326,7 @@ def test_blocking_options(self, rotate): 'cire-rotate': rotate, 'min-storage': True})) # Check code generation - if 'openmp' in configuration['language']: - prefix = ['t'] - else: - prefix = [] + prefix = ['t'] if 'openmp' in configuration['language'] else [] if rotate: assert_structure( op1, diff --git a/tests/test_dtypes.py b/tests/test_dtypes.py index a2a04e858f..f30fbd7b2a 100644 --- a/tests/test_dtypes.py +++ b/tests/test_dtypes.py @@ 
-160,10 +160,7 @@ def test_imag_unit(dtype: np.complexfloating, kwargs: dict[str, str]) -> None: unit_str = '_Complex_I' else: # C++ provides imaginary literals - if dtype == np.complex64: - unit_str = '1if' - else: - unit_str = '1i' + unit_str = '1if' if dtype == np.complex64 else '1i' # Set up an operator s = Symbol(name='s', dtype=dtype) @@ -191,10 +188,10 @@ def test_math_functions(dtype: np.dtype[np.inexact], if 'CXX' not in configuration['language']: if np.issubdtype(dtype, np.complexfloating): # Complex functions have a 'c' prefix - call_str = 'c%s' % call_str + call_str = f'c{call_str}' if dtype(0).real.itemsize <= 4: # Single precision have an 'f' suffix (half is promoted to single) - call_str = '%sf' % call_str + call_str = f'{call_str}f' # Operator setup a = Symbol(name='a', dtype=dtype) diff --git a/tests/test_gpu_openacc.py b/tests/test_gpu_openacc.py index ccaaa0bd6a..7086d365cd 100644 --- a/tests/test_gpu_openacc.py +++ b/tests/test_gpu_openacc.py @@ -112,7 +112,7 @@ def test_tile_insteadof_collapse(self, par_tile): 'acc parallel loop tile(32,4) present(u)' strtile = ','.join([str(i) for i in stile]) assert trees[3][1].pragmas[0].ccode.value ==\ - 'acc parallel loop tile(%s) present(src,src_coords,u)' % strtile + f'acc parallel loop tile({strtile}) present(src,src_coords,u)' @pytest.mark.parametrize('par_tile', [((32, 4, 4), (8, 8)), ((32, 4), (8, 8)), ((32, 4, 4), (8, 8, 8)), @@ -141,7 +141,7 @@ def test_multiple_tile_sizes(self, par_tile): 'acc parallel loop tile(8,8) present(u)' sclause = 'collapse(4)' if par_tile[-1] is None else 'tile(8,8,8,8)' assert trees[3][1].pragmas[0].ccode.value ==\ - 'acc parallel loop %s present(src,src_coords,u)' % sclause + f'acc parallel loop {sclause} present(src,src_coords,u)' def test_multi_tile_blocking_structure(self): grid = Grid(shape=(8, 8, 8)) diff --git a/tests/test_gpu_openmp.py b/tests/test_gpu_openmp.py index 3d3b0d4b09..679fee23b6 100644 --- a/tests/test_gpu_openmp.py +++ b/tests/test_gpu_openmp.py @@ 
-125,17 +125,15 @@ def test_multiple_eqns(self): 'omp target teams distribute parallel for collapse(3)' for i, f in enumerate([u, v]): assert op.body.maps[i].ccode.value ==\ - ('omp target enter data map(to: %(n)s[0:%(n)s_vec->size[0]*' - '%(n)s_vec->size[1]*%(n)s_vec->size[2]*%(n)s_vec->size[3]])' % - {'n': f.name}) + (f'omp target enter data map(to: {f.name}[0:{f.name}_vec->size[0]*' + f'{f.name}_vec->size[1]*{f.name}_vec->size[2]*{f.name}_vec->size[3]])') assert op.body.unmaps[2*i + 0].ccode.value ==\ - ('omp target update from(%(n)s[0:%(n)s_vec->size[0]*' - '%(n)s_vec->size[1]*%(n)s_vec->size[2]*%(n)s_vec->size[3]])' % - {'n': f.name}) + (f'omp target update from({f.name}[0:{f.name}_vec->size[0]*' + f'{f.name}_vec->size[1]*{f.name}_vec->size[2]*{f.name}_vec->size[3]])') assert op.body.unmaps[2*i + 1].ccode.value ==\ - ('omp target exit data map(release: %(n)s[0:%(n)s_vec->size[0]*' - '%(n)s_vec->size[1]*%(n)s_vec->size[2]*%(n)s_vec->size[3]]) ' - 'if(devicerm)' % {'n': f.name}) + (f'omp target exit data map(release: {f.name}[0:{f.name}_vec->size[0]*' + f'{f.name}_vec->size[1]*{f.name}_vec->size[2]*{f.name}_vec->size[3]]) ' + 'if(devicerm)') def test_multiple_loops(self): grid = Grid(shape=(3, 3, 3)) @@ -165,17 +163,15 @@ def test_multiple_loops(self): # Check `u` and `v` for i, f in enumerate([u, v], 1): assert op.body.maps[i].ccode.value ==\ - ('omp target enter data map(to: %(n)s[0:%(n)s_vec->size[0]]' - '[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]])' % - {'n': f.name}) + (f'omp target enter data map(to: {f.name}[0:{f.name}_vec->size[0]]' + f'[0:{f.name}_vec->size[1]][0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]])') assert op.body.unmaps[2*i + 0].ccode.value ==\ - ('omp target update from(%(n)s[0:%(n)s_vec->size[0]]' - '[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]])' % - {'n': f.name}) + (f'omp target update from({f.name}[0:{f.name}_vec->size[0]]' + 
f'[0:{f.name}_vec->size[1]][0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]])') assert op.body.unmaps[2*i + 1].ccode.value ==\ - ('omp target exit data map(release: %(n)s[0:%(n)s_vec->size[0]]' - '[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]]) ' - 'if(devicerm)' % {'n': f.name}) + (f'omp target exit data map(release: {f.name}[0:{f.name}_vec->size[0]]' + f'[0:{f.name}_vec->size[1]][0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]]) ' + 'if(devicerm)') # Check `f` assert op.body.maps[0].ccode.value ==\ diff --git a/tests/test_gradient.py b/tests/test_gradient.py index fd070a91f5..cd42b9b8c6 100644 --- a/tests/test_gradient.py +++ b/tests/test_gradient.py @@ -234,8 +234,8 @@ def initializer(data): # Test slope of the tests p1 = np.polyfit(np.log10(H), np.log10(error1), 1) p2 = np.polyfit(np.log10(H), np.log10(error2), 1) - info('1st order error, Phi(m0+dm)-Phi(m0): %s' % (p1)) - info(r'2nd order error, Phi(m0+dm)-Phi(m0) - : %s' % (p2)) + info(f'1st order error, Phi(m0+dm)-Phi(m0): {p1}') + info(rf'2nd order error, Phi(m0+dm)-Phi(m0) - : {p2}') assert np.isclose(p1[0], 1.0, rtol=0.1) assert np.isclose(p2[0], 2.0, rtol=0.1) @@ -295,9 +295,9 @@ def initializer(data): # Test slope of the tests p1 = np.polyfit(np.log10(H), np.log10(error1), 1) p2 = np.polyfit(np.log10(H), np.log10(error2), 1) - info('1st order error, Phi(m0+dm)-Phi(m0) with slope: %s compared to 1' % (p1[0])) + info(f'1st order error, Phi(m0+dm)-Phi(m0) with slope: {p1[0]} compared to 1') info(r'2nd order error, Phi(m0+dm)-Phi(m0) - with slope:' - ' %s compared to 2' % (p2[0])) + f' {p2[0]} compared to 2') assert np.isclose(p1[0], 1.0, rtol=0.1) assert np.isclose(p2[0], 2.0, rtol=0.1) diff --git a/tests/test_ir.py b/tests/test_ir.py index 90de31ddfd..684a7713d6 100644 --- a/tests/test_ir.py +++ b/tests/test_ir.py @@ -637,13 +637,13 @@ def test_single_eq(self, expr, expected, ti0, ti1, fa, grid): types = ['flow', 'anti'] if type != 'all': types.remove(type) - assert len(getattr(scope, 
'd_%s' % type)) == 1 - assert all(len(getattr(scope, 'd_%s' % i)) == 0 for i in types) + assert len(getattr(scope, f'd_{type}')) == 1 + assert all(len(getattr(scope, f'd_{i}')) == 0 for i in types) else: - assert all(len(getattr(scope, 'd_%s' % i)) == 1 for i in types) + assert all(len(getattr(scope, f'd_{i}')) == 1 for i in types) # Check mode - assert getattr(dep, 'is_%s' % mode)() + assert getattr(dep, f'is_{mode}')() # Check cause if exp_cause == 'None': @@ -655,13 +655,13 @@ def test_single_eq(self, expr, expected, ti0, ti1, fa, grid): assert cause.name == exp_cause # Check mode restricted to the cause - assert getattr(dep, 'is_%s' % mode)(cause) + assert getattr(dep, f'is_{mode}')(cause) non_causes = [i for i in grid.dimensions if i is not cause] - assert all(not getattr(dep, 'is_%s' % mode)(i) for i in non_causes) + assert all(not getattr(dep, f'is_{mode}')(i) for i in non_causes) # Check if it's regular or irregular - assert getattr(dep.source, 'is_%s' % regular) or\ - getattr(dep.sink, 'is_%s' % regular) + assert getattr(dep.source, f'is_{regular}') or\ + getattr(dep.sink, f'is_{regular}') @pytest.mark.parametrize('exprs,expected', [ # Trivial flow dep @@ -723,7 +723,7 @@ def test_multiple_eqs(self, exprs, expected, ti0, ti1, ti3, fa): assert len(scope.d_all) == len(expected) for i in ['flow', 'anti', 'output']: - for dep in getattr(scope, 'd_%s' % i): + for dep in getattr(scope, f'd_{i}'): item = (dep.function.name, i, str(set(dep.cause))) assert item in expected expected.remove(item) diff --git a/tests/test_mpi.py b/tests/test_mpi.py index ede7989683..25d76fe82e 100644 --- a/tests/test_mpi.py +++ b/tests/test_mpi.py @@ -2541,9 +2541,9 @@ def test_nontrivial_operator(self, mode): t = grid.stepping_dim # SubDimensions to implement BCs - xl, yl = [SubDimension.left('%sl' % d.name, d, tkn) for d in [x, y]] - xi, yi = [SubDimension.middle('%si' % d.name, d, tkn, tkn) for d in [x, y]] - xr, yr = [SubDimension.right('%sr' % d.name, d, tkn) for d in [x, y]] + 
xl, yl = [SubDimension.left(f'{d.name}l', d, tkn) for d in [x, y]] + xi, yi = [SubDimension.middle(f'{d.name}i', d, tkn, tkn) for d in [x, y]] + xr, yr = [SubDimension.right(f'{d.name}r', d, tkn) for d in [x, y]] # Functions u = TimeFunction(name='f', grid=grid) @@ -3141,7 +3141,7 @@ def gen_serial_norms(shape, so): """ day = np.datetime64('today') try: - l = np.load("norms%s.npy" % len(shape), allow_pickle=True) + l = np.load(f"norms{len(shape)}.npy", allow_pickle=True) assert l[-1] == day except: tn = 500. # Final time @@ -3161,7 +3161,7 @@ def gen_serial_norms(shape, so): Ev = norm(v) Esrca = norm(srca) - np.save("norms%s.npy" % len(shape), (Eu, Erec, Ev, Esrca, day), allow_pickle=True) + np.save(f"norms{len(shape)}.npy", (Eu, Erec, Ev, Esrca, day), allow_pickle=True) class TestIsotropicAcoustic: diff --git a/tests/test_operator.py b/tests/test_operator.py index b05aa642b8..660192d52d 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -761,8 +761,7 @@ def verify_arguments(self, arguments, expected): condition = arguments[name] == v if not condition: - error('Wrong argument %s: expected %s, got %s' % - (name, v, arguments[name])) + error(f'Wrong argument {name}: expected {v}, got {arguments[name]}') assert condition def verify_parameters(self, parameters, expected): @@ -774,11 +773,11 @@ def verify_parameters(self, parameters, expected): parameters = [p.name for p in parameters] for expi in expected: if expi not in parameters + boilerplate: - error("Missing parameter: %s" % expi) + error(f"Missing parameter: {expi}") assert expi in parameters + boilerplate extra = [p for p in parameters if p not in expected and p not in boilerplate] if len(extra) > 0: - error("Redundant parameters: %s" % str(extra)) + error(f"Redundant parameters: {str(extra)}") assert len(extra) == 0 def test_default_functions(self): diff --git a/tests/test_pickle.py b/tests/test_pickle.py index 018720a330..09b18031fe 100644 --- a/tests/test_pickle.py +++ b/tests/test_pickle.py 
@@ -245,10 +245,7 @@ def test_sparse_op(self, pickle, interp, op): interpolation=interp) u = Function(name='u', grid=grid, space_order=4) - if op == 'inject': - expr = sf.inject(u, sf) - else: - expr = sf.interpolate(u) + expr = sf.inject(u, sf) if op == 'inject' else sf.interpolate(u) pkl_expr = pickle.dumps(expr) new_expr = pickle.loads(pkl_expr) diff --git a/tests/test_symbolic_coefficients.py b/tests/test_symbolic_coefficients.py index cc540691a8..7f4e2e2f29 100644 --- a/tests/test_symbolic_coefficients.py +++ b/tests/test_symbolic_coefficients.py @@ -241,10 +241,7 @@ def test_with_timefunction(self, stagger): """Check compatibility of custom coefficients and TimeFunctions""" grid = Grid(shape=(11,), extent=(10.,)) x = grid.dimensions[0] - if stagger: - staggered = x - else: - staggered = None + staggered = x if stagger else None f = TimeFunction(name='f', grid=grid, space_order=2, staggered=staggered) g = TimeFunction(name='g', grid=grid, space_order=2, staggered=staggered) diff --git a/tests/test_tensors.py b/tests/test_tensors.py index dee3c40696..8f9d890a2c 100644 --- a/tests/test_tensors.py +++ b/tests/test_tensors.py @@ -300,7 +300,7 @@ def test_shifted_grad_of_vector(shift, ndim): for j, d in enumerate(grid.dimensions): x0 = (None if shift is None else d + shift[i][j] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - ge = getattr(f[i], 'd%s' % d.name)(x0=x0, fd_order=order) + ge = getattr(f[i], f'd{d.name}')(x0=x0, fd_order=order) ref.append(ge.evaluate) for i, d in enumerate(gf): @@ -317,7 +317,7 @@ def test_shifted_div_of_vector(shift, ndim): for i, d in enumerate(grid.dimensions): x0 = (None if shift is None else d + shift[i] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - ref += getattr(v[i], 'd%s' % d.name)(x0=x0, fd_order=order) + ref += getattr(v[i], f'd{d.name}')(x0=x0, fd_order=order) assert df == ref.evaluate @@ -336,7 +336,7 @@ def test_shifted_div_of_tensor(shift, ndim): for j, d in 
reversed(list(enumerate(grid.dimensions))): x0 = (None if shift is None else d + shift[i][j] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - ge = getattr(f[i, j], 'd%s' % d.name)(x0=x0, fd_order=order) + ge = getattr(f[i, j], f'd{d.name}')(x0=x0, fd_order=order) elems.append(ge.evaluate) ref.append(sum(elems)) @@ -388,7 +388,7 @@ def test_shifted_lap_of_tensor(shift, ndim): for i, d in enumerate(v.space_dimensions): x0 = (None if shift is None else d + shift[i][j] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - ref += getattr(v[j, i], 'd%s2' % d.name)(x0=x0, fd_order=order) + ref += getattr(v[j, i], f'd{d.name}2')(x0=x0, fd_order=order) assert df[j] == ref diff --git a/tests/test_tti.py b/tests/test_tti.py index 698df07f79..ecf4805111 100644 --- a/tests/test_tti.py +++ b/tests/test_tti.py @@ -73,5 +73,5 @@ def test_tti(shape, so, rot): res = linalg.norm((normal_u - normal_utti - normal_vtti).reshape(-1))**2 res /= np.linalg.norm(normal_u.reshape(-1))**2 - log("Difference between acoustic and TTI with all coefficients to 0 %2.4e" % res) + log(f"Difference between acoustic and TTI with all coefficients to 0 {res:2.4e}") assert np.isclose(res, 0.0, atol=1e-4) From 0ab3cb1c5137f0631c5ba91a1c1be8171b286ef5 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Sun, 4 Jan 2026 23:24:54 +0000 Subject: [PATCH 29/42] misc: Manually lint examples, tests, scripts, etc --- benchmarks/regression/benchmarks/arguments.py | 6 +- benchmarks/user/advisor/run_advisor.py | 3 +- benchmarks/user/benchmark.py | 38 +-- conftest.py | 29 +-- examples/.ruff.toml | 6 + examples/cfd/01_convection.ipynb | 5 +- examples/cfd/01_convection_revisited.ipynb | 2 +- examples/cfd/02_convection_nonlinear.ipynb | 2 +- examples/cfd/03_diffusion.ipynb | 2 +- examples/cfd/03_diffusion_nonuniform.ipynb | 3 +- examples/cfd/06_poisson.ipynb | 2 +- examples/cfd/07_cavity_flow.ipynb | 4 +- examples/cfd/08_shallow_water_equation.ipynb | 9 +- 
examples/cfd/09_Darcy_flow_equation.ipynb | 7 +- examples/cfd/example_diffusion.py | 24 +- examples/cfd/tools.py | 5 +- examples/finance/bs_ivbp.ipynb | 4 +- .../performance/02_advisor_roofline.ipynb | 23 +- .../seismic/abc_methods/01_introduction.ipynb | 74 +++--- examples/seismic/abc_methods/02_damping.ipynb | 94 ++++--- examples/seismic/abc_methods/03_pml.ipynb | 109 +++++--- examples/seismic/abc_methods/04_habc.ipynb | 236 ++++++++++++------ examples/seismic/acoustic/accuracy.ipynb | 73 ++++-- examples/seismic/acoustic/acoustic_example.py | 6 +- examples/seismic/elastic/elastic_example.py | 7 +- examples/seismic/inversion/fwi.py | 2 +- examples/seismic/model.py | 14 +- examples/seismic/plotting.py | 2 +- .../sa_01_iso_implementation1.ipynb | 20 +- .../sa_02_iso_implementation2.ipynb | 18 +- .../self_adjoint/sa_03_iso_correctness.ipynb | 59 +++-- examples/seismic/self_adjoint/test_utils.py | 7 +- .../self_adjoint/test_wavesolver_iso.py | 76 +++--- examples/seismic/source.py | 7 +- examples/seismic/test_seismic_utils.py | 9 +- examples/seismic/tti/tti_example.py | 5 +- examples/seismic/tutorials/02_rtm.ipynb | 2 +- examples/seismic/tutorials/03_fwi.ipynb | 2 +- examples/seismic/tutorials/04_dask.ipynb | 4 +- .../seismic/tutorials/04_dask_pickling.ipynb | 46 ++-- .../06_elastic_varying_parameters.ipynb | 9 +- .../tutorials/07.1_dispersion_relation.ipynb | 83 +++--- .../seismic/tutorials/07_DRP_schemes.ipynb | 17 +- .../seismic/tutorials/08_snapshotting.ipynb | 50 ++-- .../seismic/tutorials/12_time_blocking.ipynb | 200 +++++++-------- .../seismic/tutorials/13_LSRTM_acoustic.ipynb | 2 +- .../tutorials/14_creating_synthetics.ipynb | 16 +- .../seismic/tutorials/15_tti_qp_pure.ipynb | 14 +- examples/seismic/utils.py | 13 +- examples/seismic/viscoacoustic/operators.py | 2 +- .../viscoacoustic/viscoacoustic_example.py | 6 +- examples/seismic/viscoacoustic/wavesolver.py | 2 +- .../viscoelastic/viscoelastic_example.py | 6 +- examples/timestepping/ic_superstep.py | 2 +- 
examples/timestepping/superstep.ipynb | 10 +- examples/userapi/00_sympy.ipynb | 4 +- examples/userapi/02_apply.ipynb | 5 +- examples/userapi/03_subdomains.ipynb | 13 +- examples/userapi/04_boundary_conditions.ipynb | 2 +- .../userapi/05_conditional_dimension.ipynb | 2 +- .../userapi/07_functions_on_subdomains.ipynb | 7 +- requirements-testing.txt | 1 - tests/test_adjoint.py | 10 +- tests/test_benchmark.py | 13 +- tests/test_builtins.py | 4 +- tests/test_caching.py | 6 +- tests/test_cse.py | 10 +- tests/test_data.py | 103 +++++--- tests/test_derivatives.py | 8 +- tests/test_differentiable.py | 11 +- tests/test_dimension.py | 29 ++- tests/test_dle.py | 28 +-- tests/test_dse.py | 16 +- tests/test_dtypes.py | 4 +- tests/test_gpu_common.py | 2 +- tests/test_gpu_openacc.py | 8 +- tests/test_gpu_openmp.py | 29 ++- tests/test_gradient.py | 4 +- tests/test_iet.py | 6 +- tests/test_interpolation.py | 6 +- tests/test_ir.py | 52 ++-- tests/test_mpi.py | 51 ++-- tests/test_operator.py | 40 +-- tests/test_pickle.py | 30 ++- tests/test_sparse.py | 6 +- tests/test_staggered_utils.py | 6 +- tests/test_subdomains.py | 14 +- tests/test_symbolics.py | 2 +- tests/test_tensors.py | 26 +- tests/test_threading.py | 4 +- tests/test_unexpansion.py | 22 +- tests/test_visitors.py | 2 +- tests/test_warnings.py | 10 +- 93 files changed, 1239 insertions(+), 835 deletions(-) create mode 100644 examples/.ruff.toml diff --git a/benchmarks/regression/benchmarks/arguments.py b/benchmarks/regression/benchmarks/arguments.py index 662ccec7eb..35028db8f9 100644 --- a/benchmarks/regression/benchmarks/arguments.py +++ b/benchmarks/regression/benchmarks/arguments.py @@ -10,9 +10,9 @@ class Processing: def setup(self): grid = Grid(shape=(5, 5, 5)) - funcs = [Function(name='f%d' % n, grid=grid) for n in range(30)] - tfuncs = [TimeFunction(name='u%d' % n, grid=grid) for n in range(30)] - stfuncs = [SparseTimeFunction(name='su%d' % n, grid=grid, npoint=1, nt=100) + funcs = [Function(name=f'f{n}', grid=grid) for 
n in range(30)] + tfuncs = [TimeFunction(name=f'u{n}', grid=grid) for n in range(30)] + stfuncs = [SparseTimeFunction(name=f'su{n}', grid=grid, npoint=1, nt=100) for n in range(30)] v = TimeFunction(name='v', grid=grid, space_order=2) diff --git a/benchmarks/user/advisor/run_advisor.py b/benchmarks/user/advisor/run_advisor.py index 23fdaa1f75..8d20607da4 100644 --- a/benchmarks/user/advisor/run_advisor.py +++ b/benchmarks/user/advisor/run_advisor.py @@ -140,8 +140,7 @@ def run_with_advisor(path, output, name, exec_args): advixe_logger.setLevel(logging.INFO) advixe_formatter = logging.Formatter('%(asctime)s: %(message)s') - logger_datetime = '%d.%d.%d.%d.%d.%d' % (dt.year, dt.month, - dt.day, dt.hour, dt.minute, dt.second) + logger_datetime = f'{dt.year}.{dt.month}.{dt.day}.{dt.hour}.{dt.minute}.{dt.second}' advixe_handler = logging.FileHandler(f'{output}/{name}_{logger_datetime}.log') advixe_handler.setFormatter(advixe_formatter) advixe_logger.addHandler(advixe_handler) diff --git a/benchmarks/user/benchmark.py b/benchmarks/user/benchmark.py index 4773f94dbb..f2f29ffe6a 100644 --- a/benchmarks/user/benchmark.py +++ b/benchmarks/user/benchmark.py @@ -1,4 +1,5 @@ import os +from contextlib import suppress import click import numpy as np @@ -73,8 +74,10 @@ def run_op(solver, operator, **options): # Get the operator if exist try: op = getattr(solver, operator) - except AttributeError: - raise AttributeError(f"Operator {operator} not implemented for {solver}") + except AttributeError as e: + raise AttributeError( + f"Operator {operator} not implemented for {solver}" + ) from e # This is a bit ugly but not sure how to make clean input creation for different op if operator == "forward": @@ -164,7 +167,10 @@ def from_opt(ctx, param, value): # E.g. 
`'advanced'` opt = value if opt not in configuration._accepted['opt']: - raise click.BadParameter("Invalid choice `{}` (choose from {})".format(opt, str(configuration._accepted['opt']))) + raise click.BadParameter( + f'Invalid choice `{opt} ' + f'(choose from {str(configuration._accepted["opt"])})' + ) return value def config_blockshape(ctx, param, value): @@ -180,7 +186,7 @@ def config_blockshape(ctx, param, value): # 1. integers, not strings # 2. sanity check the (hierarchical) blocking shape normalized_value = [] - for i, block_shape in enumerate(value): + for block_shape in value: # If hierarchical blocking is activated, say with N levels, here in # `bs` we expect to see 3*N entries bs = [int(x) for x in block_shape.split()] @@ -204,7 +210,10 @@ def config_autotuning(ctx, param, value): elif value != 'off': # Sneak-peek at the `block-shape` -- if provided, keep auto-tuning off if ctx.params['block_shape']: - warning("Skipping autotuning (using explicit block-shape `{}`)".format(str(ctx.params['block_shape']))) + warning( + 'Skipping autotuning' + f'(using explicit block-shape `{str(ctx.params["block_shape"])}`)' + ) level = False else: # Make sure to always run in preemptive mode @@ -272,8 +281,8 @@ def run(problem, **kwargs): # Note: the following piece of code is horribly *hacky*, but it works for now for i, block_shape in enumerate(block_shapes): for n, level in enumerate(block_shape): - for d, s in zip(['x', 'y', 'z'], level): - options['%s%d_blk%d_size' % (d, i, n)] = s + for d, s in zip(['x', 'y', 'z'], level, strict=True): + options[f'{d}{i}_blk{n}_size'] = s solver = setup(space_order=space_order, time_order=time_order, **kwargs) if warmup: @@ -345,7 +354,7 @@ def run_jit_backdoor(problem, **kwargs): if not os.path.exists(cfile): # First time we run this problem, let's generate and jit-compile code - op.cfunction + _ = op.cfunction info(f"You may now edit the generated code in `{cfile}`. 
" "Then save the file, and re-run this benchmark.") return @@ -403,9 +412,10 @@ def test(problem, **kwargs): set_log_level('DEBUG', comm=MPI.COMM_WORLD) if MPI.COMM_WORLD.size > 1 and not configuration['mpi']: - warning("It seems that you're running over MPI with %d processes, but " - "DEVITO_MPI is unset. Setting `DEVITO_MPI=basic`..." - % MPI.COMM_WORLD.size) + warning( + f'It seems that you are running over MPI with {MPI.COMM_WORLD.size} ' + 'processes, but DEVITO_MPI is unset. Setting `DEVITO_MPI=basic`...' + ) configuration['mpi'] = 'basic' except (TypeError, ModuleNotFoundError): # MPI not available @@ -417,8 +427,6 @@ def test(problem, **kwargs): benchmark(standalone_mode=False) - try: + # In case MPI not available + with suppress(TypeError): MPI.Finalize() - except TypeError: - # MPI not available - pass diff --git a/conftest.py b/conftest.py index 7933978f1a..65bfddb71c 100644 --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,6 @@ import os import sys +from contextlib import suppress from subprocess import check_call import pytest @@ -51,17 +52,18 @@ def skipif(items, whole_module=False): langs = configuration._accepted['language'] if any(i == f'device-{l}' and configuration['language'] == l for l in langs)\ and isinstance(configuration['platform'], Device): - skipit = "language `{}` for device unsupported".format(configuration['language']) + skipit = f'language `{configuration["language"]}` for device unsupported' break if any(i == f'device-{k}' and isinstance(configuration['compiler'], v) for k, v in compiler_registry.items()) and\ isinstance(configuration['platform'], Device): - skipit = "compiler `{}` for device unsupported".format(configuration['compiler']) + skipit = f'compiler `{configuration["compiler"]}` for device unsupported' break # Skip if must run on GPUs but not currently on a GPU if i in ('nodevice', 'nodevice-omp', 'nodevice-acc') and\ not isinstance(configuration['platform'], Device): - skipit = ("must run on device, but currently on 
`{}`".format(configuration['platform'].name)) + skipit = 'must run on device, but currently on ' + skipit += f'`{configuration["platform"].name}`' break # Skip if it won't run with nvc on CPU backend if i == 'cpu64-nvc' and \ @@ -186,16 +188,11 @@ def parallel(item, m): testname = get_testname(item) # Only spew tracebacks on rank 0. # Run xfailing tests to ensure that errors are reported to calling process - args = [ - "-n", "1", pyversion, "-m", "pytest", "-s", "--runxfail", "-v", - "--timeout=600", "--timeout-method=thread", "-o faulthandler_timeout=660", - testname - ] + args = ["-n", "1", pyversion, "-m", "pytest", "-s", "--runxfail", "-qq", testname] if nprocs > 1: args.extend([ - ":", "-n", "%d" % (nprocs - 1), pyversion, "-m", "pytest", - "-s", "--runxfail", "-v", "--timeout=600", "--timeout-method=thread", - "-o faulthandler_timeout=660", testname + ":", "-n", str(nprocs - 1), pyversion, "-m", "pytest", + "-s", "--runxfail", "-v", "--no-summary", testname ]) # OpenMPI requires an explicit flag for oversubscription. 
We need it as some # of the MPI tests will spawn lots of processes @@ -253,10 +250,8 @@ def pytest_generate_tests(metafunc): @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_call(item): inside_pytest_marker = os.environ.get('DEVITO_PYTEST_FLAG', 0) - try: + with suppress(ValueError): inside_pytest_marker = int(inside_pytest_marker) - except ValueError: - pass if inside_pytest_marker: outcome = yield @@ -287,15 +282,13 @@ def pytest_runtest_makereport(item, call): result = outcome.get_result() inside_pytest_marker = os.environ.get('DEVITO_PYTEST_FLAG', 0) - try: + with suppress(ValueError): inside_pytest_marker = int(inside_pytest_marker) - except ValueError: - pass if inside_pytest_marker: return if item.get_closest_marker("parallel") or \ - item.get_closest_marker("decoupler"): + item.get_closest_marker("decoupler"): # noqa: SIM102 if call.when == 'call' and result.outcome == 'skipped': result.outcome = 'passed' diff --git a/examples/.ruff.toml b/examples/.ruff.toml new file mode 100644 index 0000000000..f517388941 --- /dev/null +++ b/examples/.ruff.toml @@ -0,0 +1,6 @@ +# Extend the `pyproject.toml` file in the parent directory +extend = "../pyproject.toml" + +# Use a different line length for examples only +# TODO: Shorten line lengths in examples +line-length = 120 diff --git a/examples/cfd/01_convection.ipynb b/examples/cfd/01_convection.ipynb index 8e4cf8e8e4..13ee3ac72e 100644 --- a/examples/cfd/01_convection.ipynb +++ b/examples/cfd/01_convection.ipynb @@ -109,7 +109,7 @@ "# Repeat initialisation, so we can re-run the cell\n", "init_hat(field=u, dx=dx, dy=dy, value=2.)\n", "\n", - "for n in range(nt + 1):\n", + "for _ in range(nt + 1):\n", " # Copy previous result into a new buffer\n", " un = u.copy()\n", "\n", @@ -122,7 +122,8 @@ " u[-1, :] = 1. # right\n", " u[:, 0] = 1. # bottom\n", " u[:, -1] = 1. 
# top\n", - " # Note that in the above expressions the NumPy index -1 corresponds to the final point of the array along the indexed dimension,\n", + " # Note that in the above expressions the NumPy index -1 corresponds to the\n", + " # final point of the array along the indexed dimension,\n", " # i.e. here u[-1, :] is equivalent to u[80, :].\n" ] }, diff --git a/examples/cfd/01_convection_revisited.ipynb b/examples/cfd/01_convection_revisited.ipynb index b05445a07d..ae762a6d16 100644 --- a/examples/cfd/01_convection_revisited.ipynb +++ b/examples/cfd/01_convection_revisited.ipynb @@ -103,7 +103,7 @@ "# Repeat initialisation, so we can re-run the cell\n", "init_smooth(field=u, dx=dx, dy=dy)\n", "\n", - "for n in range(nt + 1):\n", + "for _ in range(nt + 1):\n", " # Copy previous result into a new buffer\n", " un = u.copy()\n", "\n", diff --git a/examples/cfd/02_convection_nonlinear.ipynb b/examples/cfd/02_convection_nonlinear.ipynb index 8181eca6f8..9291b1444f 100644 --- a/examples/cfd/02_convection_nonlinear.ipynb +++ b/examples/cfd/02_convection_nonlinear.ipynb @@ -110,7 +110,7 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "for n in range(nt + 1): # loop across number of time steps\n", + "for _ in range(nt + 1): # loop across number of time steps\n", " un = u.copy()\n", " vn = v.copy()\n", " u[1:, 1:] = (un[1:, 1:] -\n", diff --git a/examples/cfd/03_diffusion.ipynb b/examples/cfd/03_diffusion.ipynb index 14d786e4f8..5406ba5861 100644 --- a/examples/cfd/03_diffusion.ipynb +++ b/examples/cfd/03_diffusion.ipynb @@ -57,7 +57,7 @@ "outputs": [], "source": [ "def diffuse(u, nt):\n", - " for n in range(nt + 1):\n", + " for _ in range(nt + 1):\n", " un = u.copy()\n", " u[1:-1, 1:-1] = (un[1:-1, 1:-1] +\n", " nu * dt / dy**2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", diff --git a/examples/cfd/03_diffusion_nonuniform.ipynb b/examples/cfd/03_diffusion_nonuniform.ipynb index 3e320ea717..09121abc22 100644 --- a/examples/cfd/03_diffusion_nonuniform.ipynb +++ 
b/examples/cfd/03_diffusion_nonuniform.ipynb @@ -80,7 +80,7 @@ "outputs": [], "source": [ "def diffuse(u, nt, visc):\n", - " for n in range(nt + 1):\n", + " for _ in range(nt + 1):\n", " un = u.copy()\n", " u[1:-1, 1:-1] = (un[1:-1, 1:-1] +\n", " visc*dt / dy**2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", @@ -257,7 +257,6 @@ ], "source": [ "from devito import Grid, TimeFunction, Eq, solve, Function\n", - "from sympy.abc import a\n", "\n", "# Initialize `u` for space order 2\n", "grid = Grid(shape=(nx, ny), extent=(2., 2.))\n", diff --git a/examples/cfd/06_poisson.ipynb b/examples/cfd/06_poisson.ipynb index edb8759681..eff42e3a5b 100644 --- a/examples/cfd/06_poisson.ipynb +++ b/examples/cfd/06_poisson.ipynb @@ -81,7 +81,7 @@ "source": [ "%%time\n", "# NBVAL_IGNORE_OUTPUT\n", - "for it in range(nt):\n", + "for _ in range(nt):\n", " pd = p.copy()\n", " p[1:-1, 1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +\n", " (pd[2:, 1:-1] + pd[:-2, 1:-1]) * dx**2 -\n", diff --git a/examples/cfd/07_cavity_flow.ipynb b/examples/cfd/07_cavity_flow.ipynb index 0832cbdd57..132305efaa 100644 --- a/examples/cfd/07_cavity_flow.ipynb +++ b/examples/cfd/07_cavity_flow.ipynb @@ -176,7 +176,7 @@ " pn = np.empty_like(p)\n", " pn = p.copy()\n", "\n", - " for q in range(nit):\n", + " for _ in range(nit):\n", " pn = p.copy()\n", " p[1:-1, 1:-1] = (((pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dy**2 +\n", " (pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dx**2) /\n", @@ -211,7 +211,7 @@ " vn = np.empty_like(v)\n", " b = np.zeros((nx, ny))\n", "\n", - " for n in range(0, nt):\n", + " for _ in range(0, nt):\n", " un = u.copy()\n", " vn = v.copy()\n", "\n", diff --git a/examples/cfd/08_shallow_water_equation.ipynb b/examples/cfd/08_shallow_water_equation.ipynb index 467972c0bb..14c041cc71 100644 --- a/examples/cfd/08_shallow_water_equation.ipynb +++ b/examples/cfd/08_shallow_water_equation.ipynb @@ -90,7 +90,7 @@ " Operator that solves the equations expressed above.\n", " It computes and returns the 
discharge fluxes M, N and wave height eta from\n", " the 2D Shallow water equation using the FTCS finite difference method.\n", - " \n", + "\n", " Parameters\n", " ----------\n", " eta : TimeFunction\n", @@ -113,7 +113,7 @@ " animations.\n", " \"\"\"\n", "\n", - " eps = np.finfo(grid.dtype).eps\n", + " # eps = np.finfo(grid.dtype).eps\n", "\n", " # Friction term expresses the loss of amplitude from the friction with the seafloor\n", " frictionTerm = g * alpha**2 * sqrt(M**2 + N**2) / D**(7./3.)\n", @@ -121,7 +121,10 @@ " # System of equations\n", " pde_eta = Eq(eta.dt + M.dxc + N.dyc)\n", " pde_M = Eq(M.dt + (M**2/D).dxc + (M*N/D).dyc + g*D*eta.forward.dxc + frictionTerm*M)\n", - " pde_N = Eq(N.dt + (M.forward*N/D).dxc + (N**2/D).dyc + g*D*eta.forward.dyc + g * alpha**2 * sqrt(M.forward**2 + N**2) / D**(7./3.)*N)\n", + " pde_N = Eq(\n", + " N.dt + (M.forward*N/D).dxc + (N**2/D).dyc + g*D*eta.forward.dyc\n", + " + g * alpha**2 * sqrt(M.forward**2 + N**2) / D**(7./3.)*N\n", + " )\n", "\n", " stencil_eta = solve(pde_eta, eta.forward)\n", " stencil_M = solve(pde_M, M.forward)\n", diff --git a/examples/cfd/09_Darcy_flow_equation.ipynb b/examples/cfd/09_Darcy_flow_equation.ipynb index fe4979ccc0..2a90cf92c1 100644 --- a/examples/cfd/09_Darcy_flow_equation.ipynb +++ b/examples/cfd/09_Darcy_flow_equation.ipynb @@ -109,7 +109,7 @@ " self.sqrt_eig[0, 0] = 0.0\n", "\n", " self.size = []\n", - " for j in range(self.dim):\n", + " for _ in range(self.dim):\n", " self.size.append(size)\n", "\n", " self.size = tuple(self.size)\n", @@ -365,8 +365,8 @@ "'''\n", "Function to generate 'u' from 'a' using Devito\n", "\n", - "parameters \n", - "-----------------\n", + "Parameters\n", + "----------\n", "perm: Array of size (s, s)\n", " This is \"a\"\n", "f: Array of size (s, s)\n", @@ -375,7 +375,6 @@ "\n", "\n", "def darcy_flow_2d(perm, f):\n", - "\n", " # a(x) is the coefficients\n", " # f is the forcing function\n", " # initialize a, f with inputs permeability and forcing\n", diff 
--git a/examples/cfd/example_diffusion.py b/examples/cfd/example_diffusion.py index ccaf5f3faf..d4fc284ca0 100644 --- a/examples/cfd/example_diffusion.py +++ b/examples/cfd/example_diffusion.py @@ -51,8 +51,10 @@ def execute_python(ui, spacing=0.01, a=0.5, timesteps=500): uyy = (u[t0, i, j+1] - 2*u[t0, i, j] + u[t0, i, j-1]) / dy2 u[t1, i, j] = u[t0, i, j] + dt * a * (uxx + uyy) runtime = time.time() - tstart - log("Python: Diffusion with dx=%0.4f, dy=%0.4f, executed %d timesteps in %f seconds" - % (spacing, spacing, timesteps, runtime)) + log( + f'Python: Diffusion with dx={spacing:0.4f}, dy={spacing:0.4f}, ' + f'executed {timesteps} timesteps in {runtime} seconds' + ) return u[ti % 2, :, :], runtime @@ -73,8 +75,10 @@ def execute_numpy(ui, spacing=0.01, a=0.5, timesteps=500): uyy = (u[t0, 1:-1, 2:] - 2*u[t0, 1:-1, 1:-1] + u[t0, 1:-1, :-2]) / dy2 u[t1, 1:-1, 1:-1] = u[t0, 1:-1, 1:-1] + a * dt * (uxx + uyy) runtime = time.time() - tstart - log("Numpy: Diffusion with dx=%0.4f, dy=%0.4f, executed %d timesteps in %f seconds" - % (spacing, spacing, timesteps, runtime)) + log( + f'Numpy: Diffusion with dx={spacing:0.4f}, dy={spacing:0.4f}, ' + f'executed {timesteps} timesteps in {runtime} seconds' + ) return u[ti % 2, :, :], runtime @@ -108,8 +112,10 @@ def diffusion_stencil(): u[t0, :-2, 1:-1], u[t0, 1:-1, 2:], u[t0, 1:-1, :-2], dt, spacing) runtime = time.time() - tstart - log("Lambdify: Diffusion with dx=%0.4f, dy=%0.4f, executed %d timesteps in %f seconds" - % (spacing, spacing, timesteps, runtime)) + log( + f'Lambdify: Diffusion with dx={spacing:0.4f}, dy={spacing:0.4f}, ' + f'executed {timesteps} timesteps in {runtime} seconds' + ) return u[ti % 2, :, :], runtime @@ -133,8 +139,10 @@ def execute_devito(ui, spacing=0.01, a=0.5, timesteps=500): tstart = time.time() op.apply(u=u, t=timesteps, dt=dt) runtime = time.time() - tstart - log("Devito: Diffusion with dx=%0.4f, dy=%0.4f, executed %d timesteps in %f seconds" - % (spacing, spacing, timesteps, runtime)) + log( + 
f'Devito: Diffusion with dx={spacing:0.4f}, dy={spacing:0.4f}, ' + f'executed {timesteps} timesteps in {runtime} seconds' + ) return u.data[1, :], runtime diff --git a/examples/cfd/tools.py b/examples/cfd/tools.py index 5221176581..76daeed19b 100644 --- a/examples/cfd/tools.py +++ b/examples/cfd/tools.py @@ -29,9 +29,8 @@ def plot_field(field, xmin=0., xmax=2., ymin=0., ymax=2., zmin=None, zmax=None, elif(zmin is None and zmax is not None): if np.min(field) >= zmax: warning("zmax is less than field's minima. Figure deceptive.") - elif(zmin is not None and zmax is None): - if np.max(field) <= zmin: - warning("zmin is larger than field's maxima. Figure deceptive.") + elif(zmin is not None and zmax is None) and np.max(field) <= zmin: + warning("zmin is larger than field's maxima. Figure deceptive.") x_coord = np.linspace(xmin, xmax, field.shape[0]) y_coord = np.linspace(ymin, ymax, field.shape[1]) fig = pyplot.figure(figsize=(11, 7), dpi=100) diff --git a/examples/finance/bs_ivbp.ipynb b/examples/finance/bs_ivbp.ipynb index 44457bcda0..807054aa2b 100644 --- a/examples/finance/bs_ivbp.ipynb +++ b/examples/finance/bs_ivbp.ipynb @@ -519,8 +519,8 @@ "\n", "endBF = timer.time()\n", "\n", - "print(\"devito pde timesteps: %12.6s, %12.6fs runtime\" % (nt-1, endDevito - startDevito))\n", - "print(\"call_value_bs timesteps: %12.6s, %12.6fs runtime\" % (len(time), endBF - startBF))\n", + "print(f\"devito pde timesteps: {nt - 1}, {endDevito - startDevito:12.6f}s runtime\")\n", + "print(f\"call_value_bs timesteps: {len(time)}, {endBF - startBF:12.6f}s runtime\")\n", "\n", "s2 = np.linspace(smin, smax, shape[0])\n", "plt.figure(figsize=(12, 10))\n", diff --git a/examples/performance/02_advisor_roofline.ipynb b/examples/performance/02_advisor_roofline.ipynb index aeb3c5aa2d..2fbc63174d 100644 --- a/examples/performance/02_advisor_roofline.ipynb +++ b/examples/performance/02_advisor_roofline.ipynb @@ -108,8 +108,12 @@ ], "source": [ "# NBVAL_SKIP\n", - "\n", - "! 
python3 $DEVITO_JUPYTER/benchmarks/user/advisor/run_advisor.py --path $DEVITO_JUPYTER/benchmarks/user/benchmark.py --exec-args \"run -P acoustic -d 64 64 64 -so 4 --tn 50 --autotune off\" --output $DEVITO_JUPYTER/examples/performance/profilings --name JupyterProfiling\n" + "%%bash\n", + "python3 $DEVITO_JUPYTER/benchmarks/user/advisor/run_advisor.py \\\n", + " --path $DEVITO_JUPYTER/benchmarks/user/benchmark.py \\\n", + " --exec-args \"run -P acoustic -d 64 64 64 -so 4 --tn 50 --autotune off\" \\\n", + " --output $DEVITO_JUPYTER/examples/performance/profilings \\\n", + " --name JupyterProfiling" ] }, { @@ -167,8 +171,12 @@ ], "source": [ "# NBVAL_SKIP\n", - "\n", - "! python3 $DEVITO_JUPYTER/benchmarks/user/advisor/roofline.py --mode overview --name $DEVITO_JUPYTER/examples/performance/resources/OverviewRoof --project $DEVITO_JUPYTER/examples/performance/profilings/JupyterProfiling\n" + "%%bash\n", + "python3 $DEVITO_JUPYTER/benchmarks/user/advisor/roofline.py \\\n", + " --mode overview \\\n", + " --name $DEVITO_JUPYTER/examples/performance/resources/OverviewRoof \\\n", + " --project $DEVITO_JUPYTER/examples/performance/profilings/JupyterProfiling \\\n", + " $DEVITO_JUPYTER/benchmarks/user/advisor/run_advisor.py" ] }, { @@ -231,8 +239,11 @@ ], "source": [ "# NBVAL_SKIP\n", - "\n", - "! 
python3 $DEVITO_JUPYTER/benchmarks/user/advisor/roofline.py --mode top-loops --name $DEVITO_JUPYTER/examples/performance/resources/TopLoopsRoof --project $DEVITO_JUPYTER/examples/performance/profilings/JupyterProfiling\n" + "%%bash\n", + "python3 $DEVITO_JUPYTER/benchmarks/user/advisor/roofline.py \\\n", + " --mode top-loops \\\n", + " --name $DEVITO_JUPYTER/examples/performance/resources/TopLoopsRoof \\\n", + " --project $DEVITO_JUPYTER/examples/performance/profilings/JupyterProfiling \\\n" ] }, { diff --git a/examples/seismic/abc_methods/01_introduction.ipynb b/examples/seismic/abc_methods/01_introduction.ipynb index b470078b62..65cd36e38b 100644 --- a/examples/seismic/abc_methods/01_introduction.ipynb +++ b/examples/seismic/abc_methods/01_introduction.ipynb @@ -357,22 +357,22 @@ "outputs": [], "source": [ "def graph2dvel(vel):\n", - " plot.figure()\n", - " plot.figure(figsize=(16, 8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel)\n", - " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", - " fig = plot.imshow(np.transpose(vel), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.title('Velocity Profile')\n", - " plot.grid()\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " cbar.set_label('Velocity [km/s]')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel)\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = plot.imshow(np.transpose(vel), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " 
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.title('Velocity Profile')\n", + " plot.grid()\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " cbar.set_label('Velocity [km/s]')\n", + " plot.show()" ] }, { @@ -561,7 +561,15 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src', grid=grid, f0=f0, npoint=nsource, time_range=time_range, staggered=NODE, dtype=np.float64)\n", + "src = RickerSource(\n", + " name='src',\n", + " grid=grid,\n", + " f0=f0,\n", + " npoint=nsource,\n", + " time_range=time_range,\n", + " staggered=NODE,\n", + " dtype=np.float64\n", + ")\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -1026,22 +1034,22 @@ "outputs": [], "source": [ "def graph2drec(rec):\n", - " plot.figure()\n", - " plot.figure(figsize=(16, 8))\n", - " fscaled = 1/10**(3)\n", - " fscalet = 1/10**(3)\n", - " scale = np.amax(rec)/10.\n", - " extent = [fscaled*x0, fscaled*x1, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec, vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", - " plot.axis('equal')\n", - " plot.title('Receivers Signal Profile - Devito')\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscaled = 1/10**(3)\n", + " fscalet = 1/10**(3)\n", + " scale = np.amax(rec)/10.\n", + " extent = [fscaled*x0, fscaled*x1, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec, vmin=-scale, vmax=scale, cmap=cm.seismic, 
extent=extent)\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", + " plot.axis('equal')\n", + " plot.title('Receivers Signal Profile - Devito')\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " _ = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " plot.show()" ] }, { diff --git a/examples/seismic/abc_methods/02_damping.ipynb b/examples/seismic/abc_methods/02_damping.ipynb index 3ade379331..1c21b1e464 100644 --- a/examples/seismic/abc_methods/02_damping.ipynb +++ b/examples/seismic/abc_methods/02_damping.ipynb @@ -390,7 +390,13 @@ "metadata": {}, "outputs": [], "source": [ - "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain, d1_domain, d2_domain, d3_domain), dtype=np.float64)" + "grid = Grid(\n", + " origin=origin,\n", + " extent=extent,\n", + " shape=shape,\n", + " subdomains=(d0_domain, d1_domain, d2_domain, d3_domain),\n", + " dtype=np.float64\n", + ")" ] }, { @@ -423,10 +429,12 @@ "pzm = 0\n", "\n", "for i in range(0, nptx):\n", - " if(X0[i] == xm): pxm = i\n", + " if(X0[i] == xm):\n", + " pxm = i\n", "\n", "for j in range(0, nptz):\n", - " if(Z0[j] == zm): pzm = j\n", + " if(Z0[j] == zm):\n", + " pzm = j\n", "\n", "p0 = 0\n", "p1 = pzm\n", @@ -450,22 +458,28 @@ "outputs": [], "source": [ "def graph2dvel(vel):\n", - " plot.figure()\n", - " plot.figure(figsize=(16, 8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", - " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", - " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx, 0:-npmlz]), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " 
plot.title('Velocity Profile')\n", - " plot.grid()\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " cbar.set_label('Velocity [km/s]')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", + " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", + " fig = plot.imshow(\n", + " np.transpose(vel[npmlx:-npmlx, 0:-npmlz]),\n", + " vmin=0.,\n", + " vmax=scale,\n", + " cmap=cm.seismic,\n", + " extent=extent\n", + " )\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.title('Velocity Profile')\n", + " plot.grid()\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " cbar.set_label('Velocity [km/s]')\n", + " plot.show()" ] }, { @@ -596,7 +610,15 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src', grid=grid, f0=f0, npoint=nsource, time_range=time_range, staggered=NODE, dtype=np.float64)\n", + "src = RickerSource(\n", + " name='src',\n", + " grid=grid,\n", + " f0=f0,\n", + " npoint=nsource,\n", + " time_range=time_range,\n", + " staggered=NODE,\n", + " dtype=np.float64\n", + ")\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -1155,22 +1177,22 @@ "outputs": [], "source": [ "def graph2drec(rec):\n", - " plot.figure()\n", - " plot.figure(figsize=(16, 8))\n", - " fscaled = 1/10**(3)\n", - " fscalet = 1/10**(3)\n", - " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", - " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec[:, 
npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", - " plot.axis('equal')\n", - " plot.title('Receivers Signal Profile with Damping - Devito')\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscaled = 1/10**(3)\n", + " fscalet = 1/10**(3)\n", + " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", + " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", + " plot.axis('equal')\n", + " plot.title('Receivers Signal Profile with Damping - Devito')\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " _ = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " plot.show()" ] }, { diff --git a/examples/seismic/abc_methods/03_pml.ipynb b/examples/seismic/abc_methods/03_pml.ipynb index 2f19230259..b8a346dfeb 100644 --- a/examples/seismic/abc_methods/03_pml.ipynb +++ b/examples/seismic/abc_methods/03_pml.ipynb @@ -386,7 +386,13 @@ "metadata": {}, "outputs": [], "source": [ - "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain, d1_domain, d2_domain, d3_domain), dtype=np.float64)" + "grid = Grid(\n", + " origin=origin,\n", + " extent=extent,\n", + " shape=shape,\n", + " subdomains=(d0_domain, d1_domain, d2_domain, d3_domain),\n", + " dtype=np.float64\n", + ")" ] }, { @@ -420,10 
+426,12 @@ "pzm = 0\n", "\n", "for i in range(0, nptx):\n", - " if(X0[i] == xm): pxm = i\n", + " if(X0[i] == xm):\n", + " pxm = i\n", "\n", "for j in range(0, nptz):\n", - " if(Z0[j] == zm): pzm = j\n", + " if(Z0[j] == zm):\n", + " pzm = j\n", "\n", "p0 = 0\n", "p1 = pzm\n", @@ -452,22 +460,28 @@ "outputs": [], "source": [ "def graph2dvel(vel):\n", - " plot.figure()\n", - " plot.figure(figsize=(16, 8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", - " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", - " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx, 0:-npmlz]), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.title('Velocity Profile')\n", - " plot.grid()\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " cbar.set_label('Velocity [km/s]')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", + " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", + " fig = plot.imshow(\n", + " np.transpose(vel[npmlx:-npmlx, 0:-npmlz]),\n", + " vmin=0.,\n", + " vmax=scale,\n", + " cmap=cm.seismic,\n", + " extent=extent\n", + " )\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.title('Velocity Profile')\n", + " plot.grid()\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " 
cbar.set_label('Velocity [km/s]')\n", + " plot.show()" ] }, { @@ -584,7 +598,15 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src', grid=grid, f0=f0, npoint=nsource, time_range=time_range, staggered=NODE, dtype=np.float64)\n", + "src = RickerSource(\n", + " name='src',\n", + " grid=grid,\n", + " f0=f0,\n", + " npoint=nsource,\n", + " time_range=time_range,\n", + " staggered=NODE,\n", + " dtype=np.float64\n", + ")\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -1129,9 +1151,18 @@ "metadata": {}, "outputs": [], "source": [ - "stencil02 = [Eq(u.forward, solve(pde02, u.forward), subdomain=grid.subdomains[subds[i]]) for i in range(0, len(subds))]\n", - "stencil1 = [Eq(phi1.forward, solve(pde1, phi1.forward), subdomain=grid.subdomains[subds[i]]) for i in range(0, len(subds))]\n", - "stencil2 = [Eq(phi2.forward, solve(pde2, phi2.forward), subdomain=grid.subdomains[subds[i]]) for i in range(0, len(subds))]" + "stencil02 = [\n", + " Eq(u.forward, solve(pde02, u.forward), subdomain=grid.subdomains[subds[i]])\n", + " for i in range(0, len(subds))\n", + "]\n", + "stencil1 = [\n", + " Eq(phi1.forward, solve(pde1, phi1.forward), subdomain=grid.subdomains[subds[i]])\n", + " for i in range(0, len(subds))\n", + "]\n", + "stencil2 = [\n", + " Eq(phi2.forward, solve(pde2, phi2.forward), subdomain=grid.subdomains[subds[i]])\n", + " for i in range(0, len(subds))\n", + "]" ] }, { @@ -1324,22 +1355,22 @@ "outputs": [], "source": [ "def graph2drec(rec):\n", - " plot.figure()\n", - " plot.figure(figsize=(16, 8))\n", - " fscaled = 1/10**(3)\n", - " fscalet = 1/10**(3)\n", - " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", - " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " 
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", - " plot.axis('equal')\n", - " plot.title('Receivers Signal Profile with PML - Devito')\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscaled = 1/10**(3)\n", + " fscalet = 1/10**(3)\n", + " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", + " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", + " plot.axis('equal')\n", + " plot.title('Receivers Signal Profile with PML - Devito')\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " _ = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " plot.show()" ] }, { diff --git a/examples/seismic/abc_methods/04_habc.ipynb b/examples/seismic/abc_methods/04_habc.ipynb index 75a563b1ba..7ba4148b02 100644 --- a/examples/seismic/abc_methods/04_habc.ipynb +++ b/examples/seismic/abc_methods/04_habc.ipynb @@ -443,7 +443,13 @@ "metadata": {}, "outputs": [], "source": [ - "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain, d1_domain, d2_domain, d3_domain), dtype=np.float64)" + "grid = Grid(\n", + " origin=origin,\n", + " extent=extent,\n", + " shape=shape,\n", + " subdomains=(d0_domain, d1_domain, d2_domain, d3_domain),\n", + " dtype=np.float64\n", + ")" ] }, { @@ -469,10 +475,12 @@ "pzm = 0\n", "\n", "for i in range(0, nptx):\n", - " if(X0[i] == xm): pxm = i\n", + " if(X0[i] == xm):\n", + " pxm = i\n", "\n", "for j in range(0, nptz):\n", - 
" if(Z0[j] == zm): pzm = j\n", + " if(Z0[j] == zm):\n", + " pzm = j\n", "\n", "p0 = 0\n", "p1 = pzm\n", @@ -496,22 +504,28 @@ "outputs": [], "source": [ "def graph2dvel(vel):\n", - " plot.figure()\n", - " plot.figure(figsize=(16, 8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", - " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", - " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx, 0:-npmlz]), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.title('Velocity Profile')\n", - " plot.grid()\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " cbar.set_label('Velocity [km/s]')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", + " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", + " fig = plot.imshow(\n", + " np.transpose(vel[npmlx:-npmlx, 0:-npmlz]),\n", + " vmin=0.,\n", + " vmax=scale,\n", + " cmap=cm.seismic,\n", + " extent=extent\n", + " )\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.title('Velocity Profile')\n", + " plot.grid()\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " cbar.set_label('Velocity [km/s]')\n", + " plot.show()" ] }, { @@ -635,7 +649,15 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src', grid=grid, f0=f0, 
npoint=nsource, time_range=time_range, staggered=NODE, dtype=np.float64)\n", + "src = RickerSource(\n", + " name='src',\n", + " grid=grid,\n", + " f0=f0,\n", + " npoint=nsource,\n", + " time_range=time_range,\n", + " staggered=NODE,\n", + " dtype=np.float64\n", + ")\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -1136,7 +1158,11 @@ " cte41 = (1/(dt**2))\n", " cte51 = (1/(4*hz**2))*vel[x, z]**2\n", "\n", - " aux1 = (cte21*(u3[x+1, z] + u1[x, z]) + cte31*u1[x+1, z] + cte41*(u2[x, z]+u2[x+1, z]) + cte51*(u3[x+1, z+1] + u3[x+1, z-1] + u1[x, z+1] + u1[x, z-1]))/cte11\n", + " aux1 = (\n", + " cte21*(u3[x+1, z] + u1[x, z])\n", + " + cte31*u1[x+1, z] + cte41*(u2[x, z]+u2[x+1, z])\n", + " + cte51*(u3[x+1, z+1] + u3[x+1, z-1] + u1[x, z+1] + u1[x, z-1])\n", + " )/cte11\n", " pde1 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux1\n", " stencil1 = Eq(u.forward, pde1, subdomain=grid.subdomains['d1'])\n", "\n", @@ -1147,7 +1173,11 @@ " cte42 = (1/(dt**2))\n", " cte52 = (1/(4*hz**2))*vel[x, z]*vel[x, z]\n", "\n", - " aux2 = (cte22*(u3[x-1, z] + u1[x, z]) + cte32*u1[x-1, z] + cte42*(u2[x, z]+u2[x-1, z]) + cte52*(u3[x-1, z+1] + u3[x-1, z-1] + u1[x, z+1] + u1[x, z-1]))/cte12\n", + " aux2 = (\n", + " cte22*(u3[x-1, z] + u1[x, z])\n", + " + cte32*u1[x-1, z] + cte42*(u2[x, z]+u2[x-1, z])\n", + " + cte52*(u3[x-1, z+1] + u3[x-1, z-1] + u1[x, z+1] + u1[x, z-1])\n", + " )/cte12\n", " pde2 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux2\n", " stencil2 = Eq(u.forward, pde2, subdomain=grid.subdomains['d2'])\n", "\n", @@ -1158,31 +1188,46 @@ " cte43 = (1/(dt**2))\n", " cte53 = (1/(4*hx**2))*vel[x, z]*vel[x, z]\n", "\n", - " aux3 = (cte23*(u3[x, z-1] + u1[x, z]) + cte33*u1[x, z-1] + cte43*(u2[x, z]+u2[x, z-1]) + cte53*(u3[x+1, z-1] + u3[x-1, z-1] + u1[x+1, z] + u1[x-1, z]))/cte13\n", + " aux3 = (\n", + " cte23*(u3[x, z-1] + u1[x, z])\n", + " + cte33*u1[x, z-1] + cte43*(u2[x, z]+u2[x, z-1])\n", + " + cte53*(u3[x+1, z-1] + u3[x-1, z-1] + u1[x+1, z] + 
u1[x-1, z])\n", + " )/cte13\n", " pde3 = (1-weightsz[x, z])*u3[x, z] + weightsz[x, z]*aux3\n", " stencil3 = Eq(u.forward, pde3, subdomain=grid.subdomains['d3'])\n", "\n", " # Red point right side\n", - " stencil4 = [Eq(u[t+1, nptx-1-k, nptz-1-k], (1-weightsz[nptx-1-k, nptz-1-k])*u3[nptx-1-k, nptz-1-k] +\n", - " weightsz[nptx-1-k, nptz-1-k]*(((-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-1-k, nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-2-k, nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-2-k, nptz-2-k]\n", - " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-1-k, nptz-1-k]\n", - " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-1-k, nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-2-k, nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-2-k, nptz-2-k])\n", - " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt)))) for k in range(0, npmlz)]\n", + " stencil4 = [\n", + " Eq(\n", + " u[t+1, nptx-1-k, nptz-1-k],\n", + " (1-weightsz[nptx-1-k, nptz-1-k])*u3[nptx-1-k, nptz-1-k]\n", + " + weightsz[nptx-1-k, nptz-1-k]*(((-(1/(4*hx))\n", + " + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-1-k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-2-k, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-2-k, nptz-2-k]\n", + " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-1-k, nptz-1-k]\n", + " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-1-k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-2-k, nptz-1-k]\n", + " + 
((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-2-k, nptz-2-k])\n", + " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt)))) for k in range(0, npmlz)\n", + " ]\n", "\n", " # Red point left side\n", - " stencil5 = [Eq(u[t+1, k, nptz-1-k], (1-weightsx[k, nptz-1-k])*u3[k, nptz-1-k]\n", - " + weightsx[k, nptz-1-k]*(((-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k, nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k+1, nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k+1, nptz-2-k]\n", - " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k, nptz-1-k]\n", - " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k, nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k+1, nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k+1, nptz-2-k])\n", - " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt)))) for k in range(0, npmlx)]" + " stencil5 = [\n", + " Eq(\n", + " u[t+1, k, nptz-1-k],\n", + " (1-weightsx[k, nptz-1-k])*u3[k, nptz-1-k]\n", + " + weightsx[k, nptz-1-k]*(((-(1/(4*hx))\n", + " + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k+1, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k+1, nptz-2-k]\n", + " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k, nptz-1-k]\n", + " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k+1, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k+1, nptz-2-k])\n", + " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt)))\n", + " ) for k in 
range(0, npmlx)\n", + " ]" ] }, { @@ -1228,9 +1273,16 @@ " c231 = gama221 - gama231\n", " c241 = -gama221 - gama241\n", "\n", - " aux1 = (u2[x, z]*(-c111*c221-c121*c211) + u3[x+1, z]*(-c111*c231-c131*c211) + u2[x+1, z]*(-c111*c241-c121*c231-c141*c211-c131*c221)\n", - " + u1[x, z]*(-c121*c221) + u1[x+1, z]*(-c121*c241-c141*c221) + u3[x+2, z]*(-c131*c231) +u2[x+2, z]*(-c131*c241-c141*c231)\n", - " + u1[x+2, z]*(-c141*c241))/(c111*c211)\n", + " aux1 = (\n", + " u2[x, z]*(-c111*c221-c121*c211)\n", + " + u3[x+1, z]*(-c111*c231-c131*c211)\n", + " + u2[x+1, z]*(-c111*c241-c121*c231-c141*c211-c131*c221)\n", + " + u1[x, z]*(-c121*c221)\n", + " + u1[x+1, z]*(-c121*c241-c141*c221)\n", + " + u3[x+2, z]*(-c131*c231)\n", + " + u2[x+2, z]*(-c131*c241-c141*c231)\n", + " + u1[x+2, z]*(-c141*c241)\n", + " )/(c111*c211)\n", " pde1 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux1\n", " stencil1 = Eq(u.forward, pde1, subdomain=grid.subdomains['d1'])\n", "\n", @@ -1255,9 +1307,16 @@ " c232 = gama222 - gama232\n", " c242 = -gama222 - gama242\n", "\n", - " aux2 = (u2[x, z]*(-c112*c222-c122*c212) + u3[x-1, z]*(-c112*c232-c132*c212) + u2[x-1, z]*(-c112*c242-c122*c232-c142*c212-c132*c222)\n", - " + u1[x, z]*(-c122*c222) + u1[x-1, z]*(-c122*c242-c142*c222) + u3[x-2, z]*(-c132*c232) +u2[x-2, z]*(-c132*c242-c142*c232)\n", - " + u1[x-2, z]*(-c142*c242))/(c112*c212)\n", + " aux2 = (\n", + " u2[x, z]*(-c112*c222-c122*c212)\n", + " + u3[x-1, z]*(-c112*c232-c132*c212)\n", + " + u2[x-1, z]*(-c112*c242-c122*c232-c142*c212-c132*c222)\n", + " + u1[x, z]*(-c122*c222)\n", + " + u1[x-1, z]*(-c122*c242-c142*c222)\n", + " + u3[x-2, z]*(-c132*c232)\n", + " + u2[x-2, z]*(-c132*c242-c142*c232)\n", + " + u1[x-2, z]*(-c142*c242)\n", + " )/(c112*c212)\n", " pde2 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux2\n", " stencil2 = Eq(u.forward, pde2, subdomain=grid.subdomains['d2'])\n", "\n", @@ -1282,9 +1341,16 @@ " c233 = gama223 - gama233\n", " c243 = -gama223 - gama243\n", "\n", - " aux3 = (u2[x, 
z]*(-c113*c223-c123*c213) + u3[x, z-1]*(-c113*c233-c133*c213) + u2[x, z-1]*(-c113*c243-c123*c233-c143*c213-c133*c223)\n", - " + u1[x, z]*(-c123*c223) + u1[x, z-1]*(-c123*c243-c143*c223) + u3[x, z-2]*(-c133*c233) +u2[x, z-2]*(-c133*c243-c143*c233)\n", - " + u1[x, z-2]*(-c143*c243))/(c113*c213)\n", + " aux3 = (\n", + " u2[x, z]*(-c113*c223-c123*c213)\n", + " + u3[x, z-1]*(-c113*c233-c133*c213)\n", + " + u2[x, z-1]*(-c113*c243-c123*c233-c143*c213-c133*c223)\n", + " + u1[x, z]*(-c123*c223)\n", + " + u1[x, z-1]*(-c123*c243-c143*c223)\n", + " + u3[x, z-2]*(-c133*c233)\n", + " + u2[x, z-2]*(-c133*c243-c143*c233)\n", + " + u1[x, z-2]*(-c143*c243)\n", + " )/(c113*c213)\n", " pde3 = (1-weightsz[x, z])*u3[x, z] + weightsz[x, z]*aux3\n", " stencil3 = Eq(u.forward, pde3, subdomain=grid.subdomains['d3'])" ] @@ -1336,9 +1402,23 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "if(habctype != 2):\n", - " op = Operator([stencil0] + src_term + [stencil01, stencil3, stencil02, stencil2, stencil1] + bc + rec_term, subs=grid.spacing_map)\n", + " op = Operator(\n", + " [stencil0]\n", + " + src_term\n", + " + [stencil01, stencil3, stencil02, stencil2, stencil1]\n", + " + bc\n", + " + rec_term,\n", + " subs=grid.spacing_map\n", + " )\n", "else:\n", - " op = Operator([stencil0] + src_term + [stencil01, stencil3, stencil02, stencil2, stencil1, stencil02, stencil4, stencil5] + bc + rec_term, subs=grid.spacing_map)" + " op = Operator(\n", + " [stencil0]\n", + " + src_term\n", + " + [stencil01, stencil3, stencil02, stencil2, stencil1, stencil02, stencil4, stencil5]\n", + " + bc\n", + " + rec_term,\n", + " subs=grid.spacing_map\n", + " )" ] }, { @@ -1438,9 +1518,12 @@ " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.axis('equal')\n", - " if(i == 1): plot.title('Map - Acoustic Problem with Devito - HABC A1')\n", - " if(i == 2): plot.title('Map - Acoustic Problem with Devito - HABC 
A2')\n", - " if(i == 3): plot.title('Map - Acoustic Problem with Devito - HABC Higdon')\n", + " if(i == 1):\n", + " plot.title('Map - Acoustic Problem with Devito - HABC A1')\n", + " if(i == 2):\n", + " plot.title('Map - Acoustic Problem with Devito - HABC A2')\n", + " if(i == 3):\n", + " plot.title('Map - Acoustic Problem with Devito - HABC Higdon')\n", " plot.grid()\n", " ax = plot.gca()\n", " divider = make_axes_locatable(ax)\n", @@ -1496,26 +1579,29 @@ "outputs": [], "source": [ "def graph2drec(rec, i):\n", - " plot.figure()\n", - " plot.figure(figsize=(16, 8))\n", - " fscaled = 1/10**(3)\n", - " fscalet = 1/10**(3)\n", - " x0pml = x0 + npmlx*hxv\n", - " x1pml = x1 - npmlx*hxv\n", - " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", - " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", - " plot.axis('equal')\n", - " if(i == 1): plot.title('Receivers Signal Profile - Devito with HABC A1')\n", - " if(i == 2): plot.title('Receivers Signal Profile - Devito with HABC A2')\n", - " if(i == 3): plot.title('Receivers Signal Profile - Devito with HABC Higdon')\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscaled = 1/10**(3)\n", + " fscalet = 1/10**(3)\n", + " x0pml = x0 + npmlx*hxv\n", + " x1pml = x1 - npmlx*hxv\n", + " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", + " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " 
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", + " plot.axis('equal')\n", + " if(i == 1):\n", + " plot.title('Receivers Signal Profile - Devito with HABC A1')\n", + " if(i == 2):\n", + " plot.title('Receivers Signal Profile - Devito with HABC A2')\n", + " if(i == 3):\n", + " plot.title('Receivers Signal Profile - Devito with HABC Higdon')\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " _ = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " plot.show()" ] }, { diff --git a/examples/seismic/acoustic/accuracy.ipynb b/examples/seismic/acoustic/accuracy.ipynb index 711dd1a9cd..7c76eb9192 100644 --- a/examples/seismic/acoustic/accuracy.ipynb +++ b/examples/seismic/acoustic/accuracy.ipynb @@ -88,7 +88,7 @@ "t0 = 0.\n", "tn = dt * (nt-1)\n", "time = np.linspace(t0, tn, nt)\n", - "print(\"t0, tn, dt, nt; %.4f %.4f %.4f %d\" % (t0, tn, dt, nt))\n", + "print(f't0, tn, dt, nt; {t0:.4f} {tn:.4f} {dt:.4f} {nt:d}')\n", "# Source peak frequency in KHz\n", "f0 = .09" ] @@ -211,7 +211,7 @@ " dt = kwargs.get('dt', model.critical_dt)\n", " # Fourier constants\n", " nf = int(nt/2 + 1)\n", - " fnyq = 1. / (2 * dt)\n", + " # fnyq = 1. 
/ (2 * dt)\n", " df = 1.0 / time[-1]\n", " faxis = df * np.arange(nf)\n", "\n", @@ -263,8 +263,14 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "print(f\"Numerical data min,max,abs; {np.min(ref_rec.data):+.6e} {np.max(ref_rec.data):+.6e} {np.max(np.abs(ref_rec.data)):+.6e}\")\n", - "print(f\"Analytic data min,max,abs; {np.min(U_t):+.6e} {np.max(U_t):+.6e} {np.max(np.abs(U_t)):+.6e}\")" + "print(\n", + " f'Numerical data min,max,abs; {np.min(ref_rec.data):+.6e} '\n", + " f'{np.max(ref_rec.data):+.6e} {np.max(np.abs(ref_rec.data)):+.6e}'\n", + ")\n", + "print(\n", + " f'Analytic data min,max,abs; {np.min(U_t):+.6e} '\n", + " f'{np.max(U_t):+.6e} {np.max(np.abs(U_t)):+.6e}'\n", + ")" ] }, { @@ -303,8 +309,10 @@ "plt.figure(figsize=(8, 8))\n", "amax = np.max(np.abs(ref_u.data[1, :, :]))\n", "plt.imshow(ref_u.data[1, :, :], vmin=-1.0 * amax, vmax=+1.0 * amax, cmap=\"seismic\")\n", - "plt.plot(2*sx+40, 2*sz+40, 'r*', markersize=11, label='source') # plot position of the source in model, add nbl for correct position\n", - "plt.plot(2*rx+40, 2*rz+40, 'k^', markersize=8, label='receiver') # plot position of the receiver in model, add nbl for correct position\n", + "# plot position of the source in model, add nbl for correct position\n", + "plt.plot(2*sx+40, 2*sz+40, 'r*', markersize=11, label='source')\n", + "# plot position of the receiver in model, add nbl for correct position\n", + "plt.plot(2*rx+40, 2*rz+40, 'k^', markersize=8, label='receiver')\n", "plt.legend()\n", "plt.xlabel('x position (m)')\n", "plt.ylabel('z position (m)')\n", @@ -408,8 +416,16 @@ " rec_coordinates = np.empty((1, 2))\n", " rec_coordinates[:, :] = 260.\n", "\n", - " geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,\n", - " t0=t0, tn=tn, src_type='Ricker', f0=f0, t0w=1.5/f0)\n", + " geometry = AcquisitionGeometry(\n", + " model,\n", + " rec_coordinates,\n", + " src_coordinates,\n", + " t0=t0,\n", + " tn=tn,\n", + " src_type='Ricker',\n", + " f0=f0,\n", + " t0w=1.5/f0\n", + " 
)\n", "\n", " # Note: incorrect data size will be generated here due to AcquisitionGeometry bug ...\n", " # temporarily fixed below by resizing the output from the solver\n", @@ -428,7 +444,10 @@ "\n", " ratio_d = dt[i-1]/dt[i] if i > 0 else 1.0\n", " ratio_e = error_time[i-1]/error_time[i] if i > 0 else 1.0\n", - " print(f\"error for dt={dt[i]:.4f} is {error_time[i]:12.6e} -- ratio dt^2,ratio err; {ratio_d**2:12.6f} {ratio_e:12.6f} \\n\")\n", + " print(\n", + " f'error for dt={dt[i]:.4f} is {error_time[i]:12.6e} '\n", + " f'-- ratio dt^2,ratio err; {ratio_d**2:12.6f} {ratio_e:12.6f} \\n'\n", + " )\n", " errors_plot.append((geometry.time_axis.time_values, U_t1[:-1] - ref_rec1_data[:-1, 0]))" ] }, @@ -457,10 +476,14 @@ "theory = [error_time[0]*th/theory[0] for th in theory]\n", "plt.loglog([t for t in dt], error_time, '-ob', label=('Numerical'), linewidth=4, markersize=10)\n", "plt.loglog([t for t in dt], theory, '-^r', label=('Theory (2nd order)'), linewidth=4, markersize=10)\n", - "for x, y, a in zip([t for t in dt], theory, [(f'dt = {t} ms') for t in dt]):\n", - " plt.annotate(a, xy=(x, y), xytext=(4, 2),\n", - " textcoords='offset points', size=20,\n", - " horizontalalignment='left', verticalalignment='top')\n", + "for x, y, a in zip([t for t in dt], theory, [(f'dt = {t} ms') for t in dt], strict=True):\n", + " plt.annotate(\n", + " a, xy=(x, y), xytext=(4, 2),\n", + " textcoords='offset points',\n", + " size=20,\n", + " horizontalalignment='left',\n", + " verticalalignment='top'\n", + " )\n", "plt.xlabel(\"Time-step $dt$ (ms)\", fontsize=20)\n", "plt.ylabel(\"$|| u_{num} - u_{ana}||_2$\", fontsize=20)\n", "plt.tick_params(axis='both', which='both', labelsize=20)\n", @@ -574,11 +597,8 @@ "\n", "set_log_level(\"ERROR\")\n", "ind_o = -1\n", - "for spc in orders:\n", - " ind_o += 1\n", - " ind_spc = -1\n", - " for nn, h in shapes:\n", - " ind_spc += 1\n", + "for ind_o, spc in enumerate(orders):\n", + " for ind_spc, (nn, h) in enumerate(shapes):\n", " time = 
np.linspace(0., 150., nt)\n", "\n", " model_space = ModelBench(vp=c0, origin=(0., 0.), spacing=(h, h), bcs=\"damp\",\n", @@ -605,9 +625,14 @@ "\n", " # Compare to reference solution\n", " # Note: we need to normalize by the factor of grid spacing squared\n", - " errorl2[ind_o, ind_spc] = np.linalg.norm(loc_rec.data[:-1, 0] * c_num - U_t[:-1] * c_ana, 2) / np.sqrt(U_t.shape[0] - 1)\n", + " errorl2[ind_o, ind_spc] = np.linalg.norm(\n", + " loc_rec.data[:-1, 0] * c_num - U_t[:-1] * c_ana, 2\n", + " ) / np.sqrt(U_t.shape[0] - 1)\n", " timing[ind_o, ind_spc] = np.max([v for _, v in summary.timings.items()])\n", - " print(f\"starting space order {spc} with ({nn}, {nn}) grid points the error is {errorl2[ind_o, ind_spc]} for {timing[ind_o, ind_spc]} seconds runtime\")" + " print(\n", + " f'starting space order {spc} with ({nn}, {nn}) grid points the error is '\n", + " f'{errorl2[ind_o, ind_spc]} for {timing[ind_o, ind_spc]} seconds runtime'\n", + " )" ] }, { @@ -635,9 +660,11 @@ "plt.figure(figsize=(20, 10))\n", "for i in range(0, 5):\n", " plt.loglog(errorl2[i, :], timing[i, :], stylel[i], label=(f'order {orders[i]}'), linewidth=4, markersize=10)\n", - " for x, y, a in zip(errorl2[i, :], timing[i, :], [(f'dx = {sc} m') for sc in dx]):\n", - " plt.annotate(a, xy=(x, y), xytext=(4, 2),\n", - " textcoords='offset points', size=20)\n", + " for x, y, a in zip(errorl2[i, :], timing[i, :], [(f'dx = {sc} m') for sc in dx], strict=True):\n", + " plt.annotate(\n", + " a, xy=(x, y), xytext=(4, 2),\n", + " textcoords='offset points', size=20\n", + " )\n", "plt.xlabel(\"$|| u_{num} - u_{ref}||_{inf}$\", fontsize=20)\n", "plt.ylabel(\"Runtime (sec)\", fontsize=20)\n", "plt.tick_params(axis='both', which='both', labelsize=20)\n", diff --git a/examples/seismic/acoustic/acoustic_example.py b/examples/seismic/acoustic/acoustic_example.py index 9a994340e9..95820cb3cb 100644 --- a/examples/seismic/acoustic/acoustic_example.py +++ b/examples/seismic/acoustic/acoustic_example.py @@ -1,9 +1,9 
@@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except ImportError: - pass from devito import Constant, Function, norm, smooth from devito.logger import info diff --git a/examples/seismic/elastic/elastic_example.py b/examples/seismic/elastic/elastic_example.py index 5ce82a696b..45da5a6c34 100644 --- a/examples/seismic/elastic/elastic_example.py +++ b/examples/seismic/elastic/elastic_example.py @@ -1,9 +1,10 @@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except ImportError: - pass + from devito import norm from devito.logger import info from examples.seismic import demo_model, seismic_args, setup_geometry diff --git a/examples/seismic/inversion/fwi.py b/examples/seismic/inversion/fwi.py index b1445e6466..2eda7c73f7 100644 --- a/examples/seismic/inversion/fwi.py +++ b/examples/seismic/inversion/fwi.py @@ -111,6 +111,6 @@ def fwi_gradient(vp_in): update_with_box(model0.vp, alpha, direction) # Log the progress made - print('Objective value is %f at iteration %d' % (phi, i+1)) + print(f'Objective value is {phi} at iteration {i + 1}') assert np.isclose(history[-1], 3828, atol=1e1, rtol=0) diff --git a/examples/seismic/model.py b/examples/seismic/model.py index e3eb312710..610f98313c 100644 --- a/examples/seismic/model.py +++ b/examples/seismic/model.py @@ -1,10 +1,10 @@ +from contextlib import suppress + import numpy as np from sympy import finite_diff_weights as fd_w -try: +with suppress(ImportError): import pytest -except: - pass from devito import ( Abs, Constant, Eq, Function, Grid, Inc, Operator, SubDimension, SubDomain, div, sin, @@ -41,7 +41,7 @@ def initialize_damp(damp, padsizes, spacing, abc_type="damp", fs=False): """ eqs = [Eq(damp, 1.0 if abc_type == "mask" else 0.0)] - for (nbl, nbr), d in zip(padsizes, damp.dimensions): + for (nbl, nbr), d in zip(padsizes, damp.dimensions, strict=True): if not fs or d is not damp.dimensions[-1]: dampcoeff 
= 1.5 * np.log(1.0 / 0.001) / (nbl) # left @@ -92,7 +92,7 @@ def define(self, dimensions): Definition of the upper section of the domain for wrapped indices FS. """ - return {d: (d if not d == dimensions[-1] else ('left', self.size)) + return {d: (d if d != dimensions[-1] else ('left', self.size)) for d in dimensions} @@ -109,7 +109,7 @@ def __init__(self, origin, spacing, shape, space_order, nbl=20, self.origin = tuple([dtype(o) for o in origin]) self.fs = fs # Default setup - origin_pml = [dtype(o - s*nbl) for o, s in zip(origin, spacing)] + origin_pml = [dtype(o - s*nbl) for o, s in zip(origin, spacing, strict=True)] shape_pml = np.array(shape) + 2 * self.nbl # Model size depending on freesurface @@ -234,7 +234,7 @@ def domain_size(self): """ Physical size of the domain as determined by shape and spacing """ - return tuple((d-1) * s for d, s in zip(self.shape, self.spacing)) + return tuple((d-1) * s for d, s in zip(self.shape, self.spacing, strict=True)) class SeismicModel(GenericModel): diff --git a/examples/seismic/plotting.py b/examples/seismic/plotting.py index a5974d212b..907b371627 100644 --- a/examples/seismic/plotting.py +++ b/examples/seismic/plotting.py @@ -69,7 +69,7 @@ def plot_velocity(model, source=None, receiver=None, colorbar=True, cmap="jet"): model.origin[1] + domain_size[1], model.origin[1]] slices = tuple(slice(model.nbl, -model.nbl) for _ in range(2)) - if getattr(model, 'vp', None) is not None: + if getattr(model, 'vp', None) is not None: # noqa: SIM108 field = model.vp.data[slices] else: field = model.lam.data[slices] diff --git a/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb b/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb index 051e925b36..204870d4b2 100644 --- a/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb +++ b/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb @@ -511,14 +511,14 @@ "shape = (nx, nz) # Number of grid points\n", "spacing = (dx, dz) # Domain size is now 5 km by 5 
km\n", "origin = (0., 0.) # Origin of coordinate system, specified in m.\n", - "extent = tuple([s*(n-1) for s, n in zip(spacing, shape)])\n", + "extent = tuple([s*(n-1) for s, n in zip(spacing, shape, strict=True)])\n", "\n", "# Define dimensions for the model padded with absorbing boundaries\n", "npad = 50 # number of points in absorbing boundary region (all sides)\n", "nxpad, nzpad = nx+2*npad, nz+2*npad\n", "shape_pad = np.array(shape) + 2 * npad\n", - "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)])\n", - "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])\n", + "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing, strict=True)])\n", + "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad, strict=True)])\n", "\n", "# Define the dimensions\n", "# Note if you do not specify dimensions, you get in order x,y,z\n", @@ -645,7 +645,7 @@ "tn = dtype(2000.) # Simulation time end (1 second = 1000 msec)\n", "dt = compute_critical_dt(m)\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", - "print(\"Time min, max, dt, num; %10.6f %10.6f %10.6f %d\" % (t0, tn, dt, int(tn//dt) + 1))\n", + "print(f'Time min, max, dt, num; {t0:10.6f} {tn:10.6f} {dt:10.6f} {int(tn//dt) + 1}')\n", "print(\"time_range; \", time_range)" ] }, @@ -716,8 +716,14 @@ "\n", "print(f\"src_coordinate X; {src.coordinates.data[0, 0]:+12.4f}\")\n", "print(f\"src_coordinate Z; {src.coordinates.data[0, 1]:+12.4f}\")\n", - "print(f\"rec_coordinates X min/max; {np.min(rec.coordinates.data[:, 0]):+12.4f} {np.max(rec.coordinates.data[:, 0]):+12.4f}\")\n", - "print(f\"rec_coordinates Z min/max; {np.min(rec.coordinates.data[:, 1]):+12.4f} {np.max(rec.coordinates.data[:, 1]):+12.4f}\")\n", + "print(\n", + " f'rec_coordinates X min/max; {np.min(rec.coordinates.data[:, 0]):+12.4f} '\n", + " f'{np.max(rec.coordinates.data[:, 0]):+12.4f}'\n", + ")\n", + "print(\n", + " f'rec_coordinates Z min/max; {np.min(rec.coordinates.data[:, 1]):+12.4f} '\n", + " 
f'{np.max(rec.coordinates.data[:, 1]):+12.4f}'\n", + ")\n", "\n", "# We can plot the time signature to see the wavelet\n", "src.show()" @@ -1393,6 +1399,7 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", + "import copy\n", "\n", "# Run the operator for the Q=25 model\n", "print(f\"m min/max; {np.min(m.data):+12.6e} {np.max(m.data):+12.6e}\")\n", @@ -1405,7 +1412,6 @@ "# summary = op(time=time_range.num-1, h_x=dx, h_z=dz, dt=dt)\n", "\n", "# Save the Q=25 results and run the Q=100 case\n", - "import copy\n", "uQ25 = copy.copy(u)\n", "recQ25 = copy.copy(rec)\n", "\n", diff --git a/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb b/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb index d290485757..2df14289a9 100644 --- a/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb +++ b/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb @@ -320,14 +320,14 @@ "shape = (nx, nz) # Number of grid points\n", "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", "origin = (0., 0.) 
# Origin of coordinate system, specified in m.\n", - "extent = tuple([s*(n-1) for s, n in zip(spacing, shape)])\n", + "extent = tuple([s*(n-1) for s, n in zip(spacing, shape, strict=True)])\n", "\n", "# Define dimensions for the model padded with absorbing boundaries\n", "npad = 50 # number of points in absorbing boundary region (all sides)\n", "nxpad, nzpad = nx + 2 * npad, nz + 2 * npad\n", "shape_pad = np.array(shape) + 2 * npad\n", - "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)])\n", - "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])\n", + "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing, strict=True)])\n", + "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad, strict=True)])\n", "\n", "# Define the dimensions\n", "# Note if you do not specify dimensions, you get in order x,y,z\n", @@ -486,7 +486,7 @@ "tn = 1200.0 # Simulation time end (1 second = 1000 msec)\n", "dt = compute_critical_dt(m0)\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", - "print(\"Time min, max, dt, num; %10.6f %10.6f %10.6f %d\" % (t0, tn, dt, int(tn//dt) + 1))\n", + "print(f'Time min, max, dt, num; {t0:10.6f} {tn:10.6f} {dt:10.6f} {int(tn//dt) + 1}')\n", "print(\"time_range; \", time_range)\n", "\n", "# Source at 1/4 X, 1/2 Z, Ricker with 10 Hz center frequency\n", @@ -501,8 +501,14 @@ "\n", "print(f\"src_coordinate X; {src_nl.coordinates.data[0, 0]:+12.4f}\")\n", "print(f\"src_coordinate Z; {src_nl.coordinates.data[0, 1]:+12.4f}\")\n", - "print(f\"rec_coordinates X min/max; {np.min(rec_nl.coordinates.data[:, 0]):+12.4f} {np.max(rec_nl.coordinates.data[:, 0]):+12.4f}\")\n", - "print(f\"rec_coordinates Z min/max; {np.min(rec_nl.coordinates.data[:, 1]):+12.4f} {np.max(rec_nl.coordinates.data[:, 1]):+12.4f}\")" + "print(\n", + " f'rec_coordinates X min/max; {np.min(rec_nl.coordinates.data[:, 0]):+12.4f} '\n", + " f'{np.max(rec_nl.coordinates.data[:, 0]):+12.4f}'\n", + ")\n", + "print(\n", + " f'rec_coordinates Z 
min/max; {np.min(rec_nl.coordinates.data[:, 1]):+12.4f} '\n", + " f'{np.max(rec_nl.coordinates.data[:, 1]):+12.4f}'\n", + ")" ] }, { diff --git a/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb b/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb index c626bf6926..c4933e85ee 100644 --- a/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb +++ b/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb @@ -266,13 +266,13 @@ " ntpad = 20 * (nt - 1) + 1\n", " tmaxpad = dt * (ntpad - 1)\n", " time_axis_pad = TimeAxis(start=tmin, stop=tmaxpad, step=dt)\n", - " timepad = np.linspace(tmin, tmaxpad, ntpad)\n", + " # timepad = np.linspace(tmin, tmaxpad, ntpad)\n", " print(time_axis)\n", " print(time_axis_pad)\n", " srcpad = RickerSource(name='srcpad', grid=v.grid, f0=fpeak, npoint=1,\n", " time_range=time_axis_pad, t0w=t0w)\n", " nf = int(ntpad / 2 + 1)\n", - " fnyq = 1.0 / (2 * dt)\n", + " # fnyq = 1.0 / (2 * dt)\n", " df = 1.0 / tmaxpad\n", " faxis = df * np.arange(nf)\n", "\n", @@ -559,10 +559,11 @@ "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(rec2.data**2))\n", "diff = (rec1.data - rec2.data) / rms2\n", - "print(\"\\nlinearity forward F %s (so=%d) rms 1,2,diff; \"\n", - " \"%+16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, np.sqrt(np.mean(rec1.data**2)), np.sqrt(np.mean(rec2.data**2)),\n", - " np.sqrt(np.mean(diff**2))))\n", + "print(\n", + " f'\\nlinearity forward F {shape} (so=8) rms 1,2,diff; '\n", + " f'{np.sqrt(np.mean(rec1.data**2)):+16.10e} {np.sqrt(np.mean(rec2.data**2)):+16.10e} '\n", + " f'{np.sqrt(np.mean(diff**2)):+16.10e}'\n", + ")\n", "tol = 1.e-12\n", "assert np.allclose(diff, 0.0, atol=tol)" ] @@ -607,10 +608,11 @@ "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(src2.data**2))\n", "diff = (src1.data - src2.data) / rms2\n", - "print(\"\\nlinearity adjoint F %s (so=%d) rms 1,2,diff; \"\n", - " \"%+16.10e %+16.10e %+16.10e\" %\n", - 
" (shape, 8, np.sqrt(np.mean(src1.data**2)), np.sqrt(np.mean(src2.data**2)),\n", - " np.sqrt(np.mean(diff**2))))\n", + "print(\n", + " f'\\nlinearity adjoint F {shape} (so=8) rms 1,2,diff; '\n", + " f'{np.sqrt(np.mean(src1.data**2)):+16.10e} {np.sqrt(np.mean(src2.data**2)):+16.10e} '\n", + " f'{np.sqrt(np.mean(diff**2)):+16.10e}'\n", + ")\n", "tol = 1.e-12\n", "assert np.allclose(diff, 0.0, atol=tol)" ] @@ -663,8 +665,10 @@ "sum_s = np.dot(src1.data.reshape(-1), src2.data.reshape(-1))\n", "sum_r = np.dot(rec1.data.reshape(-1), rec2.data.reshape(-1))\n", "diff = (sum_s - sum_r) / (sum_s + sum_r)\n", - "print(\"\\nadjoint F %s (so=%d) sum_s, sum_r, diff; %+16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, sum_s, sum_r, diff))\n", + "print(\n", + " f'\\nadjoint F {shape} (so=8) sum_s, sum_r, diff; '\n", + " f'{sum_s:+16.10e} {sum_r:+16.10e} {diff:+16.10e}'\n", + ")\n", "assert np.isclose(diff, 0., atol=1.e-12)" ] }, @@ -766,8 +770,11 @@ "# Assert the 2nd order error has slope dh^4\n", "p1 = np.polyfit(np.log10(scale), np.log10(norm1), 1)\n", "p2 = np.polyfit(np.log10(scale), np.log10(norm2), 1)\n", - "print(\"\\nlinearization F %s (so=%d) 1st (%.1f) = %.4f, 2nd (%.1f) = %.4f\" %\n", - " (shape, 8, dh**2, p1[0], dh**4, p2[0]))\n", + "print(\n", + " f'\\nlinearization F {shape} (so=8) '\n", + " f'1st ({dh**2:.1f}) = {p1[0]:.4f}, '\n", + " f'2nd ({dh**4:.1f}) = {p2[0]:.4f}'\n", + ")\n", "assert np.isclose(p1[0], dh**2, rtol=0.1)\n", "assert np.isclose(p2[0], dh**4, rtol=0.1)" ] @@ -888,10 +895,11 @@ "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(rec2.data**2))\n", "diff = (rec1.data - rec2.data) / rms2\n", - "print(\"\\nlinearity forward J %s (so=%d) rms 1,2,diff; \"\n", - " \"%+16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, np.sqrt(np.mean(rec1.data**2)), np.sqrt(np.mean(rec2.data**2)),\n", - " np.sqrt(np.mean(diff**2))))\n", + "print(\n", + " f'\\nlinearity forward J {shape} (so=8) rms 1,2,diff; '\n", + " 
f'{np.sqrt(np.mean(rec1.data**2)):+16.10e} {np.sqrt(np.mean(rec2.data**2)):+16.10e} '\n", + " f'{np.sqrt(np.mean(diff**2)):+16.10e}'\n", + ")\n", "tol = 1.e-12\n", "assert np.allclose(diff, 0.0, atol=tol)" ] @@ -946,10 +954,11 @@ "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(dm2.data**2))\n", "diff = (dm1.data - dm2.data) / rms2\n", - "print(\"\\nlinearity adjoint J %s (so=%d) rms 1,2,diff; \"\n", - " \"%+16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, np.sqrt(np.mean(dm1.data**2)), np.sqrt(np.mean(dm2.data**2)),\n", - " np.sqrt(np.mean(diff**2))))" + "print(\n", + " f'\\nlinearity adjoint J {shape} (so=8) rms 1,2,diff; '\n", + " f'{np.sqrt(np.mean(dm1.data**2)):+16.10e} {np.sqrt(np.mean(dm2.data**2)):+16.10e} '\n", + " f'{np.sqrt(np.mean(diff**2)):+16.10e}'\n", + ")" ] }, { @@ -1021,8 +1030,10 @@ "sum_m = np.dot(dm1.data.reshape(-1), dm2.data.reshape(-1))\n", "sum_d = np.dot(rec1.data.reshape(-1), rec2.data.reshape(-1))\n", "diff = (sum_m - sum_d) / (sum_m + sum_d)\n", - "print(\"\\nadjoint J %s (so=%d) sum_m, sum_d, diff; %16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, sum_m, sum_d, diff))\n", + "print(\n", + " f'\\nadjoint J {shape} (so=8) sum_m, sum_d, diff; '\n", + " f'{sum_m:16.10e} {sum_d:+16.10e} {diff:+16.10e}'\n", + ")\n", "assert np.isclose(diff, 0., atol=1.e-11)\n", "\n", "del rec0, u0" diff --git a/examples/seismic/self_adjoint/test_utils.py b/examples/seismic/self_adjoint/test_utils.py index 02527c85b5..6b4f37c36a 100644 --- a/examples/seismic/self_adjoint/test_utils.py +++ b/examples/seismic/self_adjoint/test_utils.py @@ -1,9 +1,10 @@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except: - pass + from devito import Function, Grid from examples.seismic.self_adjoint import setup_w_over_q diff --git a/examples/seismic/self_adjoint/test_wavesolver_iso.py b/examples/seismic/self_adjoint/test_wavesolver_iso.py index 1271174bfd..77e665f931 100644 
--- a/examples/seismic/self_adjoint/test_wavesolver_iso.py +++ b/examples/seismic/self_adjoint/test_wavesolver_iso.py @@ -1,10 +1,11 @@ +from contextlib import suppress + import numpy as np from scipy.special import hankel2 -try: +with suppress(ImportError): import pytest -except: - pass + from devito import Eq, Function, Grid, Operator, info from examples.seismic import AcquisitionGeometry, Model, RickerSource, TimeAxis from examples.seismic.self_adjoint import ( @@ -40,10 +41,12 @@ def test_linearity_forward_F(self, shape, dtype, so): # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(rec2.data**2)) diff = (rec1.data - rec2.data) / rms2 - info("linearity forward F %s (so=%d) rms 1,2,diff; " - "%+16.10e %+16.10e %+16.10e" % - (shape, so, np.sqrt(np.mean(rec1.data**2)), np.sqrt(np.mean(rec2.data**2)), - np.sqrt(np.mean(diff**2)))) + info( + f'linearity forward F {shape} ({so=}) rms 1,2,diff; ' + f'{np.sqrt(np.mean(rec1.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(rec2.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(diff**2)):+16.10e}' + ) tol = 1.e-12 assert np.allclose(diff, 0.0, atol=tol) @@ -69,10 +72,12 @@ def test_linearity_adjoint_F(self, shape, dtype, so): # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(src2.data**2)) diff = (src1.data - src2.data) / rms2 - info("linearity adjoint F %s (so=%d) rms 1,2,diff; " - "%+16.10e %+16.10e %+16.10e" % - (shape, so, np.sqrt(np.mean(src1.data**2)), np.sqrt(np.mean(src2.data**2)), - np.sqrt(np.mean(diff**2)))) + info( + f'linearity adjoint F {shape} ({so=}) rms 1,2,diff; ' + f'{np.sqrt(np.mean(src1.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(src2.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(diff**2)):+16.10e}' + ) tol = 1.e-12 assert np.allclose(diff, 0.0, atol=tol) @@ -95,8 +100,10 @@ def test_adjoint_F(self, shape, dtype, so): sum_s = np.dot(src1.data.reshape(-1), src2.data.reshape(-1)) sum_r = np.dot(rec1.data.reshape(-1), rec2.data.reshape(-1)) diff 
= (sum_s - sum_r) / (sum_s + sum_r) - info("adjoint F %s (so=%d) sum_s, sum_r, diff; %+16.10e %+16.10e %+16.10e" % - (shape, so, sum_s, sum_r, diff)) + info( + f'adjoint F {shape} ({so=}) sum_s, sum_r, diff; ' + f'{sum_s:+16.10e} {sum_r:+16.10e} {diff:+16.10e}' + ) assert np.isclose(diff, 0., atol=1.e-12) @pytest.mark.parametrize('shape', shapes) @@ -165,8 +172,10 @@ def test_linearization_F(self, shape, dtype, so): # Assert the 2nd order error has slope dh^4 p1 = np.polyfit(np.log10(scale), np.log10(norm1), 1) p2 = np.polyfit(np.log10(scale), np.log10(norm2), 1) - info("linearization F %s (so=%d) 1st (%.1f) = %.4f, 2nd (%.1f) = %.4f" % - (shape, so, dh**2, p1[0], dh**4, p2[0])) + info( + f'linearization F {shape} ({so=}) ' + f'1st ({dh**2:.1f}) = {p1[0]:.4f}, 2nd ({dh**4:.1f}) = {p2[0]:.4f}' + ) # we only really care the 2nd order err is valid, not so much the 1st order error assert np.isclose(p1[0], dh**2, rtol=0.25) @@ -213,10 +222,12 @@ def test_linearity_forward_J(self, shape, dtype, so): # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(rec2.data**2)) diff = (rec1.data - rec2.data) / rms2 - info("linearity forward J %s (so=%d) rms 1,2,diff; " - "%+16.10e %+16.10e %+16.10e" % - (shape, so, np.sqrt(np.mean(rec1.data**2)), np.sqrt(np.mean(rec2.data**2)), - np.sqrt(np.mean(diff**2)))) + info( + f'linearity forward J {shape} ({so=}) rms 1,2,diff; ' + f'{np.sqrt(np.mean(rec1.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(rec2.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(diff**2)):+16.10e}' + ) tol = 1.e-12 assert np.allclose(diff, 0.0, atol=tol) @@ -262,10 +273,12 @@ def test_linearity_adjoint_J(self, shape, dtype, so): # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(dm2.data**2)) diff = (dm1.data - dm2.data) / rms2 - info("linearity adjoint J %s (so=%d) rms 1,2,diff; " - "%+16.10e %+16.10e %+16.10e" % - (shape, so, np.sqrt(np.mean(dm1.data**2)), np.sqrt(np.mean(dm2.data**2)), - 
np.sqrt(np.mean(diff**2)))) + info( + f'linearity adjoint J {shape} ({so=}) rms 1,2,diff; ' + f'{np.sqrt(np.mean(dm1.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(dm2.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(diff**2)):+16.10e}' + ) @pytest.mark.parametrize('shape', shapes) @pytest.mark.parametrize('dtype', dtypes) @@ -311,8 +324,10 @@ def test_adjoint_J(self, shape, dtype, so): sum_m = np.dot(dm1.data.reshape(-1), dm2.data.reshape(-1)) sum_d = np.dot(rec1.data.reshape(-1), rec2.data.reshape(-1)) diff = (sum_m - sum_d) / (sum_m + sum_d) - info("adjoint J %s (so=%d) sum_m, sum_d, diff; %16.10e %+16.10e %+16.10e" % - (shape, so, sum_m, sum_d, diff)) + info( + f'adjoint J {shape} ({so=}) sum_m, sum_d, diff; ' + f'{sum_m:16.10e} {sum_d:+16.10e} {diff:+16.10e}' + ) assert np.isclose(diff, 0., atol=1.e-11) @pytest.mark.parametrize('dtype', dtypes) @@ -358,8 +373,10 @@ def test_derivative_symmetry(self, dtype, so): g1f2 = np.dot(g1.data, f2.data) diff = (f1g2 + g1f2) / (f1g2 - g1f2) - info("skew symmetry (so=%d) -- f1g2, g1f2, diff; %+16.10e %+16.10e %+16.10e" % - (so, f1g2, g1f2, diff)) + info( + f'skew symmetry ({so=}) -- f1g2, g1f2, diff; ' + f'{f1g2:+16.10e} {g1f2:+16.10e} {diff:+16.10e}' + ) assert np.isclose(diff, 0., atol=1.e-12) @pytest.mark.parametrize('dtype', dtypes) @@ -459,7 +476,10 @@ def analytic_response(): arms = np.max(np.abs(uAna)) drms = np.max(np.abs(diff)) - info(f"Maximum absolute numerical,analytic,diff; {nrms:+12.6e} {arms:+12.6e} {drms:+12.6e}") + info( + 'Maximum absolute numerical,analytic,diff; ' + f'{nrms:+12.6e} {arms:+12.6e} {drms:+12.6e}' + ) # This isnt a very strict tolerance ... 
tol = 0.1 diff --git a/examples/seismic/source.py b/examples/seismic/source.py index 743c91b327..ca86c41c30 100644 --- a/examples/seismic/source.py +++ b/examples/seismic/source.py @@ -63,7 +63,9 @@ def __init__(self, start=None, step=None, num=None, stop=None): else: raise ValueError("Only three of start, step, num and stop may be set") except: - raise ValueError("Three of args start, step, num and stop may be set") + raise ValueError( + "Three of args start, step, num and stop may be set" + ) from None if not isinstance(num, int): raise TypeError("input argument must be of type int") @@ -74,7 +76,8 @@ def __init__(self, start=None, step=None, num=None, stop=None): self.num = int(num) def __str__(self): - return f"TimeAxis: start={self.start:g}, stop={self.stop:g}, step={self.step:g}, num={self.num:g}" + return f'TimeAxis: start={self.start:g}, stop={self.stop:g}, ' + \ + f'step={self.step:g}, num={self.num:g}' def _rebuild(self): return TimeAxis(start=self.start, stop=self.stop, num=self.num) diff --git a/examples/seismic/test_seismic_utils.py b/examples/seismic/test_seismic_utils.py index 76f5ff4560..3a70a330e6 100644 --- a/examples/seismic/test_seismic_utils.py +++ b/examples/seismic/test_seismic_utils.py @@ -1,7 +1,8 @@ -try: +from contextlib import suppress + +with suppress(ImportError): import pytest -except: - pass + import numpy as np from devito import norm @@ -26,7 +27,7 @@ def test_damp(nbl, bcs): except AttributeError: center = model.damp - assert all([s == s0 + 2 * nbl for s, s0 in zip(model.vp.shape, shape)]) + assert all([s == s0 + 2 * nbl for s, s0 in zip(model.vp.shape, shape, strict=True)]) assert center == bcs[1] switch_bcs = not_bcs(bcs[0]) diff --git a/examples/seismic/tti/tti_example.py b/examples/seismic/tti/tti_example.py index 1452f8d671..5e22901eb8 100644 --- a/examples/seismic/tti/tti_example.py +++ b/examples/seismic/tti/tti_example.py @@ -1,9 +1,8 @@ import numpy as np +from contextlib import suppress -try: +with 
suppress(ImportError): import pytest -except ImportError: - pass from devito import Constant, Function, info, norm, smooth from examples.seismic import demo_model, seismic_args, setup_geometry diff --git a/examples/seismic/tutorials/02_rtm.ipynb b/examples/seismic/tutorials/02_rtm.ipynb index 11072cedbc..cf90a02585 100644 --- a/examples/seismic/tutorials/02_rtm.ipynb +++ b/examples/seismic/tutorials/02_rtm.ipynb @@ -500,7 +500,7 @@ "op_imaging = ImagingOperator(model, image)\n", "\n", "for i in range(nshots):\n", - " print('Imaging source %d out of %d' % (i+1, nshots))\n", + " print(f'Imaging source {i + 1} out of {nshots}')\n", "\n", " # Update source location\n", " geometry.src_positions[0, :] = source_locations[i, :]\n", diff --git a/examples/seismic/tutorials/03_fwi.ipynb b/examples/seismic/tutorials/03_fwi.ipynb index cf66ab32b8..c077628d20 100644 --- a/examples/seismic/tutorials/03_fwi.ipynb +++ b/examples/seismic/tutorials/03_fwi.ipynb @@ -607,7 +607,7 @@ " update_with_box(model0.vp, alpha, direction)\n", "\n", " # Log the progress made\n", - " print('Objective value is %f at iteration %d' % (phi, i+1))" + " print(f'Objective value is {phi} at iteration {i + 1}')" ] }, { diff --git a/examples/seismic/tutorials/04_dask.ipynb b/examples/seismic/tutorials/04_dask.ipynb index 4ca73c02ed..6ac2b18977 100644 --- a/examples/seismic/tutorials/04_dask.ipynb +++ b/examples/seismic/tutorials/04_dask.ipynb @@ -981,7 +981,9 @@ "import matplotlib.pyplot as plt\n", "\n", "# Plot model error\n", - "plt.plot(range(1, maxiter+1), model_error); plt.xlabel('Iteration number'); plt.ylabel('L2-model error')\n", + "plt.plot(range(1, maxiter+1), model_error)\n", + "plt.xlabel('Iteration number')\n", + "plt.ylabel('L2-model error')\n", "plt.show()" ] }, diff --git a/examples/seismic/tutorials/04_dask_pickling.ipynb b/examples/seismic/tutorials/04_dask_pickling.ipynb index bfdd4e136a..f78ea912e3 100644 --- a/examples/seismic/tutorials/04_dask_pickling.ipynb +++ 
b/examples/seismic/tutorials/04_dask_pickling.ipynb @@ -79,17 +79,6 @@ "outputs": [], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "\n", - "# Set up inversion parameters.\n", - "param = {'t0': 0.,\n", - " 'tn': 1000., # Simulation last 1 second (1000 ms)\n", - " 'f0': 0.010, # Source peak frequency is 10Hz (0.010 kHz)\n", - " 'nshots': 5, # Number of shots to create gradient from\n", - " 'shape': (101, 101), # Number of grid points (nx, nz).\n", - " 'spacing': (10., 10.), # Grid spacing in m. The domain size is now 1km by 1km.\n", - " 'origin': (0, 0), # Need origin to define relative source and receiver locations.\n", - " 'nbl': 40} # nbl thickness.\n", - "\n", "import numpy as np\n", "\n", "from scipy import optimize\n", @@ -107,6 +96,19 @@ "from examples.seismic import plot_image\n", "\n", "\n", + "# Set up inversion parameters.\n", + "param = {\n", + " 't0': 0.,\n", + " 'tn': 1000., # Simulation last 1 second (1000 ms)\n", + " 'f0': 0.010, # Source peak frequency is 10Hz (0.010 kHz)\n", + " 'nshots': 5, # Number of shots to create gradient from\n", + " 'shape': (101, 101), # Number of grid points (nx, nz).\n", + " 'spacing': (10., 10.), # Grid spacing in m. The domain size is now 1km by 1km.\n", + " 'origin': (0, 0), # Need origin to define relative source and receiver locations.\n", + " 'nbl': 40 # nbl thickness.\n", + "}\n", + "\n", + "\n", "def get_true_model():\n", " ''' Define the test phantom; in this case we are using\n", " a simple circle so we can easily see what is going on.\n", @@ -144,7 +146,8 @@ " \"\"\" Returns the current model. 
This is used by the\n", " worker to get the current model.\n", " \"\"\"\n", - " pkl = pickle.load(open(filename, \"rb\"))\n", + " with open(filename, 'rb') as fh:\n", + " pkl = pickle.load(fh)\n", "\n", " return pkl['model']\n", "\n", @@ -152,13 +155,15 @@ "def dump_model(filename, model):\n", " ''' Dump model to disk.\n", " '''\n", - " pickle.dump({'model': model}, open(filename, \"wb\"))\n", + " with open(filename, \"wb\") as fh:\n", + " pickle.dump({'model': model}, fh)\n", "\n", "\n", "def load_shot_data(shot_id, dt):\n", " ''' Load shot data from disk, resampling to the model time step.\n", " '''\n", - " pkl = pickle.load(open(\"shot_%d.p\" % shot_id, \"rb\"))\n", + " with open(f\"shot_{shot_id}.p\", \"rb\") as fh:\n", + " pkl = pickle.load(fh)\n", "\n", " return pkl['geometry'], pkl['rec'].resample(dt)\n", "\n", @@ -166,7 +171,8 @@ "def dump_shot_data(shot_id, rec, geometry):\n", " ''' Dump shot data to disk.\n", " '''\n", - " pickle.dump({'rec': rec, 'geometry': geometry}, open('shot_%d.p' % shot_id, \"wb\"))\n", + " with open(f'shot_{shot_id}.p', \"wb\") as fh:\n", + " pickle.dump({'rec': rec, 'geometry': geometry}, fh)\n", "\n", "\n", "def generate_shotdata_i(param):\n", @@ -300,7 +306,11 @@ "nreceivers = 101\n", "# Set up receiver data and geometry.\n", "rec_coordinates = np.empty((nreceivers, len(param['shape'])))\n", - "rec_coordinates[:, 1] = np.linspace(param['spacing'][0], true_model.domain_size[0] - param['spacing'][0], num=nreceivers)\n", + "rec_coordinates[:, 1] = np.linspace(\n", + " param['spacing'][0],\n", + " true_model.domain_size[0] - param['spacing'][0],\n", + " num=nreceivers\n", + ")\n", "rec_coordinates[:, 0] = 980. 
# 20m from the right end\n", "# Geometry\n", "geometry = AcquisitionGeometry(true_model, rec_coordinates, src_coordinates,\n", @@ -914,7 +924,9 @@ "import matplotlib.pyplot as plt\n", "\n", "# Plot model error\n", - "plt.plot(range(1, maxiter+1), relative_error); plt.xlabel('Iteration number'); plt.ylabel('L2-model error')\n", + "plt.plot(range(1, maxiter+1), relative_error)\n", + "plt.xlabel('Iteration number')\n", + "plt.ylabel('L2-model error')\n", "plt.show()" ] }, diff --git a/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb b/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb index e53394a3dc..72161dbd30 100644 --- a/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb +++ b/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb @@ -638,7 +638,10 @@ "# Time update\n", "u_v = Eq(v2.forward, solve(pde_v2, v2.forward))\n", "# The stress equation isn't time dependent so we don't need solve.\n", - "u_t = Eq(tau0, model.damp * (l * diag(div(v2.forward)) + mu * (grad(v2.forward) + grad(v2.forward).transpose(inner=False))))\n", + "u_t = Eq(\n", + " tau0,\n", + " model.damp * (l * diag(div(v2.forward)) + mu * (grad(v2.forward) + grad(v2.forward).transpose(inner=False)))\n", + ")\n", "\n", "rec_term2 = rec2.interpolate(expr=v2[0])\n", "rec_term2 += rec3.interpolate(expr=v2[1])\n", @@ -949,7 +952,9 @@ "\n", "# First order elastic wave equation\n", "pde_v = v_rsfd.dt - ro * div45(tau_rsfd)\n", - "pde_tau = tau_rsfd.dt - l * diag(div45(v_rsfd.forward)) - mu * (grad45(v_rsfd.forward) + grad45(v_rsfd.forward).transpose(inner=False))\n", + "pde_tau = tau_rsfd.dt \\\n", + " - l * diag(div45(v_rsfd.forward)) \\\n", + " - mu * (grad45(v_rsfd.forward) + grad45(v_rsfd.forward).transpose(inner=False))\n", "# Time update\n", "u_v = Eq(v_rsfd.forward, model.damp * solve(pde_v, v_rsfd.forward))\n", "u_t = Eq(tau_rsfd.forward, model.damp * solve(pde_tau, tau_rsfd.forward))\n", diff --git 
a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb index b914c43c33..efae154d9c 100644 --- a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb +++ b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb @@ -317,7 +317,7 @@ "N = 2\n", "weights = sym.finite_diff_weights(N, x, 0)\n", "\n", - "for ii, (derivative, th) in enumerate(zip(weights, ['ᵗʰ', 'ˢᵗ', 'ⁿᵈ'])):\n", + "for ii, (derivative, th) in enumerate(zip(weights, ['ᵗʰ', 'ˢᵗ', 'ⁿᵈ'], strict=True)):\n", " for jj, w in enumerate(derivative[ii:]):\n", " print(\n", " f'Weights for {ii}{th} derivative on the {jj + ii + 1} point(s)'\n", @@ -475,7 +475,7 @@ "\n", " # Fix beta, vary alpha\n", " alines = []\n", - " for r, v in zip(courant, velocity):\n", + " for r, v in zip(courant, velocity, strict=True):\n", " data = np.array([dispersion_ratio(weights, h, dt, v, k, a) for a in linspace])\n", " line, = ax[0].plot(linspace, data, label=f'{r=:.3g}')\n", " alines.append(line)\n", @@ -499,7 +499,7 @@ "\n", " # Fix alpha, vary beta\n", " blines = []\n", - " for r, v in zip(courant, velocity):\n", + " for r, v in zip(courant, velocity, strict=True):\n", " data = np.array([dispersion_ratio(weights, h, dt, v, b/h, alpha) for b in linspace])\n", " line, = ax[1].plot(linspace, data, label=f'{r=:.3g}')\n", " blines.append(line)\n", @@ -535,14 +535,14 @@ " ax[1].set_title(f'α={a:.3g}')\n", " ax[0].set_title(f'β={b:.3g}')\n", " k = b/h\n", - " for line, r, v in zip(alines, courant, velocity):\n", + " for line, v in zip(alines, velocity, strict=True):\n", " new_data = np.array([dispersion_ratio(weights, h, dt, v, k, a_) for a_ in linspace])\n", " line.set_ydata(new_data)\n", " bvline.set_xdata((b, b))\n", " aann.set_text(f'α={a:.3g}')\n", " aann.xy = (a, ylim[0] + (ylim[1] - ylim[0])*2/3)\n", "\n", - " for line, r, v in zip(blines, courant, velocity):\n", + " for line, v in zip(blines, velocity, strict=True):\n", " new_data = 
np.array([dispersion_ratio(weights, h, dt, v, b_/h, a) for b_ in linspace])\n", " line.set_ydata(new_data)\n", " avline.set_xdata((a, a))\n", @@ -1002,7 +1002,7 @@ "}]\n", "constraints += [{\n", " 'type': 'eq',\n", - " 'fun': lambda x: np.sum([xi*m**(2*jj) for m, xi in enumerate(x)])\n", + " 'fun': lambda x: np.sum([xi*m**(2*jj) for m, xi in enumerate(x)]) # noqa: B023\n", "} for jj in range(2, (len(initial_guess) + 1)//2)]" ] }, @@ -1051,12 +1051,18 @@ "def objective(a):\n", " x = np.linspace(0, np.pi/2, 201)\n", " m = np.arange(1, len(a) + 1)\n", - " y = x**2 + a[0] + 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(a[1:], m)], axis=0)\n", + " y = x**2 + a[0] + 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(a[1:], m, strict=False)], axis=0)\n", " return sp.integrate.trapezoid(y**2, x=x)\n", "\n", "\n", "print(f'Value of objective function at initial guess: {objective(initial_guess)}')\n", - "opt1 = sp.optimize.minimize(objective, initial_guess, method='SLSQP', constraints=constraints, options=dict(ftol=1e-15, maxiter=500))\n", + "opt1 = sp.optimize.minimize(\n", + " objective,\n", + " initial_guess,\n", + " method='SLSQP',\n", + " constraints=constraints,\n", + " options=dict(ftol=1e-15, maxiter=500)\n", + ")\n", "print(opt1)" ] }, @@ -1111,36 +1117,26 @@ }, "outputs": [ { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "cb77139907c04e22b10e06210f00cfc6", - "version_major": 2, - "version_minor": 0 - }, - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABLAAAAGQCAYAAAC+tZleAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAqFFJREFUeJzs3XdY1fX/xvHnYQ8ZAoKAoLi3OHKXO1PLHKWVmtvKhmWammlmw9Ww9c20XC21oVZuTTNH7r0HLmSKgGw45/z+OIrxU8sBHMb9uK5znfMZ5/N+HTIO5z7vYTCbzWZEREREREREREQKKBtrFyAiIiIiIiIiIvJvFGCJiIiIiIiIiEiBpgBLREREREREREQKNAVYIiIiIiIiIiJSoCnAEhERERERERGRAk0BloiIiIiIiIiIFGgKsEREREREREREpEBTgCUiIiIiIiIiIgWaAiwRERERERERESnQFGCJiIiIiIiIiEiBpgBLREREREREREQKNAVYIiIiIiIiIiJSoCnAEhERERERERGRAk0BloiIiIiIiIiIFGgKsEREREREREREpEBTgCUiIiIiIiIiIgWaAiwRERERERERESnQFGCJiIiIiIiIiEiBpgBLREREREREREQKNAVYIiIiIiIiIiJSoCnAEhERERERERGRAk0BloiIiIiIiIiIFGgKsEREREREREREpEBTgCUiIiIiIiIiIgWaAiwRERERERERESnQFGCJiBRCZ86cwWAwMHfuXGuXIiIiUmRMmDABg8GQY19WVhavvfYaQUFB2NjY0KVLFwCSkpIYNGgQpUuXxmAw8PLLL+d/wSIixYgCLBERYO7cuRgMhpveRo8ebbW6vv/+e6ZPn2619kVERAqz///+7uTkREBAAO3bt+eTTz7hypUr/3mN2bNnM23aNB577DHmzZvHK6+8AsB7773H3Llzee655/jmm2/o06dPXr8cEZFizWA2m83WLkJExNrmzp1L//79mThxIiEhITmO1axZk9DQUKvU9fDDD3Pw4EHOnDmTY7/ZbCY9PR17e3tsbW2tUpuIiEhB9//f3zMzM4mMjGTDhg2sWbOG4OBgfv31V2rXrg1YeltlZWXh5OSUfY0nnniCTZs2ceHChRzXbty4MXZ2dmzatClfX5OISHFlZ+0CREQKkg4dOtCgQQNrl/Gfrn2LLCIiIv/t/7+/jxkzhj/++IOHH36Yzp07c+TIEZydnbGzs8POLudHpOjoaDw9PW+4ZnR0NNWrV8+1Gk0mExkZGXp/FxG5BQ0hFBG5DQaDgQkTJtywv1y5cvTr1y97+9pQhc2bNzN8+HBKlSqFq6srXbt2JSYm5obnr1ixghYtWuDm5oa7uzv33Xcf33//PQAtW7Zk2bJlnD17NnvoQ7ly5YBbz4H1xx9/cP/99+Pq6oqnpyePPvooR44cyXHOtfk9Tp48Sb9+/fD09MTDw4P+/fuTkpKS49w1a9bQvHlzPD09KVGiBFWqVOH111+/8x+giIhIAdO6dWvGjRvH2bNn+fbbb4Gcc2Bde69dv349hw4dyn4v3rBhAwaDgbCwMJYtW5a9/1pv6fT0dN58800qVqyIo6MjQUFBvPbaa6Snp+do32Aw8MILL/Ddd99Ro0YNHB0dWblyJQDh4eEMGDAAPz8/HB0dqVGjBrNnz87x/Gt1LFq0iHfffZcyZcrg5OREmzZtOHny5A2vd9u2bXTs2JGSJUvi6upK7dq1+fjjj3Occ/ToUR577DG8vLxwcnKiQYMG/Prrr7ny8xYRuVfqgSUi8g8JCQnExsbm2Ofj43PH13nxxRcpWbIkb775JmfOnGH69Om88MILLFy4MPucuXPnMmDAAGrUqMGYMWPw9PRkz549rFy5kqeeeoqxY8eSkJDAhQsX+OijjwAoUaLELdtcu3YtHTp0oHz58kyYMIHU1FQ+/fRTmjVrxu7du7PDr2t69OhBSEgIkyZNYvfu3Xz11Vf4+voyZcoUAA4dOsTDDz9
M7dq1mThxIo6Ojpw8eZLNmzff8c9DRESkIOrTpw+vv/46q1evZvDgwTmOlSpVim+++YZ3332XpKQkJk2aBEC1atX45ptveOWVVyhTpgyvvvpq9vkmk4nOnTuzadMmhgwZQrVq1Thw4AAfffQRx48fZ8mSJTna+OOPP1i0aBEvvPACPj4+lCtXjqioKBo3bpwdcJUqVYoVK1YwcOBAEhMTb5gsfvLkydjY2DBixAgSEhKYOnUqvXr1Ytu2bdnnrFmzhocffhh/f3+GDRtG6dKlOXLkCL///jvDhg0DLO/7zZo1IzAwkNGjR+Pq6sqiRYvo0qULP//8M127ds3ln76IyJ1RgCUi8g9t27a9Yd/dTBXo7e3N6tWrs7/FNZlMfPLJJyQkJODh4UFCQgIvvfQSDRs2ZMOGDTmGC1xrr127dgQGBnL58mV69+79n22OHDkSLy8vtm7dipeXFwBdunShbt26vPnmm8ybNy/H+XXr1uXrr7/O3r506RJff/11doC1Zs0aMjIyWLFixV2FeCIiIgVdmTJl8PDw4NSpUzccc3V1pXfv3nz11VfY2trmeC/u3bs3b7zxBoGBgTn2f/vtt6xdu5Y///yT5s2bZ++vWbMmzz77LFu2bKFp06bZ+48dO8aBAwdyDEUcNGgQRqORAwcO4O3tDcCzzz7Lk08+yYQJE3jmmWdwdnbOPj8tLY29e/fi4OAAQMmSJRk2bBgHDx6kZs2aGI1GnnnmGfz9/dm7d2+O4ZD//Btn2LBhBAcHs2PHDhwdHQEYOnQozZs3Z9SoUQqwRMTqNIRQROQfPv/8c9asWZPjdjeGDBmSYxnu+++/H6PRyNmzZwFLOHTlyhVGjx59w1wX/3/57tsRERHB3r176devX3Z4BVC7dm3atWvH8uXLb3jOs88+m2P7/vvv59KlSyQmJgJk/4G7dOlSTCbTHdckIiJSGJQoUeK2ViO8HT/++CPVqlWjatWqxMbGZt9at24NwPr163Oc36JFixzhldls5ueff+aRRx7BbDbnuEb79u1JSEhg9+7dOa7Rv3//7PAKLO/nAKdPnwZgz549hIWF8fLLL98wl9e1vzni4uL4448/6NGjB1euXMlu89KlS7Rv354TJ04QHh6eKz8jEZG7pR5YIiL/0LBhw1yZxD04ODjHdsmSJQG4fPkyQPY3vTVr1rzntoDsYKxKlSo3HKtWrRqrVq0iOTkZV1fX26rR3d2dnj178tVXXzFo0CBGjx5NmzZt6NatG4899hg2Nvr+Q0REioakpCR8fX1z5VonTpzgyJEjlCpV6qbHo6Ojc2z//5WPY2JiiI+PZ+bMmcycOfO2rpEbf3OcPHkSs9nMuHHjGDdu3C3bDQwMvOU1RETymgIsEZF7YDQab7rf1tb2pvvvZjhiXvmvGp2dndm4cSPr169n2bJlrFy5koULF9K6dWtWr159y+eLiIgUFhcuXCAhIYGKFSvmyvVMJhO1atXiww8/vOnxoKCgHNv/HAp47flgGaLYt2/fm16jdu3aObZz42+Oa+2OGDGC9u3b3/Sc3PoZiYjcLQVYIiK3oWTJksTHx+fYl5GRQURExF1dr0KFCgAcPHjwX/8gvN3hhGXLlgUsc2n8f0ePHsXHxydH76vbZWNjQ5s2bWjTpg0ffvgh7733HmPHjmX9+vU3nS9MRESkMPnmm28Abhna3KkKFSqwb98+2rRpc1dTApQqVQo3NzeMRmOuvc/+82+OW12zfPnyANjb2+v9XUQKLI0BERG5DRUqVGDjxo059s2cOfOWPbD+y4MPPoibmxuTJk0iLS0tx7F/fmPq6upKQkLCf17P39+f0NBQ5s2blyNoO3jwIKtXr6Zjx453XGNcXNwN+0JDQwFuWApcRESksPnjjz94++23CQkJoVevXrlyzR49ehAeHs6sWbNuOJaamkpycvK/Pt/W1pbu3bvz888/c/DgwRuOx8TE3HFN9erVIyQkhOnTp9/wZdy1vzl8fX1p2bI
lX3755U2/nLubdkVEcpt6YImI3IZBgwbx7LPP0r17d9q1a8e+fftYtWrVXa/O5+7uzkcffcSgQYO47777eOqppyhZsiT79u0jJSUle8XA+vXrs3DhQoYPH859991HiRIleOSRR256zWnTptGhQweaNGnCwIEDSU1N5dNPP8XDw4MJEybccY0TJ05k48aNdOrUibJlyxIdHc3//vc/ypQpk2NlJRERkYJuxYoVHD16lKysLKKiovjjjz9Ys2YNZcuW5ddff71hQZW71adPHxYtWsSzzz7L+vXradasGUajkaNHj7Jo0SJWrVr1n3NtTp48mfXr19OoUSMGDx5M9erViYuLY/fu3axdu/amXzD9GxsbG7744gseeeQRQkND6d+/P/7+/hw9epRDhw6xatUqwLKQTfPmzalVqxaDBw+mfPnyREVFsXXrVi5cuMC+ffvu+uciIpIbFGCJiNyGwYMHExYWxtdff83KlSu5//77WbNmDW3atLnraw4cOBBfX18mT57M22+/jb29PVWrVuWVV17JPmfo0KHs3buXOXPm8NFHH1G2bNlbBlht27Zl5cqVvPnmm4wfPx57e3tatGjBlClTbpgk9nZ07tyZM2fOMHv2bGJjY/Hx8aFFixa89dZbeHh43PXrFhERyW/jx48HwMHBAS8vL2rVqsX06dPp378/bm5uudaOjY0NS5Ys4aOPPmL+/PksXrwYFxcXypcvz7Bhw6hcufJ/XsPPz4/t27czceJEfvnlF/73v//h7e1NjRo1mDJlyl3V1b59e9avX89bb73FBx98gMlkokKFCgwePDj7nOrVq7Nz507eeust5s6dy6VLl/D19aVu3brZPz8REWsymAvSjMIiIiIiIiIiIiL/j+bAEhERERERERGRAk0BloiIiIiIiIiIFGgKsEREREREREREpEBTgCUiIiIiIiIiIgWaAiwRERERERERESnQFGCJiIiIiIiIiEiBZmftAqToMZlMXLx4ETc3NwwGg7XLERERwWw2c+XKFQICArCx0fd390rv9SIiUtDovb7oU4Alue7ixYsEBQVZuwwREZEbnD9/njJlyli7jEJP7/UiIlJQ6b2+6FKAJbnOzc0NsPzicHd3t3I1IiIikJiYSFBQUPZ7lNwbvdeLiEhBo/f6ok8BluS6a0MJ3N3d9UetiIgUKBruljv0Xi8iIgWV3uuLLg0MFRERERERERGRAk0BloiIiIiIiIiIFGgKsEREREREREREpEDTHFjFzMaNG5k2bRq7du0iIiKCxYsX06VLl+zjZrOZN998k1mzZhEfH0+zZs344osvqFSpUq7XYjQayczMzPXrStFib2+Pra2ttcsQERERERERK1KAVcwkJydTp04dBgwYQLdu3W44PnXqVD755BPmzZtHSEgI48aNo3379hw+fBgnJ6dcqcFsNhMZGUl8fHyuXE+KPk9PT0qXLq0JGUVERERERIopBVjFTIcOHejQocNNj5nNZqZPn84bb7zBo48+CsD8+fPx8/NjyZIlPPHEE7lSw7XwytfXFxcXF4UScktms5mUlBSio6MB8Pf3t3JFIiIiIiIiYg0KsCRbWFgYkZGRtG3bNnufh4cHjRo1YuvWrbcMsNLT00lPT8/eTkxMvGUbRqMxO7zy9vbOveKlyHJ2dgYgOjoaX19fDScUEREREREphjSJu2SLjIwEwM/PL8d+Pz+/7GM3M2nSJDw8PLJvQUFBtzz32pxXLi4uuVCxFBfX/r1ozjQREREREZHiSQGW3LMxY8aQkJCQfTt//vx/PkfDBuVO6N+LiIiIiIhI8aYAS7KVLl0agKioqBz7o6Kiso/djKOjI+7u7jlukjsmTJhAaGiotcsQEcl3ZrOZbdu2WbsMERERyUPpWUbikjOsXYYUEpoDS7KFhIRQunRp1q1blx2aJCYmsm3bNp577jnrFlcA9OvXj3nz5t2w/8SJE1SsWNEKFYmIFF0zZsxg6NChDB8+nA8++MDa5Yi
IiMgdMJvNXE7JJDIhjagraUQlpBGZmEZUYhqRCWnEJSRiSAzHNS2Cir4leOvlF6xdshQCCrCKmaSkJE6ePJm9HRYWxt69e/Hy8iI4OJiXX36Zd955h0qVKhESEsK4ceMICAigS5cu1iu6AHnooYeYM2dOjn2lSpW64+tkZGTg4OCQW2XdsczMTOzt7a3WvojIvzlx4gQjRowAIDg42MrViIiIyD+ZzWYuJWcQEZ9GeHwqEQmpXIxPJSLhWkCVSsaVWEoZowk0XCLQEEuAIZbKhku0uvq4lOHqwl8OcCyxPKAAS/6bAqxiZufOnbRq1Sp7e/jw4QD07duXuXPn8tprr5GcnMyQIUOIj4+nefPmrFy5EicnJ2uVXKA4OjredDjln3/+yciRI9m3bx9eXl707duXd955Bzs7y/9iLVu2pGbNmtjZ2fHtt99Sq1Yt3nzzTVq1asXatWsZNWoUhw8fJjQ0lDlz5lClSpUc1//yyy955513uHTpEg8//DCzZs3Cw8Mj+/hXX33FBx98QFhYGOXKleOll15i6NChAJw5c4aQkBAWLFjA//73P7Zt28aMGTPo3bs3w4cPZ/78+dja2jJo0CAiIyNJSEhgyZIlefdDFBH5F1lZWfTp04eUlBTatGnDiy++aO2SREREipWk9Cwi4lOvhlNpXIxP5WJ82tWQynJzz7pMkCH66i2G8oZY7jfEEmC4RIDhEi526f+ZNpjsXDB7BFHZv2b+vDAp9BRgFTMtW7bEbDbf8rjBYGDixIlMnDgxH6sq3MLDw+nYsSP9+vVj/vz5HD16lMGDB+Pk5MSECROyz5s3bx7PPfccmzdvBiAiIgKAsWPH8sEHH1CqVCmeffZZBgwYkH0OwMmTJ1m0aBG//fYbiYmJDBw4kKFDh/Ldd98B8N133zF+/Hg+++wz6taty549exg8eDCurq707ds3+zqjR4/mgw8+oG7dujg5OTFlyhS+++475syZQ7Vq1fj4449ZsmRJjoBTRCS/TZkyhW3btuHh4cGcOXOwsdF0nSIiIrnFZDITm5TO+cspnI+zhFQX46/3oLoYn0piWhbOpBFkiCHYEE2wIZqahmg6XH0cZBuDs91/z1tlLuGHwSMIPMpYbp7B1x97BGHjXBK0WJPcAQVYYnVms5nUTGO+t+tsb3vHq9v9/vvvlChRInu7Q4cOVK5cmaCgID777DMMBgNVq1bl4sWLjBo1ivHjx2d/+KpUqRJTp07Nfu61AOvdd9+lRYsWgCVk6tSpE2lpadm93tLS0pg/fz6BgYEAfPrpp3Tq1IkPPviA0qVL8+abb/LBBx/QrVs3wDKX2eHDh/nyyy9zBFgvv/xy9jnXrjNmzBi6du0KwGeffcby5cvv6OchIpKbdu/enR38f/rppwQFBVm3IBERkULm2txT5+NSOH85hQuXUzkfd/X+6nZGlgkw400iIYYIQmwiqXctnDJEE+QYfX2I363aMdhgcC8DJcuCZ1nwDIJ/hlUeZTDYOebPi5ZiQwGWWF1qppHq41fle7uHJ7bHxeHO/hdo1aoVX3zxRfa2q6srzz//PE2aNMkRhjVr1oykpCQuXLiQPX9L/fr1b3rN2rVrZz/29/cHIDo6Ovt5wcHB2eEVQJMmTTCZTBw7dgw3NzdOnTrFwIEDGTx4cPY5WVlZOYYYAjRo0CD7cUJCAlFRUTRs2DB7n62tLfXr18dkMt3+D0REJJekpaXRp08fsrKy6N69O71797Z2SSIiIgVScnoWZy+lXO1FZQmlLlztUXXhcgrJGdc7B7iTRHlDJOUMkdSzibQEVg6RhNhEUYKUf2/IydMSUJUsd/3madk2eASBnfXm9JXiSQGWyB1wdXW96xUHXV1db7r/n5OpXwvBbjdESkpKAmDWrFk0atQoxzFbW9vbal9EpCAYM2YMhw8fxs/PjxkzZtxxD1kREZGiJCE1k3OXUjhzKZmzl5I5cykl+z7mSnqOc51Ip4IhgkqGCNoZIgmxj6S
yXRRlicTd/G89qQyW3lLeFaBkCHiF/COkKgvOJfP0NYrcKQVYYnXO9rYcntjeKu3mhmrVqvHzzz9jNpuzP3Bt3rwZNzc3ypQpc8/XP3fuHBcvXiQgIACAv//+GxsbG6pUqYKfnx8BAQGcPn2aXr163fY1PTw88PPzY8eOHTzwwAMAGI1Gdu/eTWho6D3XLCJyJ9auXcv06dMB+Prrr/Hx8bFuQSIiInns2lC/7IAqNoVzcdcCqxTikm+cY8qDJCoawmlte5Ga9hFUc4iknOkC3llRGPh/8xz/c9PNH7wqgHd58K549fHV0Mpei3VJ4aEAS6zOYDDc8VC+gmTo0KFMnz6dF198kRdeeIFjx47x5ptvMnz48FyZfNjJyYm+ffvy/vvvk5iYyEsvvUSPHj2yV0N86623eOmll/Dw8OChhx4iPT2dnTt3cvny5exVJm/mxRdfZNKkSVSsWJGqVavy6aefcvnyZfV6EJF8FRcXlz1f33PPPUenTp2sXJGIiEjuSc0wEhabzOnYJE5FW+5PxyRz5lIyV9KybvIMM35cpplNOKFO0dR2jKSC4SL+medwzbyU89R/ZlzOXuBT6Xo45V3B8tirPDiWQKQoKLypgUgBERgYyPLlyxk5ciR16tTBy8uLgQMH8sYbb+TK9StWrEi3bt3o2LEjcXFxPPzww/zvf//LPj5o0CBcXFyYNm0aI0eOxNXVlVq1avHyyy//63VHjRpFZGQkTz/9NLa2tgwZMoT27dvfMPRQRCSvmM1mnn32WS5evEiVKlV4//33rV2SiIjIHTObzUQlpnMqJonTMUmcikm++jiZ8PjUWz7PjRSaukVxn0skNWwvUNZ4hlIpp7DPvDrszwT8/6e7l4FSlcGnyj/uq4Crei9L0Wcwm83m/z5N5PYlJibi4eFBQkIC7u7uOY6lpaURFhZGSEhI9ip7UjCYTCaqVatGjx49ePvtt61dTg76dyNSNH3zzTc8/fTT2NnZsXXr1hyLTeS2f3tvkjunn6eIFEeZRhNnYpM5HpXEyeik7N5Up2OSckyc/v95O8H9Xpe5zzmSarYXKJMZRskrJ7BPCr/5Ewy2lp5TpaqAT+Xr9z6V1ZvqX+i9qehTDyyRYurs2bOsXr2aFi1akJ6ezmeffUZYWBhPPfWUtUsTkWLgzJkzPP/88wBMmDAhT8MrERGRO5FlNHHmUgonoq5wPCqJ49FXOBF1hbDYZDKNN+//YWtjINjLhZolTTR2vUh1wgjOOIFH4nFs405iiMu8eWPugeBXA3yrX7/3qQR2jnn4CkUKJwVYIsWUjY0Nc+fOZcSIEZjNZmrWrMnatWupVq2atUsTkSLOaDTSp08frly5QrNmzRg9erS1SxIRkWLIaDJz9pKlR9WJqCscj7bcn45JJsN481XBXR1sqejnRsVSJajpkUpt27OUyzhJycQj2ETuh/Nnb96Yo/vVkKr6P8KqalrpT+QOKMASKaaCgoLYvHmztcsQkWJo2rRpbNq0CTc3N7755hvNvSciInnuUlI6RyOvcCQikcMRiRyNuMLJmCQysm4eVDnb21LJrwSVfN2o7OtKLbcrVDadwjvxKIbI/XBuHyRF3rwxz7LgXwf8a0Pp2pbAyqMMaLEkkXuiAEtERETyze7duxk3bhwAn3zyCSEhIVauSEREipIso4nTsckciUjkSMSVq/eJRF9Jv+n5TvY2VPJ1o5JfCSr7uVHJtwRVPbLwTzqCzcXNEL4Ltu+ClNibPNtgmZfqWljlXwdK11KvKpE8ogBLRERE8kVKSgq9evUiKyuL7t2707dvX2uXJCIihVh8SgaHrwZVRyMSORKZyPGoW/eqKuftQtXS7lTzd6eavxtVS7tTxs0Gm+iDEL4NLuyEfbsg7tSNT7axtwz5868N/qGWsMqvBji45u2LFJFsCrBEREQkX4waNYqjR4/i7+/Pl19+iUFDKURE5DZdSkrnQHgCB8MTrt4nEh6fetNzXR1sqervTtXSblf
DKstjV0c7SAiH83/D2W2weQdEHgDTTSZY96oAgfWhTAPLfelamlhdxMoUYImIiEieW7FiBZ999hkAc+fOxdvb28oViYhIQRVzJT07qDoQnsCh8AQuJqTd9NwgL2eqlXanqr871f0tgVVQSRdsbAxgMkL0ETi/Bnb+Dee2QcK5Gy/i4g2BDa6GVfUgoB64eOXxqxSRO6UAS0RERPJUbGwsAwYMAOCll17iwQcftHJFAtC1a1c2bNhAmzZt+Omnn6xdjogUU3HJGew9f5n9Fyy9qg6GJxCZePOwqryPKzUDPagV6EHNQA+qB7jj4Wx//YSMFLi4Gw5ttYRV57dDekLOixhsLBOrBzexBFZlGlgmXVevYJECTwGWiIiI5Bmz2cyQIUOIjIykevXqTJ482dolyVXDhg1jwIABzJs3z9qliEgxkZZp5NDFRPadj2fv1du5uJQbzjMYoEKpEtQMcM8OrKoHuOPmZJ/zxMxUOL0Zwv6CM39B+O4bhwPau0LQfZbAKqiRJbBydMvDVykieUUBlojctTNnzhASEsKePXsIDQ1lw4YNtGrVisuXL+Pp6Wnt8kSkAJg1axaLFy/G3t6e7777DmdnZ2uXJFe1bNmSDRs2WLsMESmizGYzYbHJ2UHV3vPxHIlIJNNovuHcCqVcqVPG0xJWlfGgur+7Zb6q/y8rHS7suB5YXdgBxoyc55QoDcGNLYFVcGPwqwm2+tgrUhTo/2SR29CvX7/sb6jt7Ozw8vKidu3aPPnkk/Tr1w8bG5vsc8uVK8fZs2cBcHZ2pkKFCgwbNoxBgwZln3Mt6LnG19eX5s2bM23aNMqXL3/H9U2YMIElS5awd+/eu3yFdycoKIiIiAh8fHxu+zkzZ87k+++/Z/fu3Vy5ckVhl0gRdvjwYV5++WUAJk2aRGhoqFXrKUw2btzItGnT2LVrFxERESxevJguXbrkOOfzzz9n2rRpREZGUqdOHT799FMaNmxonYJFpNhLTMtkz7l4dp29zN7z8ew7H09C6o2To/uUcCA0yJM6ZTwJDfakdhnPnMMA/8lkhIi9cOoPS2h1fhtk/b/hhW4BEHI/lLsfyjWHkuU0HFCkiFKAJXKbHnroIebMmYPRaCQqKoqVK1cybNgwfvrpJ3799Vfs7K7/7zRx4kQGDx5MSkoKP/74I4MHDyYwMJAOHTrkuOaxY8dwc3PjxIkTDBkyhEceeYT9+/dja2ub3y/vrtja2lK6dOk7ek5KSgoPPfQQDz30EGPGjMmjykTE2tLS0njqqadITU3lwQcf5JVXXrF2SYVKcnIyderUYcCAAXTr1u2G4wsXLmT48OHMmDGDRo0aMX36dNq3b8+xY8fw9fUFIDQ0lKysrBueu3r1agICAvL8NYhI0RYen8rOM3HsPHOZnWcvczQyEfP/61zlaGdDzUAPQoM8s29lSjr/+yq0iRctgdXJdXB6A6TG5Tzu6ns9sAp5ALzKK7ASKSYUYIncJkdHx+ywJjAwkHr16tG4cWPatGnD3Llzc/SwcnNzyz531KhRTJ06lTVr1twQYPn6+uLp6Ym/vz/jx4+nV69enDx5kipVqtzQ/oYNG3jttdc4dOgQ9vb21KhRg++//57169fz1ltvAWT/MTBnzhz69etHfHw8I0aMYOnSpaSnp9OgQQM++ugj6tSpA1zvufXqq68ybtw4Ll++TIcOHZg1axZubpa5AUwmE++//z4zZ87k/Pnz+Pn58cwzzzB27NgbhhDejmu9MTRsRaRoGz16NPv27aNUqVLMmzcvR09V+W8dOnS44T3jnz788EMGDx5M//79AZgxYwbLli1j9uzZjB49GiBXe+Wmp6eTnp6evZ2YmJhr1xaRgs9oMnMkIpFdZy+z40wcu85eJuImqwKW9XahfnBJ6pYtSd0gT6qUdsPe9j9+/2emwtktltDq1B8QfTjncUd3S1BVvqXl3qeyAiuRYkoBlsg9aN26NXXq1OGXX37JEWBdYzKZWLx4MZcvX8b
BweFfr3VtXpiMjIwbjmVlZdGlSxcGDx7MDz/8QEZGBtu3b8dgMNCzZ08OHjzIypUrWbt2LQAeHh4APP744zg7O7NixQo8PDz48ssvadOmDcePH8fLy7I08KlTp1iyZAm///47ly9fpkePHkyePJl3330XgDFjxjBr1iw++ugjmjdvTkREBEePHr37H5qIFHnLly/n448/BmDu3Ll33FNT/l1GRga7du3K0YvVxsaGtm3bsnXr1jxpc9KkSdlflohI0ZeWaWT32ctsvxpW7T57meQMY45z7GwM1Ahwp0E5LxqULUn9ciXxdXO6vQYSwuH4Sji2wjKXVY5hgQYIrAcV2kDFNhBYH2xvMcRQRIoVBVhifWYzZN64+kies3fJlW9vqlatyv79+3PsGzVqFG+88Qbp6elkZWXh5eV104DrmoiICN5//30CAwNv2vsqMTGRhIQEHn74YSpUqABAtWrVso+XKFECOzu7HB8SN23axPbt24mOjsbR0RGA999/nyVLlvDTTz8xZMgQwBKyzZ07N7vHVZ8+fVi3bh3vvvsuV65c4eOPP+azzz6jb9++AFSoUIHmzZvfzY9KRIqBqKio7F5BL730Eh07drRyRUVPbGwsRqMRPz+/HPv9/Pzu6AuGtm3bsm/fPpKTkylTpgw//vgjTZo0uem5Y8aMYfjw4dnbiYmJBAUF3d0LEJEC51pg9ffpS/x9Oo695+PJMJpynOPmaEe9siVpULYkDcp5USfIAxeH2/w4aTZD5H5LYHVsOUTsy3ncLQAqtoYKraF8K3DxyqVXJiJFiQIssb7MFHjPCnNxvH4RHFzv+TJms/mGcfwjR46kX79+REREMHLkSIYOHUrFihVveG6ZMmUwm82kpKRQp04dfv7555v21PLy8qJfv360b9+edu3a0bZtW3r06IG/v/8t69q3bx9JSUl4e3vn2J+amsqpU6eyt8uVK5cdXgH4+/sTHR0NwJEjR0hPT6dNmza398MQkWLNZDLRt29foqOjqV27NlOmTLF2SfIvrvXavR2Ojo7ZX4aISOGXmmFk9zlLYLXtFoFVaXcnGpX3yu5hVdnPDVubO/jyNyvdMvH6seWW3laJ4f84aICghlD5IcvNt5qGBYrIf1KAJXKPjhw5QkhISI59Pj4+VKxYkYoVK/Ljjz9Sq1YtGjRoQPXq1XOc99dff+Hu7o6vr2+OEOlm5syZw0svvcTKlStZuHAhb7zxBmvWrKFx48Y3PT8pKQl/f/+bzjX1z1X/7O1zdsk2GAyYTJY/YLTcvYjciY8//phVq1bh5OTEDz/8gJPTbQ4lkTvi4+ODra0tUVFROfZHRUVpuKaI3FR6lpHdZ+PZciqWv09fYu/5eDKNOWdcL+3uRJMK3jQu70Xj8t4Ee7n8+2TrN5ORAifXwKElcGI1ZCRdP2bvYulhVaUDVGoPJUrd+wsTkWJFAZZYn72LpTeUNdq9R3/88QcHDhz419W1goKC6NmzJ2PGjGHp0qU5joWEhOQIk/5L3bp1qVu3LmPGjKFJkyZ8//33NG7cGAcHB4zGnPMS1KtXj8jISOzs7ChXrtydvKxslSpVwtnZmXXr1v3rEEgRkT179jBq1CgAPvrooxsCe8k9Dg4O1K9fn3Xr1tGlSxfA0vtt3bp1vPDCC9YtTkQKBLPZzNHIK2w+GctfJ2LZHhZHambOvxX9PZxoXP4eAyv4R2i1GI6vhszk68fc/C09rKp0tEzAbq8vNkTk7inAEuszGHJlKF9eS09PJzIyEqPRSFRUFCtXrmTSpEk8/PDDPP300//63GHDhlGzZk127txJgwYN7rjtsLAwZs6cSefOnQkICODYsWOcOHEiu91y5coRFhbG3r17KVOmDG5ubrRt25YmTZrQpUsXpk6dSuXKlbl48SLLli2ja9eut1WHk5MTo0aN4rXXXsPBwYFmzZoRExPDoUOHGDhw4B2/DoDIyEgiIyM5efIkAAcOHMDNzY3g4ODsieVFpHBJTk7
mySefJDMzky5duvDMM89Yu6RCLykpKfv3JJD9O97Ly4vg4GCGDx9O3759adCgAQ0bNmT69OkkJydnzz8mIsVPREIqm07EsulkLJtPXiI2KT3HcZ8SjjSr6E3TCt73FlgBZKbBiVVXQ6tVOeez9QiGGo9C9a6Wydg1NFBEcokCLJHbtHLlSvz9/bGzs6NkyZLUqVOHTz75hL59+/7n8vDVq1fnwQcfZPz48SxfvvyO23ZxceHo0aPMmzePS5cu4e/vz/PPP5/9IbF79+788ssvtGrVivj4eObMmUO/fv1Yvnw5Y8eOpX///sTExFC6dGkeeOCBGyb+/Tfjxo3Dzs6O8ePHc/HiRfz9/Xn22Wfv+DVcM2PGjBwrWT3wwAMA2TWLSOHz8ssvc+zYMQICAvjqq6/u/gORZNu5cyetWrXK3r42gXrfvn2ZO3cuPXv2JCYmhvHjxxMZGUloaCgrV668o9/vIlK4pWRkseXkJTadjOWvEzGciknOcdzZ3pZG5b1oXtGH5pV8qOLndm+/n81mOPc37PvBMkQwPeH6MY9gqNHFcgtQaCUiecNgNpvN/32ayO1LTEzEw8ODhIQE3N3dcxxLS0sjLCyMkJAQzY0it03/bkQKrp9++onHH38cg8HAunXrcoQuBcm/vTfJndPPUyT/mc1mTscms+FYDBuORbPtdFyOiddtDFC7jGd2YFU32BNHO9t7b/jSKdi3APYvhPiz1/e7B0LNblCjq0IrKRD03lT0qQeWiIiI3JVz584xePBgAEaPHl1gwysRkcIqNcPI36cvseFYNOuPxXAuLiXH8TIlnWlZpRTNK5aiSXlvPFzsb3GlO5SWCAd+tPS2urDj+n6HElD9UajzBJRtDv8xCkFEJDcpwBIREZE7lpWVxVNPPUV8fDz33XdfjqHBIiJy985eSmb9UUtg9ffpS6RnXe9lZW9roFGINy2rlKJlFV8qlHLNvWHbZjNc3A0758DBn6/Pa2WwgQptLKFVlY7gcO8LIYmI3A0FWCIiInLHJkyYwObNm3Fzc+OHH37A3j6XvvUXESlmTCYz+y7Es/pwFGsOR3EyOinH8QAPJ1pW9aVVFV+aVvDG1TGXP8KlJcKBRbBrLkQeuL7fpwrUexpqPQ5uml9PRKxPAZaIiIjckbVr1/Lee+8BMGvWLCpUqGDlikRECpf0LCNbTl1i9aEo1h2JIvrK9RUD7WwM3FfOi5ZVStGqqi+VfEvkzeIYcadh20zY8y1kXLHss3W0TMRevz8EN9a8ViJSoCjAEhERkdsWFRVF7969MZvNDBkyhJ49e1q7JBGRQiEhJZP1x6JZfTiSP4/FkJxhzD5WwtGOllVK0a66Hy2r+OLhnEe9Ws1mCNsI22bAsRXA1fW8fCpDgwFQuye4eOVN2yIi90gBlliFFr+UO6F/LyIFg8lkonfv3kRFRVGzZk2mT59u7ZJERAq0qMQ0Vh6MZNWhSLaHxZFluv43jZ+7I+2q+9Guemkal/fKnRUDb8WYZZnXavPHEH3o+v6K7aDxs1C+tSZkF5ECTwGW5Ktrc6SkpKTg7Oxs5WqksEhJsUwiqjl2RKxr8uTJrF27FmdnZxYuXKjf4yIiNxGZkMaKgxEsPxDBzrOX+ef3cJX9SvBg9dK0q+5HrUAPbGzyeIheVrplJcFNH8HlM5Z99i4Q+hQ0fAZKVc7b9kVEcpECLMlXtra2eHp6Eh0dDYCLi0vejOmXIsFsNpOSkkJ0dDSenp7Y2ubhN5Mi8q82b97M+PHjAfj888+pXr26lSsSESk4IhPSWH7AElrtOpcztKoX7EmHmv60q+5HOR/X/CkoIwV2z4PNn8CVi5Z9Lt7QeCjcNxCcS+ZPHSIiuUgBluS70qVLA2SHWCL/xdPTM/vfjYjkv0uXLvHkk09iNBrp1asX/fr1s3ZJIiJWF5WYxrL913ta/VP9siXpWMufDjVLE+CZj71Vs9Jh5xz4631IjrHsc/OHpi9C/X7gkE8BmohIHlC
AJfnOYDDg7++Pr68vmZmZ1i5HCjh7e3v1vBKxIrPZzIABAzh//jyVKlXiiy++UM9ZESm2EtMyWXkgkqX7wtly6lKOnlYNroVWtUrj75HPQ6xNRjjwI6x/F+LPWfZ5loXmL0NoL7BzzN96RETygAIssRpbW1sFEyIiBdwnn3zCr7/+ioODA4sWLcLNzc3aJYmI5Kv0LCPrj8awdG84645Gk5Flyj5Wv2xJHq7tT4ea/pT2cLJOgWF/wcrREHXQsl2iNLQcBXX7gK3mDxWRokMBloiIiNzUzp07GTlyJAAffvghoaGh1i1IRCSfmExmtoXFsXRvOMsPRJCYlpV9rJJvCbrUDaRznQCCvFysV2T8OVg9Dg4vsWw7eUDzVyyTsztYsS4RkTyiAEtERERukJiYyBNPPEFmZiZdu3Zl6NCh1i5JRCTPnYlN5sdd5/lldzgRCWnZ+0u7O/FoaACdQwOo7u9u3aHUWRmw+WPLPFdZaWCwgQYDoNVYcPGyXl0iInlMAZaIiIjkYDabeeaZZzh16hRly5bl66+/1rxXIlJkJadnsexABD/tvMD2M3HZ+92d7OhYy59HQwNpFOKFjU0B+D14cQ8seR6iD1m2yzaHDlOgdE3r1iUikg8UYImIiEgOM2fOZMGCBdjZ2bFgwQJKltRy6yJStJjNZnacucyPO8+z7EAEKRlGAGwM8EDlUjxeP4i21X1xtCsg87VmpsGGSbDlUzAbwcUbHpoCtR4DfcEgIsWEAiwRERHJtnv3bl566SUAJk2aROPGja1ckYhI7olMSOPn3Rf4ced5zlxKyd4f4uPK4w3K0K1uGetNxn4rMcfhx74QfdiyXbM7dJgKrj7WrUtEJJ8pwBIREREA4uPjeeyxx8jIyODRRx/l1VdftXZJIiL3zGQys+lkLN/+fZZ1R6MxmswAuDrY0qm2Pz0aBFG/bMmCOVR6/4/w2zDITAZXX3hkOlTtZO2qRESsQgGWiIiIYDab6d+/P2FhYZQrV445c+YUzA9zIiK36VJSOj/tusD3289x9h+9rRqW86LHfUF0qFkaV8cC+nEoMxVWjILd8yzb5e6H7l+Dm5916xIRsaIC+htbrMVoNDJhwgS+/fZbIiMjCQgIoF+/frzxxhv6ICMiUoR99NFHLFmyBAcHB3788UfNeyUihZLZbGbn2ct89/dZlh+IJMNoAsDN0Y7u9cvQq1EwlfzcrFzlf0iKhh+egPBdgAFavAYtRoFNAZmPS0TEShRgSQ5Tpkzhiy++YN68edSoUYOdO3fSv39/PDw8sudEERGRomXLli2MGjUKsARZDRo0sHJFIiJ3Jik9i192X+C7v89xLOpK9v5agR70bhzMI3UCcHEoBB99Yo7Dd90h/hw4ecJjs6FiG2tXJSJSIBSC3+KSn7Zs2cKjjz5Kp06WsfXlypXjhx9+YPv27VauTERE8kJMTAw9evQgKyuLJ554gueee87aJYmI3LbzcSnM3XKGRTvOcyU9CwAnexs61wmgd+Oy1C7jad0C70TkQZj/KKTEgld5eOpH8Klo7apERAoMBViSQ9OmTZk5cybHjx+ncuXK7Nu3j02bNvHhhx/e8jnp6emkp6dnbycmJuZHqSIico+MRiO9e/cmPDycKlWqMHPmTA0XF5ECz2w2sy0sjjmbw1hzOIqrc7JT3seVPk3K0q1eGTyc7a1b5J2K2A/zO0PqZShdG/os1iqDIiL/jwIsyWH06NEkJiZStWpVbG1tMRqNvPvuu/Tq1euWz5k0aRJvvfVWPlYpIiK54b333mP16tU4Ozvz008/4eZWwOeFEZFiLT3LyG/7Ipi9KYzDEde/MH2gcin6NytHi0qlsLEphCF8XBh8290SXpW5D3r9BM6e1q5KRKTAUYAlOSxatIjvvvuO77//nho1arB3715efvllAgIC6Nu3702fM2bMGIYPH569nZiYSFBQUH6VLCIid2HdunW8+eabAHzxxRfUrFnTyhWJiNxcQmom3/5
9ljmbzxCbZOn172RvQ7d6ZejftFzBn5T93yTHwrfdIDka/GpB75/BycPaVYmIFEgKsCSHkSNHMnr0aJ544gkAatWqxdmzZ5k0adItAyxHR0ccHR3zs0wREbkHFy9e5KmnnsJsNjNw4MBb/n4XEbGmyIQ0Zm8O4/tt50i6Or+Vv4cTTzcpx5MNg/B0cbByhffIZISfB0HcafAMht4/KbwSEfkXCrAkh5SUFGxsbHLss7W1xWQyWakiERHJTdcma4+OjqZ27dp8+umn1i5JRCSHk9FX+PLP0yzZG06m0TLBVRU/N55tWZ6Hawdgb2vzH1coJP76AE6vBztneGoRuJW2dkUiIgWaAizJ4ZFHHuHdd98lODiYGjVqsGfPHj788EMGDBhg7dJERCQXjB07lr/++gs3Nzd++uknnJ2drV2SiAgAe8/H87/1J1l9OCp7X8NyXjzXsgItq5QqWotMnN8OGyZZHj/8IfhWs249IiKFgAIsyeHTTz9l3LhxDB06lOjoaAICAnjmmWcYP368tUsTEZF79MsvvzB16lQAvv76aypVqmTlikREYNfZy3y87gQbj8dk72tX3Y9nW1SgftmSVqwsj5iMsHwkmE1QuyeEPmXtikRECgUFWJKDm5sb06dPZ/r06dYuRUREctGxY8fo168fAK+88gqPP/64dQsSkWJv55k4Pl53gr9OxAJga2Oga91Anm1Rnoq+hXhi9v+y51uI2AuO7vDgu9auRkSk0FCAJSIiUsQlJSXRrVs3rly5wv3338+UKVOsXZKIFGPbw+L4eN1xNp+8BICdjYHu9crwfKuKBHu7WLm6/yczFSL2Q8Q+iD8LSVFgzAQbW3DzB6/yUL4leFe4veulJcC6iZbHLUdDiVJ5VrqISFGjAEtERKQIM5vNDBo0iMOHD+Pv78+iRYuwt7e3dlkiUpRlJFuCnvQrkJECmcmQmcrJ6Cv8uvcix6KScMaG5rbONKpWju7NahLgHwBOBSS8SkuEQ7/Akd8h7E8wZvz3c0pVgxavQY2u8G9zde1bCCmx4F0RGg7JvZpFRIoBBVgiIiJF2PTp01m4cCF2dnb8+OOPlC6tVa5E5B6YTHDlIlw6CZdOWW6J4ZbAKikKkqIhI+mmT60IDAdw+MfOk1dvAE4eULIceJYFn8oQ1BDK3AcuXnn5iq5LuACbPoJ9C3K+BldfCKxv6WXlVhpsHcGUCQnhELkfzm2FmCPwU3/YNgN6fgslfG/exp5vLPf3DQZbfZkgInInFGCJiIgUURs3bmTkyJEAfPjhhzRr1szKFUlBk5KSQrVq1Xj88cd5//33rV2OFDQpcZaA5toQuugjEHcaslL/+7n2Lhgd3LiUaU9Mmi2pOGLGQKkSDvh7OuNoMFl6aKUnWno8ZSZbhtdF7LPc/smnMoS0gOqdIbgp2ObyR5iMZMuKgNtmgjH9ept1noCqD1se/1uvqtR42PYlbP4Yzm+DeZ2h7283Dg+M2Gf5edo6QO0eufsaRESKAQVYIiIiRdDFixfp0aMHRqORp556ihdeeMHaJUkB9O6779K4cWNrlyEFgTHTElSd3QwXtl+d8+nczc+1sbf0lPKuaOmV5BEEbn5QwnKLwZPPNkXw/fZzZBrNAHSq7c+r7SpTrlSJm18zIxkun7XMM3X5DEQetIRBl05A7HHLbccscA+E+v2hwQBw9b73133ub1jynCWYAyjb3DIUMOSBfw+t/snZE1qOglqPwdyHLb2xvukCg9aCvfP183Zf7X1VtVP+9SoTESlCFGCJiIgUMRkZGTz++ONERUVRs2ZNZs6cieF2P4hJsXHixAmOHj3KI488wsGDB61djuS3rAxLQHR2C5zbAue3Q2bKjeeVLAf+dSw3v1rgUxE8gm/aCyopPYuZf57iq007SMkwAnB/JR9ea1+VWmU8/r0eB1fwq265/VPyJTj/NxxbDkeXWYYrrn/H0tup2TBo8jw43OXcWbvnw28vg9loCcYe/ggqPXj7wdX
/510B+v0Os9tD1EHY94MlaAPITIMDiyyP6/a5u+uLiBRzCrBERESKmJEjR7Jlyxbc3d355ZdfcHV1tXZJcoc2btzItGnT2LVrFxERESxevJguXbrkOOfzzz9n2rRpREZGUqdOHT799FMaNmx4222MGDGCadOmsWXLllyuXgqs+PNwcg2cWGuZnPz/z1Xl5AnBTSC4MQTWg9K1wLnkf17WZDLz0+4LTFt1jJgrliF4dYI8GdW+Ck0r+txbza7elh5LVTtBpw/h0BLY+ilEHrAEWfsXwmNfWwK2O7FhCmx4z/K4ZndLeOX0HyHb7fCuAPe/CitHw5ZPoV5fy4qFUYcsQyRdfCyrFoqIyB1TgCUiIlKEfP/993zyyScAfPPNN1SqVMnKFcndSE5Opk6dOgwYMIBu3brdcHzhwoUMHz6cGTNm0KhRI6ZPn0779u05duwYvr6WyaNDQ0PJysq64bmrV69mx44dVK5cmcqVKyvAKsrMZgjfDYeXwInVEHM053HXUpahcsFNoGwzKFUVbGzuqIntYXFM/P0QB8MTASjr7cLoh6ryUM3Sud/z084R6vSEWo9bVglcPc4yxPCrtvDIxxD61O1dZ+ec6+HVA69Bq9fvvtfVzdTtAxsmW4YlHv0dqj9qGRoJloDLxjb32hIRKUYUYImIiBQR+/fvZ9CgQQCMHTuWzp07W7kiuVsdOnSgQ4cOtzz+4YcfMnjwYPr37w/AjBkzWLZsGbNnz2b06NEA7N2795bP//vvv1mwYAE//vgjSUlJZGZm4u7uzvjx4296fnp6Ounp6dnbiYmJd/GqJF+YTBC+Ew4vtdwSzl8/ZrCBMg2hUluo2A5K177jwOqa83EpTF5xlGUHIgBwc7TjxTYV6du0HI52eRzQ2NhY5puq0BqWvgDHlsGSoZbXV+eJf39u2EZYPsLyuNVYy3xXuc2xBDQcDBunwabpUK3z9f8OHkG5356ISDGhAEtERKQIiI+Pp3v37qSmptKuXTveeusta5ckeSQjI4Ndu3YxZsyY7H02Nja0bduWrVu33tY1Jk2axKRJkwCYO3cuBw8evGV4de18/Zsq4KIOw97v4NBiyzxR1ziUgMrtLavpVWh1W0MC/01Sehb/W3+SrzaFkZFlwsYATzQMZni7yviUcLzHF3GHXLzgie9g2auw82vLZOxOHlDlFuFvehL8PAhMWVDzMXhgZN7V1vAZ+OtDuLgbrkRYhm8CeCrAEhG5WwqwRERECjmj0UivXr04efIkwcHBfP/999jaaohKURUbG4vRaMTPzy/Hfj8/P44ePXqLZ92bMWPGMHz48OztxMREgoL0QdzqUuLg4M+W4Orinuv7HdygykNQvQtUbJNzJby7ZDab+X1/BG//fpjoq/NcNSnvzfhHqlPN3/2er3/XDAbo+D4YM2DPN/DbMCjb9ObzWW39DJKioGQIPPpZ7g4b/P9KlIKSZS3DCGNPXO+B5Rmcd22KiBRxCrBEREQKuTfffJPly5fj5OTEL7/8go/PPU6aLMVKv379/vMcR0dHHB3zuXeN3JzZDGf+gp2zLavyGTMs+23soPJDUOdJqNgW7J1yrcnTMUm8+esh/joRC1jmuRrbsRrtqvsVjBVObWwsIdbZLRB3Cta9DZ3ez3lOUjRstswPSJvxuRLq/SfvSpYA69KJ6z2wPBRgiYjcLQVYIiIihdhPP/3Eu+++C8CsWbOoX7++lSuSvObj44OtrS1RUVE59kdFRVG6dGkrVSV5LiMFDiyCbV9C9OHr+/1qQmgvqN0DXHM3vE7LNPK/9SeZ8edpMowmHOxsGNqyAs+2qICTfQHr5WnvBA9/CPMfhR1fWSZ0D6x3/fjGaZCZDAH1oEbX/KnJpxKcWAWXTv2jB5Z6LoqI3C0FWCIiIoXUgQMHsnvPDB8+nN69e1u3IMkXDg4O1K9fn3Xr1tGlSxcATCYT69at44UXXrBucZL74s/Djlmwax6kxVv22btYJiuv1xf86+TJULg
/j8cwbslBzsWlAPBA5VJM7FyDcj6uud5WrinfEmp0s6xQuHve9QDLZIIDP1oet34jb4cO/pN3Bct9+C5Iv7rwgUeZ/GlbRKQIUoAlIiJSCMXFxdGlSxeSk5Np06YNU6ZMsXZJkouSkpI4efJk9nZYWBh79+7Fy8uL4OBghg8fTt++fWnQoAENGzZk+vTpJCcnZ69KKEVAzDH46wM48BOYjZZ9nmWh4RCo2xucPfOk2cvJGby97DC/7LZMBF/a3Yk3H6nOQzVLF4zhgv+lzpOWAOvEWstwS4PB0mMt9TLYu0LIA/lXi3cly/2FnZZ7F29wKMABoIhIAacAS0REpJDJysriiSee4PTp05QrV46FCxdiZ6e39KJk586dtGrVKnv72gTqffv2Ze7cufTs2ZOYmBjGjx9PZGQkoaGhrFy58oaJ3aUQijwAG9+Hw0sBs2VfyAPQ6DnLaoI2eTN0z2w2s+JgJOOXHiQ2KQODAfo1LcerD1ahhGMh+v1SrjnYOkLiBYg5Cr7V4Oxmy7HgRmBrn3+1+FwNsK4FkB4aPigici8K0buRiIiIALz++uusWbMGFxcXlixZgre3t7VLklzWsmVLzGbzv57zwgsvaMhgURK+2zJP07Hl1/dVfRgeGAEBdfO06ejENMYtPciqQ5Z51Sr5lmBy99rUL1syT9vNEw4uEHI/nFwLJ9ZYAqwzmyzHyjbL31pK+FlWhMy4YtnW/FciIvdEAZaIiEgh8sMPPzBt2jQA5syZQ506daxckYjck9gTsG4iHPn16g6DZZLxB0aAX408bdpsNrN4Tzhv/nqIK2lZ2NkYGNqqIs+3qoCjXQGbpP1OVGxnCbBOroGmL1pWJwRL76z8ZDBY5sGK2GvZ1gqEIiL3RAGWiIhIIbFnzx4GDhwIwOjRo+nRo4eVKxKRu3YlEjZMht3zLUPMDDZQq4cluLo29CwPxSVn8PovB1h5KBKAOmU8mNy9NtX83fO87TxXqR2sHAVnt1omUE+JBTtnywqE+c2n0vUASz2wRETuiQIsERGRQiAmJoYuXbqQmppKhw4deOedd6xdkojcjax02Pq5ZZ6rzGTLvsodoM148KueLyWsOxLFqJ8PEJuUjp2NgVfaVeaZB8pjZ2uTL+3nOe8K4FUe4k7D0uct+4LuAzsHK9TyjzBSc2CJiNwTBVgiIiIFXGZmJj179uTcuXNUqlSJ77//HlvbQjy8R6S4Or4KVo62BCsAZe6DdhOhbNN8aT4pPYt3fj/Mgh3nActcVx/1DKVmoEe+tJ+vGj0HK0ZaJnIHKJvPwwev8a5w/bF6YImI3BMFWCIiIgXciBEjWL9+PSVKlGDJkiV4enpauyQRuRPx52H5CDi+0rJdorQluKrdwzJPUj7Yez6el37Yw7m4FAwGGNgshBHtq+BkX0TD8EZDLHOIrR4Ll05BzW7WqeOfw0E9NQeWiMi9UIAlIiJSgH311Vd88sknAHzzzTdUr54/Q4xEJBeYjLDjK8sk7RlJYGMPTZ63zHPl6JY/JZjMfL0pjCkrj5JlMhPo6cz7j9ehSYVisHppuWYwZAOYzfkWFN6gVFXwLAsuXuDkaZ0aRESKCAVYIiIiBdTGjRsZOnQoAG+99RZdunSxbkEicvuij8KvL8CFHZbtoMbQ+RMoVSXfSohLzuDVRXtZfywGgE61/HmvWy08nO3zrYYCwVrhFYCdI7ywE2xsrVuHiEgRoABLRESkAAoLC6N79+7Z81+NGzfO2iXljvQrkHgREi5Y7lMuQVo8pMZDWoJlgmtTJhgzwJgJNbpCw8HWrlrk9plMsG0GrJ0AxnRwcIN2b0H9/mCTf5Ok/336EsMW7CEqMR0HOxvefKQ6TzUMxqAQJf9ZY/J4EZEiSAGWiIhIAXPlyhU6d+5MbGws9evXZ/bs2YXvQ2dSNETsg6hDEHsCLp2A2OOQevnOruNfJ2/qE8kLCeGw5DkI+9OyXelBeORjcA/ItxJMJjOfrT/J9LXHMZmhQil
XPnuqHtX83fOtBhERkbygAEtERKQAMRqNPPXUUxw8eBB/f3+WLl2Ki4uLtcv6d1npEL4bzm2B8zsgYi9cibj1+Y4e4BFo+VDvWsoyL4yzJzi6g70z2DqArb3l5l0xn16EyD06/KtlyGBaAti7wIPvQIMB+TpsLCE1k+EL97LuaDQAj9Uvw8RHa+DioD/5RUSk8NO7mYiISAEyduxYfv/9d5ycnFiyZAmBgYHWLulGJhNE7oMTa+H0BsscP8b0/3eSAXwqQ+ma4FMFfCpatkuWy7fJq0XyhTET1rwJf39u2Q6oB91mWf7N56PjUVd45ptdhMUm42Bnw7tdavJ4g6B8rUFERCQvKcASEREpIObPn8+UKVMAmD17Ng0bNrRyRf+QkQIn18LRZZb7lNicx11LQdmmlomqA+uBX01wLGGdWkXyS0I4/NQfzm+zbDd9CdqMt/QezEfLD0Qw4sd9pGQYCfR0Zkbv+tQq45GvNYiIiOQ1BVgiIiIFwNatWxk82DJZ+dixY3nyySetXBGW0OrYcji8xNLbKiv1+jGHElC+JVRsA+UeAO8KWmFLipewv+DHvpaFCBw9oOsXULVTvpZgNJmZtuoYM/48BUDTCt58+mRdvEs45msdIiIi+UEBloiIiJWdO3eOLl26kJGRQdeuXZk4caL1ijGb4dzfsPc7OLQEMq5cP+YZDNU6Q5UOUKahVtaS4mv3fPj9FTBlQena0GM+eIXkawlX0jJ54fs9/Hk8BoAhD5TntfZVsLPNv5UORURE8pMCLBEREStKTk7m0UcfJTo6mjp16jB//nxsbKzwATT+POz7AfZ+D5fDru/3LAu1HrMEV/511MtKijeTEda+CVs+tWzX6AZd/mdZfCAfhcenMnDuDo5GXsHZ3papj9XmkTr5t9KhiIiINSjAEhERsRKTycTTTz/N3r178fX1ZenSpZQokY/zRpnNcHYz/P2FZaig2WTZ71ACqneB0KcguAlYI1ArRNLT03F01JCtIi8jBX4eBMeWWbZbjIaWo/M91N1/IZ6B83YScyWdUm6OzO57n+a7EhGRYkEBloiIiJW8+eab/PLLLzg4OLB48WLKli2bPw1npsKBH2HblxB18Pr+cvdD3d5Q7RFwcM2fWgqhFStWsGDBAv766y/Onz+PyWTC1dWVunXr8uCDD9K/f38CAtQbpkhJjYfve8L5v8HW0dLrqtZj+V7GqkORDFuwh7RME1VLu/F1v/sI9Mzf3l8iIiLWogBLRETECubNm8c777wDwMyZM2natGneN5oSB9tmwPZZkBpn2WfvAnWegIbPgG/VvK8By8TTiamZJKRmEn/1/totI8tEltFElslMptFE3eCStKhcKl/q+i+LFy9m1KhRXLlyhY4dOzJq1CgCAgJwdnYmLi6OgwcPsnbtWt5++2369evH22+/TalSBaN2uQdJ0fBNN4g6AE4e8NQiCG6cryWYzWa++iuM91YcwWyGFpVL8dlTdXFzyt/VDkVERKxJAZaIiEg+27BhQ/aKg6+//jp9+/bN2waTomHrZ7Dja8hIsuzzCIaGg6FeH3AumetNpmYYOR51hdOxSVyIS+XC5VTOX07hwuVULsankmUy39Z1BjYPKTAB1tSpU/noo4/o0KHDTecp69GjBwDh4eF8+umnfPvtt7zyyiv5XabkpvhzML8LxJ0CV1/osxhK18zXEkwmMxN/P8zcLWcA6NO4LG8+Ul2TtYuISLGjAEtERCQfHTt2jG7dupGZmUnPnj15++23866xxIuw+RPYNReyUi37SteC+0dYhgna2OZKM7FJ6ew7H8+RiESORFzhSGQiZ2KT+a+MytXBFg9nezxcHPBwtsPdyR4ne1vsbA3Y29hgZ2ugftncD9fu1tatW2/rvMDAQCZPnpzH1Uieu3wG5nSExHBL4Pv0EvCukK8lZBpNvPbTfhbvCQfgjU7VGNg8BIMWUxARkWJIAZaIiEg+iY2NpVOnTly+fJnGjRszZ86cvFl
xMDkWNk6DnbPBmGHZF1gfHngNKre/p0mnzWYzFy6nsj0sjh1n4th+Jo7TMck3PdenhAOVfN0I8nKmTEkXypS8fl/KzRH7ItCDJCMjg7CwMCpUqICdnf6sKjISwmHeI5bwyqcy9FkCHoH5WkJappEXvt/N2iPR2NoY+ODxOnSpm781iIiIFCT6S0tERCQfpKen07VrV06dOkVISAhLly7F2TmXJ19OT4Ktn8OWT64PFSzbDB4YAeVb3XVwlZSexaYTsaw/Gs3GEzFEJKTdcE4l3xLUDPSgamk3qvm7U83fnVJuRXdlvpSUFF588UXmzZsHwPHjxylfvjwvvvgigYGBjB492soVyl1Liob5nS3DB73KQ9/fwK10vpZwJS2TQfN2si0sDkc7G/7Xqx5tqvnlaw0iIiIFjQIsERGRPGY2mxkwYACbNm3Cw8ODZcuW4evrm3sNZGXA7nnw5xRIjrHs8w+FthOgQqu7uuSZ2GTWHY1m/dFotoVdItN4fTygnY2BWmU8aFjOi/vKedGgXEk8XRzu/XUUImPGjGHfvn1s2LCBhx56KHt/27ZtmTBhggKswiolDuY/CpdOgkcQPP1rvodXl5LS6TtnOwfDE3FztOOrvg1oVN47X2sQEREpiBRgiYiI5LG33nqL77//Hjs7O37++WeqVauWOxc2m+HYclg1Fi6HWfZ5lYfW46B6F7jD4Ynh8an8vu8iv+2/yMHwxBzHynq70KqKL62q+tKwnBfODrkzf1ZhtWTJEhYuXEjjxo1zzEdUo0YNTp06ZcXK5K5lJMO33SD6MJQoDU8vBc+gfC0hIiGVXl9t43RMMt6uDswb0JCagR75WoOIiEhBpQBLREQkD3377be89dZbAHzxxRe0adMmdy4ccwxWjoZTf1i2XX2h5Sio1xds7W/7MrFJ6SzbH8Fv+y6y8+zl7P22NgYal/eiVRVfWlf1pXypErlTdxERExNz0150ycnJmmC7MDKZ4JchcHEPOHtZwqt8nrA9IiGVJ2f+zZlLKQR6OvPNwIb6/05EROQfFGCJiIjkkb/++ouBAwcC8NprrzFo0KB7v2haAvw5FbbNAFMW2DpA0xeh+XBwvL0Pu0aTmb9OxLBg+3nWHoki6+pygQYDNArx4pE6AXSo6Y+Xa/EaFngnGjRowLJly3jxxRcBskOrr776iiZNmlizNLkba9+Eo79b/n968gfwrZqvzf8zvArycuaHwY0pU9IlX2sQEREp6BRgiYiI5IETJ07QpUsXMjIy6N69O5MmTbq3C5rNsPd7ywfta/NcVekI7d+1DBu8DREJqfy48wILd5wnPD41e3+dMh50Dg3k4dr++Lk73VudxcR7771Hhw4dOHz4MFlZWXz88cccPnyYLVu28Oeff1q7PLkTu+ZZFj4AePR/ENw4X5tXeCUiInJ7FGCJiIjkskuXLtGpUyfi4uJo2LAh8+fPx+YO56PKIfYE/PYynN1k2fauBA9Nhkpt//OpZrOZnWcv89Vfp1lzOIqrna1wd7KjW70yPNEwiKql3e++tmKqefPm7N27l8mTJ1OrVi1Wr15NvXr12Lp1K7Vq1bJ2eXK7Tv8Jy4ZbHrccA7Ufz9fmoxPTFF6JiIjcJgVYcoPw8HBGjRrFihUrSElJoWLFisyZM4cGDRpYuzQRkQIvNTWVzp07c+LECYKDg1m6dCkuLnf5gTQrHTZ9BH99AMYMsHeBlqOh0XNg9+/D+zKNJpYfiODrTWHsv5CQvb9hiBdPNgyiQ01/nOyL90Ts96pChQrMmjXL2mXI3UoIhx/7WYbi1nocWozK1+YvJ2fQ++ttnLmUQpmSCq9ERET+iwIsyeHy5cs0a9aMVq1asWLFCkqVKsWJEycoWbKktUsTESnwjEYjvXr1YsuWLXh6erJixQpKly59dxc7uwV+Gwaxxy3bFdtBpw+gZNl/fdqVtEy+33aOuVvOEJGQBoCDnQ3d6wXSv1kIlf3c7q4eyWH58uXY2trSvn37HPt
XrVqFyWSiQ4cOVqpMbosxC34eBKlx4F8HOn9mmQQunySlZ9FvznaORyXh5+6o8EpEROQ2KMCSHKZMmUJQUBBz5szJ3hcSEmLFikRECgez2cwrr7zC4sWLcXBwYOnSpVSvXv3OL5SWCGvGw66rv4ddfaHDZKjR7V8/YCekZDJ7cxhzNoeRmJYFgE8JR55uUpZejYLxLuF4Ny9LbmH06NFMnjz5hv1ms5nRo0crwCroNkyCc1vAwQ0emwP2+Tf3W1qmkUHzdrDvQgIlXez5dmAjgrwUXomIiPwXBViSw6+//kr79u15/PHH+fPPPwkMDGTo0KEMHjzY2qWJiBRoH374IZ9++ikA8+fP54EHHrjzi5xaD7++CAnnLdv1+kK7t8D51r1gLyWl8/WmMOZvPUtSuiW4quhbgmceKE/n0AAc7TRMMC+cOHHipgFl1apVOXnypBUqujPHjh2jZ8+eObZ/+OEHunTpYr2i8supPyzDcgE6fwzeFfKt6SyjiRe+383fp+Mo4WjH/AGNqKRekSIiIrdFAZbkcPr0ab744guGDx/O66+/zo4dO3jppZdwcHCgb9++N31Oeno66enp2duJiYn5Va6ISIGwYMECRowYAcD777+fIxi4LelJll5XO7+2bHuWhUc/h5D7b/mUuOQMZvx5im+2niU10whA1dJuvNSmEg/VKI2NTf4NhyqOPDw8OH36NOXKlcux/+TJk7i6ulqnqDtQpUoV9u7dC0BSUhLlypWjXbt21i0qP1yJhF+GAGao3x9qds+3ps1mM+OWHmTtkWgc7Wz4um8DapXxyLf2RURECjsFWJKDyWSiQYMGvPfeewDUrVuXgwcPMmPGjFsGWJMmTeKtt97KzzJFRAqMP//8M/v340svvcTw4cPv7AJhG2Hp8xB/zrJ93yBo+xY4lrjp6cnpWXz1Vxiz/jqd3eOqVqAHL7auSNtqfgqu8smjjz7Kyy+/zOLFi6lQwdKD5+TJk7z66qt07tzZytXdmV9//ZU2bdoUiuDtnpjNsGQoJMeAX014aFK+Nv/5+pP8sP08Ngb49Mm6NCrvna/ti4iIFHb3sKa3FEX+/v43DImoVq0a586du+VzxowZQ0JCQvbt/PnzeV2miEiBcOjQIbp06UJGRgbdunXjww8/xHC7E0FnpsHKMTDvEUt45REETy+1TNR+k/AqPcvI3M1hPDB1PR+tPU5SehY1AtyZ0+8+fn2hGQ+q11W+mjp1Kq6urlStWpWQkBBCQkKoVq0a3t7evP/++/d8/Y0bN/LII48QEBCAwWBgyZIlN5zz+eefU65cOZycnGjUqBHbt2+/q7YWLVp0570GC6M938CpdWDnBI/NBnvnfGv6510XeH+1ZUGGCZ1r8GCNu1zcQUREpBhTDyzJoVmzZhw7dizHvuPHj1O27K1XvXJ0dMTRUZMDi0jxcvHiRTp06EB8fDxNmzbl22+/xdb2NuebijpsWQEt+pBlu15fePAdcHK/4VSTycyv+y7y/upjXLicCkA5bxdGtK9Cx5r+Cq2sxMPDgy1btrBmzRr27duHs7MztWvXvru5z24iOTmZOnXqMGDAALp163bD8YULFzJ8+HBmzJhBo0aNmD59Ou3bt+fYsWP4+voCEBoaSlZW1g3PXb16NQEBAYBl2P+WLVtYsGBBrtRdYCVcgFVjLY9bvwGlquRb05tOxDLq5/0APNOiPE83KZdvbYuIiBQlBrPZbLZ2EVJw7Nixg6ZNm/LWW2/Ro0cPtm/fzuDBg5k5cya9evW6rWskJibi4eFBQkIC7u43fhgTESnsEhMTeeCBB9i3bx+VK1dmy5YteHvfxnAgsxm2z4LVb4AxHVx8oMv/oHL7m56+6+xlJv5+mH3n4wHwdXNkWNtK9GgQhL2tOlHficL83mQwGFi8eHGOCdYbNWrEfffdx2effQZYpgAICgrixRdfZPTo0bd97W+++YZVq1bx7bff3lFNhernaTbDD0/A8ZVQpiEMWAk2+bO
4wamYJLp8vpkraVl0rhPA9J6hCp1FRPJIoXpvkruiHliSw3333cfixYsZM2YMEydOJCQkhOnTp992eCUiUtRlZGTQvXt39u3bh6+vLytXrry98CopBpYOhROrLdsV21nCqxK+N5x6MT6VKSuPsnTvRQBcHWwZ2qoiA5qF4OygVQULinXr1rFu3Tqio6MxmUw5js2ePTvP2s3IyGDXrl2MGTMme5+NjQ1t27Zl69atd3StRYsWMWTIkP88r1Av2HJ0mSW8srGHRz/Lt/AqITWTwfN2ciUtiwZlSzLt8doKr0RERO6BAiy5wcMPP8zDDz9s7TJERAock8nE4MGDWbt2La6urixbtoyQkJD/fuKJNbDkOcvk0baO0G4iNHoG/t98WSkZWcz48zQzN54iLdOEwQCP1y/DiPZV8HVzyqNXJXfjrbfeYuLEiTRo0AB/f//bn/ssF8TGxmI0GvHz88ux38/Pj6NHj972dRISEti+fTs///zzf55baBdsSU+CFa9ZHjd7Kd+GDhpNZl78YQ+nY5MJ8HDii971cbRT+CwiInIvFGCJiIjcplGjRjF//nxsbW1ZtGgRDRo0+PcnZKbBmvGw/UvLtm916P4V+NXIcZrZbGb14Sgm/naY8HjLPFcNy3kx/pHq1Az0yIuXIvdoxowZzJ07lz59+li7lLvm4eFBVFTUbZ07ZsyYHCtsJiYmEhQUlFel5Z6N0yAxHDzLwv0j8q3ZySuOsPF4DM72tszq24BSbporVERE5F4pwBIREbkN06ZNy15d7uuvv6Zjx47//oS40/BjP4jYZ9lu9Cy0nXDDymfnLqUw4bdD/HE0GoBAT2de71iNjrVK52uvHrkzGRkZNG3a1Cpt+/j4YGtre0P4FBUVRenSebO6XaFcsOXyGfj7f5bHHaaAg0u+NPvzrgvM+isMgPcfr0ONAIXQIiIiuUEzwIqIiPyHefPm8dprlmFI06ZNo2/fvv/+hMO/wpctLOGVsxc89aPlA/Q/wqv0LCOfrjtBu4/+5I+j0djbGni+VQXWDm9Bp9r5OyRN7tygQYP4/vvvrdK2g4MD9evXZ926ddn7TCYT69ato0mTJlapqUBaOwGMGRDSAio/lC9NHo1MZOySAwC81LoinWr750u7IiIixYF6YImIiPyL33//nYEDBwIwYsQIRoz4l2FIWRmWIYPbvrBsBzWCx2aDR5kcp205FcvYxQcJi00GoGkFbyY+WpOKviXy5DVI7ktLS2PmzJmsXbuW2rVrY29vn+P4hx9+eE/XT0pK4uTJk9nbYWFh7N27Fy8vL4KDgxk+fDh9+/alQYMGNGzYkOnTp5OcnEz//v3vqd0i49w2OLQYDDbQ/r0b5pvLC1fSMhn67W7SMk20qFyKl9tWzvM2RUREihMFWCIiIrewefNmevTogdFo5Omnn2bKlCm3Pjn+nGXIYPguy3bTl6DNeLC9HmwkpGYyecURfth+HoBSbo6Me7g6j6jHVaGzf/9+QkNDATh48GCOY7nx33Lnzp20atUqe/va/FN9+/Zl7ty59OzZk5iYGMaPH09kZCShoaGsXLnyhondiyWzGdZNtDwO7QWla+ZDk2ZG/3KA07HJ+Hs48VHPUK04KCIikssMZrPZbO0ipGhJTEzEw8ODhIQE3N3drV2OiMhdOXjwIPfffz/x8fF06tSJxYsX39DLJtuxFbD4WUiLBydP6DoDqnTIccqaw1G8seQAUYnpAPRuHMxrD1XF3ekW15Rcpfem3FWgf56nN8D8R8HWAV7ac0MPyLwwf+sZxi89hJ2NgYXPNKF+2ZJ53qaIiORUoN+bJFeoB5aIiMj/c/bsWdq3b098fDxNmzZl0aJFNw+vjFnwx0TY/LFlO7A+PDYHSpbNPiU2KZ0Jvx7i9/0RAIT4uDK5Wy0alffOj5cieezkyZOcOnWKBx54AGdnZ8xms3rTWZPZDH+8Y3ncYEC+hFf7L8Tz9u+HARjdoarCKxERkTyiAEtEROQfYmJ
iePDBB7l48SI1atTgt99+w8XlJquXJV+Cn/pD2J+W7UbPQbuJYOcAWIYULd4TzsTfDxOfkomtjYEhD5RnWJtKONnb5uMrkrxw6dIlevTowfr16zEYDJw4cYLy5cszcOBASpYsyQcffGDtEounk+vgwg6wc4bmw/O8uZSMLIYt2Eum0cxDNUozsHlInrcpIiJSXGkVQhERkauSkpLo1KkTx48fJzg4mFWrVuHl5XXjiRf3wswWlvDK3hUenwsdJmeHV5EJafSfu4Phi/YRn5JJdX93lj7fjFEPVVV4VUS88sor2Nvbc+7cuRwBZ8+ePVm5cqUVKyvmNn1kuW8wANzyfj6wd5YdIezqvFdTutdW7zsREZE8pB5YIiIiQEZGBt26dWPHjh34+PiwevVqAgMDbzxx7w/w+8uQlQZe5aHnd+BXHbD0uvp130XGLTlIYloWDnY2DGtTiSEPlMfeVt8ZFSWrV69m1apVlCmTc4hapUqVOHv2rJWqKuYu7ISzm8DGHpo8n+fNrT0cxffbzgHwweN18HDRfHYiIiJ5SQGWiIgUe1lZWfTq1Ys1a9bg6urK8uXLqVKlSs6TjJmw6nXYPtOyXak9dJsJzp4AXE7O4I2lB1l2da6rOmU8+KBHKBV9S+TjK5H8kpycfNOhpXFxcTg6OlqhIsnufVW7B3jcJHzORTFX0hn1834ABjUPoWlFnzxtT0RERDSEUEREijmTycSQIUP46aefcHBwYPHixdx33305T7oSBfMeuR5etRgNTy7IDq/+OBrFg9M3smx/BHY2Bl5pW5mfn2uq8KoIu//++5k/f372tsFgwGQyMXXqVFq1amXFyoqp2JNwdJnlcbNhedqU2Wxm9M/7uZScQdXSboxoX+W/nyQiIiL3TD2wRESk2DKbzbzyyivMmTMHW1tbFixYQLt27XKedH4HLOoDVyLA0R26fglVOwKQlJ7Fu8sO88P28wBU9C3BRz1CqVXGI79fiuSzqVOn0qZNG3bu3ElGRgavvfYahw4dIi4ujs2bN1u7vOJn59eA2dIzslTeBko/7rzAuqPRONjaMP2JUM1rJyIikk8UYImISLH15ptv8sknnwAwe/ZsunbtmvOEnXNg+UgwZYJPFXjiO/CpBMCOM3EMX7SX83GpGAwwsFkII9pX0YfZYqJmzZocP36czz77DDc3N5KSkujWrRvPP/88/v7+1i6veMlIhj3fWR43HJKnTUUnpvHOssMAvPpgZaqWds/T9kREROQ6BVgiIlIsvf/++7z99tsAfPbZZzz99NPXDxozLcHVrjmW7Wqdocv/wNGNLKOJT/44yWd/nMBkhkBPZ95/vA5NKnhb4VWINWRmZvLQQw8xY8YMxo4da+1y5ODPkJ4AJctBhdZ52tSE3w6RmJZFrUAPBjYPydO2REREJCcFWIXAkSNHWLBgAX/99Rdnz54lJSWFUqVKUbduXdq3b0/37t01YayIyB2YOXMmI0eOBOC9997j+ef/sWJZShwsehrO/AUYoM04aD4cDAbOx6Xw8sK97Dp7GYDu9cowoXN13Jy0+lhxYm9vz/79+61dhgCYzbB9luVxg4Fgk3fTu646FMnyA5HY2hiY3L0WdlpZVEREJF/pnbcA2717N23btqVu3bps2rSJRo0a8fLLL/P222/Tu3dvzGYzY8eOJSAggClTppCenm7tkkVECrwffviBZ599FoDRo0czZsyY6wejj8CsVpbwysHNMlH7/a+CwcCv+y7S8eO/2HX2Mm6OdnzyZF0+6FFH4VUx1bt3b77++mtrlyEX90DkfrB1hLq986yZhNRMxi05CMAzD5SnRoDmuRMREclv6oFVgHXv3p2RI0fy008/4enpecvztm7dyscff8wHH3zA66+/nn8FiogUMr/99ht9+vTBbDYzdOhQ3nvvvesHj6+CnwZCxhXwLAtPLQTfaiSlZzHh10P8tOsCAPWCPfn4iboEeblY6VVIQZCVlcXs2bNZu3Yt9evXx9X
VNcfxDz/80EqVFTP7Fljuqz0MLl551szkFUeJvpJOiI8rL7WplGftiIiIyK0pwCrAjh8/jr39f3+z36RJE5o0aUJmZmY+VCUiUjj98ccfPP744xiNRvr06cOnn36KwWCwDEHa8imsGQ+YoWxz6DEfXL3ZfyGel37Yw5lLKdgY4IXWlXipdUUNHRIOHjxIvXr1AMv79T8ZDAZrlFT8GDPh4E+Wx3WezLNmdp2N44ft5wCY3K2WFmoQERGxEgVYBdjthFcAKSkpuLi43Pb5IiLFzd9//03nzp1JT0+nS5cuzJ49GxsbG8hKh99ehn3fW06s1xc6vo/Z1p6vNp5mysqjZJnMBHg4Mf2JujQMybseHlK4rF+/3tolyMm1kHIJXH2hfKs8acJoMjPhV8uqgz0bBNGovBZrEBERsRZ9hVxItGnThvDw8Bv2b9++ndDQ0PwvSESkkNizZw8dOnQgOTmZdu3asWDBAuzs7CApGuY9YgmvDDbQYSo88jHxGTB4/k7eXX6ELJOZjrVKs2LYAwqv5KZOnjzJqlWrSE1NBcBsNlu5omLk2vDBWo+Dbd58J/vjzvMcCE/AzcmOkQ9VyZM2RERE5PYowCoknJycqF27NgsXLgTAZDIxYcIEmjdvTseOHa1cnYhIwbR//37atm1LfHw8zZo1Y/HixZZVWyMPwKzWcH4bOHpAr5+g0TPsPh9Pp082sfZINA52NrzbtSafP1UPDxf1cJWcLl26RJs2bahcuTIdO3YkIiICgIEDB/Lqq69aubpiIC0Bjq2wPK7zRJ40kZCaybRVxwAY1qYSPiW04rOIiIg1aQhhIbFs2TI+//xzBgwYwNKlSzlz5gxnz57l999/58EHH7R2eSIiBc7hw4dp27YtcXFxNGrUiOXLl1sm2j7yG/wyBDJTwLsiPLkAs3fFHEMGy3m78HmvelppTG7plVdewd7ennPnzlGtWrXs/T179mT48OF88MEHVqyuGDi+Gozp4FMZStfKkyY+WXeCS8kZVPQtQd+m5fKkDREREbl9CrAKkeeff54LFy4wZcoU7Ozs2LBhA02bNrV2WSIiBc6xY8do3bo1MTEx1KtXj5UrV+Lu5gYbp8Ef71hOKt8KHp9DvNmVEfN3svZINAAP1/ZnUrdauDmp15Xc2urVq1m1ahVlypTJsb9SpUqcPXvWSlUVI0d/s9xXfRjyYNL8k9FXmLflDADjH66OvRZuEBERsTq9GxcSly9fpnv37nzxxRd8+eWX9OjRgwcffJD//e9/1i5NRKRAOXnyJK1btyYqKoo6deqwZs0aPEs4w+Jnr4dXjZ6FXj+xO4brQwZtbXi7S00+fbKuwiv5T8nJybi4uNywPy4uzjJMVfJOZhqcWGt5XO3hXL+82Wzmrd8Ok2Uy07aaHw9ULpXrbYiIiMidU4BVSNSsWZOoqCj27NnD4MGD+fbbb/n6668ZN24cnTp1snZ5IiIFQlhYGK1bt+bixYvUqFGDNWvW4OUEfNMV9i8Agy10+hDzQ5P5ass5eszYSnh8KmW9XfhlaFP6NC6LIQ96c0jRc//99zN//vzsbYPBgMlkYurUqbRqlTcr4slVpzdAZjK4B0JAvVy//OaTl/jrRCwOtjaMe7jafz9BRERE8oWGEBYSzz77LGPHjrUs+35Vz549adasGf3797diZSIiBcO5c+do3bo158+fp2rVqqxbt45SNonw1eMQdwoc3KDHXBICWzDim12sORwFQKda/kzuriGDcmemTp1KmzZt2LlzJxkZGbz22mscOnSIuLg4Nm/ebO3yirbs4YOdcn34oNlsZtqqowD0ahxMWW/XXL2+iIiI3D0FWIXEuHHjbrq/TJkyrFmzJp+rEREpWMLDw2ndujVnzpyhUqVK/PHHH/ilnYIFT0HqZfAIgqcWcZwgnvl8M2Gxydm9K3qr15XchZo1a3L8+HE+++wz3NzcSEpKolu3bjz//PP4+/tbu7yiy2S8vvpg1dwfPrjqUBT7LiT
g4mDL860q5vr1RURE5O4pwCrAzp07R3Bw8G2fHx4eTmBgYB5WJCJS8ERGRtK6dWtOnTpF+fLl+eOPP/CP3gi/vgDGDMsQoycXsPyMiRE/biYlw0iAhxMz+tSndhlPa5cvhUi3bt2YO3cu7u7uzJ8/n549ezJ27Fhrl1W8ROyDlEvg6A5lm+XqpY0mMx+sPgbAwOYh+JTQXGYiIiIFiebAKsDuu+8+nnnmGXbs2HHLcxISEpg1axY1a9bk559/zsfqRESsLzo6mjZt2nD8+HGCg4P5Y906ypz4BhYPsYRX1Tpj7Ps7kzddZuh3u0nJMNK0gje/vdhc4ZXcsd9//53k5GQA+vfvT0JCgpUrKoZOr7fchzwAtrn7PezSveGciE7Cw9meQfeXz9Vri4iIyL1TD6wC7MiRI7zzzju0a9cOJycn6tevT0BAAE5OTly+fJnDhw9z6NAh6tWrx9SpU+nYsaO1SxYRyTdRUVG0adOGw4cPExgYyPq1qyi7+z3Yv9ByQrNhXG7yOi99u4+/TsQCMOSB8rzWvgp2tvr+Ru5c1apVGTNmDK1atcJsNrNo0SLc3d1veu7TTz+dz9UVE6c3WO7Lt8zVy2Zkmfho7XEAnm1RAQ9nzYknIiJS0BjMZrPZ2kXIze3fv58aNWqQkZHB8uXL+euvvzh79iypqan4+PhQt25d2rdvT82aNa1dag6JiYl4eHiQkJBwyz/sRUTuxbVhg0eOHCEgIIA/Vyym4s7xcG6rZaXBhz/kYOmuPPvtLi5cTsXZ3pYpj9Wmc50Aa5cuVpIb702bN2/m1Vdf5dSpU8TFxeHm5nbT+dMMBgNxcXH3WnKBZpX3+owUmFLW0rvyhZ3gUynXLv3N1jOMW3qIUm6ObBzZCmcH21y7toiI5A99Di361AOrAKtbty6RkZGUKlWKkSNHsmPHDry9va1dloiIVUVERNC6dWuOHj1KmTJl2LR0LmXXD4HLYZZ5cXrMZ3FiJUZ/sYX0LBPBXi582ac+1fz1h4zcm2bNmvH3338DYGNjw/Hjx/H19bVyVcXI+b8t4ZV7IHjn3gTr6VlGPlt/EoCXWldUeCUiIlJAaQxFAebp6cnp06cBOHPmDCaTycoViYhYV3h4OC1btuTo0aMEBQXx98IPKLt6gCW88ggms99KJhzy5ZWF+0jPMtGySil+e6G5wivJFd26dSMxMRGAOXPm4ObmZuWKiplTV+e/Kt8ScnHl0CV7wolKTKe0uxM977v9xXNEREQkf6kHVgHWvXt3WrRogb+/PwaDgQYNGmBre/NvBa8FXSIiRdWFCxdo1aoVJ0+epGzZsuz4ajil1g4FUyYENiD2kbkMXXqB7WGWoVsvtKrIK+0qY2uTex90pXi7Nom7u7s7AwYMoEOHDjg7O1u7rOIjD+a/MprMfPmn5W+oQfeH4GCn73ZFREQKKgVYBdjMmTPp1q0bJ0+e5KWXXmLw4MH6tldEiqVz587RqlUrTp8+TblyZdnz4eN4bhpnOVi9C/vum8Izsw8RmZhGCUc7PuhRh/Y1Slu3aClyNIm7FaUlQuQBy+OQB3LtsmsOR3I6Nhl3JzueaKjeVyIiIgWZAqwC7qGHHgJg165dDBs2TAGWiBQ7Z86coVWrVpw5c4aqFUPYOa4hrvtmWg42H84Ct76M/2oPGUYT5Uu5MrNPAyr6lrBu0VIkzZgxg+HDh7Ns2TIMBgNvvPHGLSdxV4CVyy7uBszgEQxuuRNOm81mvrja++rpJuUo4ag/i0VERAoyvVMXEnPmzLF2CSIi+S4sLIyWLVty7tw5GtYoz8bng3A8vQJs7Mjs+CHjz9Xjh7WHAHiwuh8f9KiDm5O9lauWoqpp06aaxN1aLuy03JdpkGuX/Pt0HPvOx+NoZ0O/ZuVy7boiIiKSNzTQX0RECqRTp07RokULzp07R/v65dnc3xnHqD3g6EFc1x/osb0iP2w/h8EAIx6szIze9RVeSb4JCwujVKlS1i7jtnTt2pWSJUv
y2GOP3XDs999/p0qVKlSqVImvvvrKCtXdpuwA675cu+SMP08B0KNBED4lHHPtuiIiIpI31ANLREQKnOPHj9OmTRsuXLhAn/vLMbdDJjZXYsGzLAdafkX/pYnEJsXj7mTHx0/WpVUV9YKRvLd//35q1qyJjY0NCQkJHDhw4Jbn1q5dOx8r+3fDhg1jwIABzJs3L8f+rKwshg8fzvr16/Hw8KB+/fp07doVb29vK1V6C2YzXNhheZxLAdahiwn8eTwGGwMMvr98rlxTRERE8pYCLBERKVAOHDhAu3btiIqKYvRDwbzX5AqG9EzMZRqyqMJkxi6KIstkpmppN77sU5+y3q7WLlmKidDQUCIjI/H19SU0NBSDwYDZbM4+fm3bYDBgNBqtWGlOLVu2ZMOGDTfs3759OzVq1CAwMBCADh06sHr1ap588sl8rvA/xJ+FlFiwsYfStXLlkrM2Wua+6lQ7gGBvl1y5poiIiOQtDSEUEZECY/v27bRo0YLoqChmPhHMpEbxGEyZGKt3ZXSJdxi1KpIsk5lH6gTwy9CmCq8kX/1z2GBYWBinT58mLCws+3Zt+/Tp07d9zY0bN/LII48QEBCAwWBgyZIlN5zz+eefU65cOZycnGjUqBHbt2/Plddz8eLF7PAKIDAwkPDw8Fy5dq66NnzQvzbYO93z5aKvpLHsQAQAQ9T7SkREpNBQDywRESkQNm7cSKdOnchMTWLVkDK0848HIPG+l+h1qi0HLsZiY4AxHaox6P6Qm67+JpKXypYte9PH9yI5OZk6deowYMAAunXrdsPxhQsXMnz4cGbMmEGjRo2YPn067du359ixY9kTyIeGhpKVlXXDc1evXk1AQECu1GlVuTx88Idt58k0mqlftiS1ynjkyjVFREQk7ynAEhERq1u1ahVdu3bF1ZDG3y/6U8MjEWzsONHoXXpsK8/llCS8XB347Mm6NK3oY+1ypZj69ddfb/vczp0739Z5HTp0oEOHDrc8/uGHHzJ48GD69+8PwIwZM1i2bBmzZ89m9OjRAOzdu/e26/qngICAHD2uwsPDadiw4U3PTU9PJz09PXs7MTHxrtq8K7k4gXum0cR3284C8HST3AkhRUREJH8owBIREatavHgxPXv2pLy7kfWDSuHvlIzZyYPfq01l2AY3TOZMagV6MKNPfQI9na1drhRjXbp0ybF9szmwrsmNObAyMjLYtWsXY8aMyd5nY2ND27Zt2bp16z1fv2HDhhw8eJDw8HA8PDxYsWIF48aNu+m5kyZN4q233rrnNu+YyQRRhyyP/UPv+XKrDkUSfSWdUm6OdKjpf8/XExERkfyjObBERMRqvv32Wx5//HGaB5rY+Zwn/k5pmDzL8U7pT3hxqxsmMzxWvww/PttE4ZVYnclkyr6tXr2a0NBQVqxYQXx8PPHx8Sxfvpx69eqxcuXKXGkvNjYWo9GIn59fjv1+fn5ERkbe9nXatm3L448/zvLlyylTpkx2+GVnZ8cHH3xAq1atCA0N5dVXX73lCoRjxowhISEh+3b+/Pm7f2F3Iv4MZKWCnRN4hdzz5b7Zaul99WTDYBzs9GewiIhIYaIeWPKvJk+ezJgxYxg2bBjTp0+3djkiUoR8+eWXPPfcc/StY8eszi7YGbJIK92Ap5NfZvtRG+xsDLz5SHV6Ny6r+a6kwHn55ZeZMWMGzZs3z97Xvn17XFxcGDJkCEeOHLFidTmtXbv2lsc6d+58W8MdHR0dcXR0zM2ybk/0Ucu9TyWwsb2nS52OSWJbWBw2BnjivqBcKE5ERETykwIsuaUdO3bw5ZdfUrt2bWuXIiJFzAcffMDIESN4p7Ujr9/vCJiJCu5Ep3NPEptmQyk3R/7Xqx73lfOydqkiN3Xq1Ck8PT1v2O/h4cGZM2dypQ0fHx9sbW2JiorKsT8qKorSpUvnShsFXszVILBUtXu+1MKdll5jLSqXIkA9OkVERAod9Z2Wm0pKSqJXr17MmjWLkiVLWrscESkizGY
zEyZM4I3RI/ihu/PV8Aq2Bw2kyQlLeFUv2JPfX2yu8EoKtPvuu4/hw4fnCJeioqIYOXLkLSdCv1MODg7Ur1+fdevWZe8zmUysW7eOJk2a5EobBd61Hli+Ve/pMplGEz/vugDAEw2D77UqERERsQIFWHJTzz//PJ06daJt27bWLkVEigij0cgLL7zA/6ZN5I+nXehZ0x6zjT2zS71GjxNtMJlt6NUomAVDmuDn7mTtckX+1ezZs4mIiCA4OJiKFStSsWJFgoODCQ8P5+uvv77t6yQlJbF3797slQTDwsLYu3cv586dA2D48OHMmjWLefPmceTIEZ577jmSk5OzVyUs8nKpB9a6I1HEJmXgU8KR1lV9c6EwERERyW8aQig3WLBgAbt372bHjh23db5Vl9YWkUIhPT2dp59+mv1//MTfg1wpX9IGo6MHI2xGsvh8eRxsbXinS016aF4aKSQqVqzI/v37WbNmDUePWnoJVatWjbZt297RnG07d+6kVatW2dvDhw8HoG/fvsydO5eePXsSExPD+PHjiYyMJDQ0lJUrV94wsXuRZDJCzHHL43vsgfXjTkvvq8fql8HeVt/fioiIFEYKsCSH8+fPM2zYMNasWYOT0+31gLDa0toiUihcuXKFbt26YTy5nq0DXfF0MpDiGsTjV4ZzKMMPfw8nZvSuT50gT2uXKnJHDAYDDz74IA8++OBdX6Nly5aYzeZ/PeeFF17ghRdeuOs2Cq24MDCmg50zeJa768tcSkrnz+MxADxWPzCXihMREZH8pq+gJIddu3YRHR1NvXr1sLOzw87Ojj///JNPPvkEOzs7jEbjDc+x2tLaIlLgxcTE0KZNG4Ji/2RVbxc8nQyEu9Wm+aWxHMrwo1GIF7+92FzhlRQKCxYsuO1zz58/z+bNm/OwmmIge/hgZbC5+z9Zf9t3kSyTmdplPKjo65ZLxYmIiEh+U4AlObRp04YDBw5kz8exd+9eGjRoQK9evdi7dy+2tjcuYe3o6Ii7u3uOm4jI2bNneeD+5nRx28/sR52xtzWw1aUVrWOGE4c7A5qF8O2gRviUcLR2qSK35YsvvqBatWpMnTqVI0eO3HA8ISGB5cuX89RTT1GvXj0uXbpkhSqLkGsTuN/j/FeL94QD0LWuel+JiIgUZhpCKDm4ublRs2bNHPtcXV3x9va+Yb+IyK0cOnSIRzs+yHsNL9OjhiWgmmvXgwlxj+Jkb8vH3WvzaKg+TErh8ueff/Lrr7/y6aefMmbMGFxdXfHz88PJyYnLly8TGRmJj48P/fr14+DBg8Vjnqq8dK0Hlu/dB1inYpLYdyEBWxsDj9QJyKXCRERExBoUYImISK7aunUrfR/ryDcPZdAkyB6jwY6xxiEsSGpOkJczX/ZuQPUA9dSUwqlz58507tyZ2NhYNm3axNmzZ0lNTcXHx4e6detSt25dbO5huJv8Q+zVCdxLVbnrSyy92vuqReVS6u0pIiJSyCnAkv+0YcMGa5cgIoXEihUrGDukO6sesyWkpB0ptm70T3mZbeZq3F/Jh0+frIuni4O1yxS5Zz4+PnTp0sXaZRRt8Vfn1PQse1dPN5vN/H4gAoBHQ9X7SkREpLBTgCUiIrli/vz5LHx3MOt7OeLhZCDS1p+nUl7ltDmAoS0r8OqDVbC1MVi7TBEpDNKvQFq85bFHmbu6xNHIK5yOScbBzoY21TScU0REpLBTgCUiIvfEbDbzzttvE7PsXX7t6YitjYG9hur0T36JDIeSfPF4HTrU8rd2mSK5qmTJkhgMNwayBoMBJycnKlasSL9+/ejfv78VqisCEi5Y7p08wOnuhhwvv9r7qmXlUpRw1J+8IiIihZ3ezUVE5K5lZmby4tBnCY34nnEdnAD4ydSSMRkDCPLx4Ms+9ankp2XrpegZP3487777Lh06dKBhw4YAbN++nZUrV/L8888TFhbGc889R1ZWFoMHD7ZytYXQteGDHsF39XSz2cyy/ZYAq1NtBegiIiJFgQI
sERG5K1euXGHAk1151msrbRo4YMLApMwnmWXsRNtqfnzYMxR3J3trlymSJzZt2sQ777zDs88+m2P/l19+yerVq/n555+pXbs2n3zyiQKsu5Fwbf6roLt6+pGIK5yO1fBBERGRokTL5IiIyB2LiIigT8cmvBvyN23K25GCE4MzhvOV6WFeaVuFmX0aKLySIm3VqlW0bdv2hv1t2rRh1apVAHTs2JHTp0/nd2lFw7UA6y7nv1p5KBKwrD6o4YMiIiJFgwIsERG5I4cPH+bVrvWZ0/Qclb1tuYgP3dInsN2hEV/3bcCwtpWw0WTtUsR5eXnx22+/3bD/t99+w8vLC4Dk5GTc3DSE9q5kDyG8ux5Yaw9HAdC+RuncqkhERESsTF9JiYjIbduwYQNLxz3K/AfN2NkY2G2qxJCM4Xj7leHXPvUJ8XG1doki+WLcuHE899xzrF+/PnsOrB07drB8+XJmzJgBwJo1a2jRooU1yyy8rk3ifhdDCMPjUzkckYiNAVpVKZXLhYmIiIi1KMASEZHbsuD7b7n0/TN81MYOMLDY2IzRmYN5sE45pnSvhYuD3lKk+Bg8eDDVq1fns88+45dffgGgSpUq/PnnnzRt2hSAV1991ZolFm4Jd98Da90RS++r+mVL4l3CMTerEhERESvSpw0REflXZrOZ6VMmUu3gVJ5oYHnbmJrZgy/NXXj94eoMaFYOg0FDBqX4adasGc2aNbN2GUWPMROuWFYQvJsAa83V4YPtqmvydhERkaJEAZaIiNxSRkYGb77Ymz52y6le0Y4UswOvZA5ll0tzvnuqHo3Le1u7RBGrMRqNLFmyhCNHjgBQo0YNOnfujK2trZUrK+QSL4LZBLaO4HpnQwAT0zL5+/QlANpq9UEREZEiRQGWiIjc1KVLl5g8qB2vVz5JSWdbIswlGZQxAoeguvzWqx7+Hs7WLlHEak6ePEnHjh0JDw+nSpUqAEyaNImgoCCWLVtGhQoVrFxhIZY9fDAQbO5svaG/jseSaTRT3seV8qVK5EFxIiIiYi1ahVBERG5w9MgRZvWvxeRaJynpbGCXqRKd098ltGELFgxprPBKir2X/q+9e4+Lusz7P/6eGZgB5SQiKIHnTS1NPGHYZlqoqZlabZYdTDsnpTe77a333r9cq812tzWrdXMPd9ld21bWandallKKlVqhmKdMi9Q8IKSCchhg5vr9gZIEKijwHWZez8eDB8x3roHP1bcv1/jmuq7vQw+pS5cu2rt3rzZs2KANGzZoz5496tSpkx566CGry2vezuMOhJlf50mShnaPbciKAACAD2AGFgCgmg/fX6b8FydqRl9Jsum1iiF6THfqtzf00S/6n9st7QF/s3r1aq1bt07R0dFVx1q3bq0nn3ySfbHO1znegdAYo8ydlQHW4Au5+yAAAP6GAAsAUOXl+U+qx6bHdWV3hyqMXbMrbteHYdfqtdv6q1dCpNXlAT7D5XLp2LFjNY4fP35cTqfTgor8SMGeys/1nIH1Td5xHSgolTPIruSO0Wd/AQAAaFZYQggAUEVFhZ5Jv0nDc55Q/3iHfjDhurX8v/Rd54l656HLCa+An7jmmmt0zz33aP369TLGyBijdevW6b777tO1115rdXnNW+H+ys8RF9TrZau/zpckDewUrVAnG+kDAOBvCLAAIMAVFhZq/p3Jur/le4oLs2mbt4PGlj2uvoPHaOHkZEW3ZDYJ8FPPPvusunTpopSUFIWEhCgkJESDBg1S165dNW/ePKvLa96KD1d+bhlTr5ed3P9q8M9YPggAgD9iCSEABLCcXV9r/ewhmtalSJK01DNQs+1T9dgtA3V1z7bWFgf4sKioKL399tvatWuXtm/fLknq0aOHunbtanFlfqD0aOXn0FZ1f0m5R+tzfpDE/lcAAPgrAiwACFBrlv9bjrem6KYuRl5j01MVN+rDmFv0+q39uP08UIv09PQzPv/RRx9VfT137tzGLsd/lRyp/FyPACtr9xGVlns
VG+7ShXH8/gIAwB8RYAFAgDHG6NU/zdDP9y9QhwukYyZU08qnKrrPtVo8tid7xwCnsXHjxjq1s9lsjVyJH/N6pZKjlV/XI8Ba923l7KtBXVrz3x8AAD9FgAUAAaSkpEQv/2qUbmu1QaER0rfetprqfViTx4/QjQPqd8cvINCcOsMKjcRdIMlUfh0SVeeXrf+2ct+sSzu3bviaAACATyDAAoAAsfe7b7R+9lDd06FAkpTh6aO5Yb/UU7cN1sXx3GUQgA84uXwwuKUUVLcbSJSWe5S996gkaSABFgAAfosACwACwPoVixX07zt1QwePvMameRXX6+tu9+pfN/ZRREiw1eUBQKVz2P9qw54jKvNU7n/VsXWLRioMAABYjQALAPyYMUbvPPsrXbr/fxQbZ3TUtNR/VEzVZVffrOd/3om9YgD4lnMIsE4uHxzYmf2vAADwZwRYAOCn3KWlemfmcI2P2CRHqLTV20G/cf5av5k8SgM6RltdHgDUVLWBe1SdX7I+p3ID90s783sNAAB/RoAFAH5of87X+uoPqbohrnK/qzc9g7W0/a/0j5svVUyYy+LqAOA06jkDq7Tcow17jkqSBnZi/ysAAPwZARYA+Jl1S19SdMYvdWVcucqMQ49W3K7owffpf4Z1k8PO8hoAPqxqBlbdAqwt+wpUVuFVTJhTXdq0bLy6AACA5QiwAMBPeL1e/d8Td+iqkncUHunVAROt/7T9h+6dPFGXdY2xujwAOLt6zsDaeGL2Vd/2rdj/CgAAP2e3ugAAwPk7mn9Qyx/qpXEVbys82Ku1nos0u+2f9adf3kt4BQS48ePHq1WrVrrhhhuqHT969Kj69++vpKQk9ezZU3//+98tqvAU9Q2w9la2T2of1UgFAQAAX8EMLABo5rZ9vFTmrbs0KqZEXmPTXzzXyn7lTP1lSHfZWTIIBLxp06ZpypQpeumll6odDw8PV2Zmplq0aKGioiL17NlT1113nVq3tnAvqXOcgdUnse53LQQAAM0TM7AAoBn78M/TlLj8dl0cWaIfTLgess3QpXfN0wNX9iC8AiBJGjJkiMLDw2scdzgcatGihSTJ7XbLGCNjTFOXV11VgBV11qYHCkp0oKBUdpt0SUJk49YFAAAsR4AFAM1QceERrfxVf12Zv1DhQR6t93bXo+3+osceTlf/jtxKHmguMjMzNWbMGMXHx8tms2nJkiU12syfP18dO3ZUSEiIBg4cqM8++6zBfv7Ro0fVu3dvJSQk6OGHH1ZMjMVLjusxAyv7xOyrbm0j1NLFogIAAPwdARYANDM7P1uhb3/bU6lhOyVJz1eM0Zar/lfz7h2jVi2dFlcHoD6KiorUu3dvzZ8/v9bnX3/9daWnp2vWrFnasGGDevfurREjRujQoUNVbU7uYfXTj/3795/150dFRWnTpk3KycnRq6++qtzc3Abr2zmpT4C196gkqQ/7XwEAEBD4cxUANBPGGGX8JV0DDrysyIhyHTZhmm2bqjvve0CXJERZXR6AczBy5EiNHDnytM/PnTtXd999tyZPnixJWrBggZYtW6YXXnhBM2bMkCRlZ2efdx1xcXHq3bu31qxZU2Ozd6lyiaHb7a56XFhYeN4/swZj6hVg/bj/VVTD1wIAAHwOM7AAoBk4fjRf7/0yWal5LygyqFxfeC/Un9o/r8dnPEx4BfipsrIyZWVlKTU1teqY3W5Xamqq1q5de97fPzc3V8eOHZMkFRQUKDMzU926dau17Zw5cxQZGVn1kZiYeN4/v4byYslbXvn1WQKsco9XX+47Kknq054N3AEACATMwAIAH7d9zRLp7Qc1KqJyxsPfK0YpbvwT+l2/TtYWBqBR5efny+PxKC4urtrxuLg4ffXVV3X+Pqmpqdq0aZOKioqUkJCgRYsWKSUlRbt379Y999xTtXn7gw8+qF69etX6PWbOnKn09PSqx4WFhQ0fYp2cfeVwSsEtzth016HjKi33KtwVpM4xLRu2DgAA4JMIsAD
ARxmvVx8+fadSCv5PLcIqlG8i9LTrAd0/bZoSWp35H3cAcNLKlStrPZ6cnFzn5Ycul0sul6sBq6rFqcsHbWe+i+q2/ZWBfo/4CO64CgBAgCDAAgAfdGTfN9r29FhdFbZXskuZnl7a1OdRPTpuqBz8Yw0ICDExMXI4HDU2Vs/NzVXbtm0tqqoR1WP/q20HKgOsi9pFNGZFAADAh7AHFgD4mKx/P6vS+ZfpsrC9KjMO/bHiJrWcslgPXncl4RUQQJxOp/r166eMjIyqY16vVxkZGUpJSbGwskZSnwDrxAysi+IJsAAACBTMwAIAH1FWUqQPfzdewx3rZXdK33jbaWHrdP363skKDwm2ujwAjeD48ePatWtX1eOcnBxlZ2crOjpa7du3V3p6uiZNmqT+/fsrOTlZ8+bNU1FRUdVdCf3KyQArJOqMzYwxzMACACAAEWABgA/4dsOHOvavu3V1y3xJ0hsVgxV09Rw99vOeFlcGoDF98cUXGjp0aNXjkxulT5o0SQsXLtSECROUl5enRx55RAcPHlRSUpKWL19eY2N3v1DHGVj7C0pVUFKuILtNP4sLa4LCAACALyDAAgALGa9XHzyTpkGHF6lzyzIVmhZ6JmiK7pz+34qPCrW6PACNbMiQITLGnLFNWlqa0tLSmqgiC9UxwNp+Yvlg19gwuYIcjV0VAADwEQRYAGCRH/Z8pW1/nqARLb6THNLnnp9pc9/H9ZuxI7irFoDAU8cAi+WDAAAEJgIsALDAmpdmq8fO53V5ixKVGYf+7h2rEQ/M1ZR2Z9+8GAD8UoW78nNwyBmbsYE7AACBiQALAJrQsfx9+uyp63VVyHYpSNruTdSKDr/SA3fcriAHN4YFAOnMM1CZgQUAQGAiwAKAJrL+red0wYbf66qQY/IamxZWDFefyX/SQ10TrS4NAJqFIneF9hwuliT1IMACACCgEGABQCMrPXZEH825XiOdWVKwtNsbqzfbpGnqfWkKCWYDYgCoq2/ziiRJMWFOtWrptLgaAADQlFivghrmzJmjAQMGKDw8XLGxsRo3bpx27NhhdVlAs/TFe6/o+zl9K8MrSa+XD9ae65fqlw9OI7wCgHr6Ju+4JKlzmzCLKwEAAE2NAAs1rF69WlOnTtW6deu0YsUKlZeXa/jw4SoqKrK6NKDZKDl2RP/3/0Ypad2D6uo8rFxvlJ4Jf1hjf7tEl/fuZnV5ANAsnQywuhBgAQAQcFhCiBqWL19e7fHChQsVGxurrKwsDR482KKqgOZj3dv/UJv1v9O1zsOSpHfL+yly/FxN659kbWEA0Mz9GGC1tLgSAADQ1AiwcFYFBQWSpOjo6Fqfd7vdcrvdVY8LCwubpC7A1xQdydNHf7xZo4K/kN1pdMhE6s3wO3Tn9FlyBbFcEADO1zeHKmeDd4llBhYAAIGGAAtn5PV6NX36dF122WXq2bNnrW3mzJmj2bNnN3FlgG/JfG2eOmx+Wtc4j0qSFpcPVMKEuXrgktqvGwBA/Xi8Rjn5lQFWV5YQAgAQcAiwcEZTp07Vli1b9PHHH5+2zcyZM5Wenl71uLCwUImJiU1RHmC5I4f26dOnb9Fo10YpWNrnjdY7re/WXWkzFORgm0EAaCjfHylWmccrV5Bd8VGhVpcDAACaGAEWTistLU1Lly5VZmamEhISTtvO5XLJ5XI1YWWADzBGy/76/9Rv30sa7apcNvtG+eXqfts83de9q8XFAYD/Obn/VaeYlnLYbRZXAwAAmhoBFmowxujBBx/U4sWLtWrVKnXq1MnqkgCfsuvLddr3appGh+yUHNJ33jb66IKpmnT3dNn5RxUANAr2vwIAILARYKGGqVOn6tVXX9Xbb7+t8PBwHTx4UJIUGRmp0FCm7CNwVZS5teQPd2tk+XvqGlKmcuPQa54rNXTqfE1uF2d1eQDg1368AyEBFgAAgYgACzU8//zzkqQhQ4ZUO/7iiy/qjjvuaPqCAB/w6bJXFP7J47rBeUCySVkVnbVvwH/
ptrG/sLo0AAgIPwZYLS2uBAAAWIEACzUYY6wuAfAZhw/t05pnpuiaoPVyOI0KTAstCh6viTOeVr8Q9n4DgKbybd6JJYTMwAIAICARYAFALYzXqyXP/VqD8l/X2ODKTdrfLeuj2Ov/oLv6JVtcHQAEFq/X6HBxmSQpNpw/HgAAEIgIsADgJz7PWCyzcpbGu3ZXbtLuaaPVbafo9gdmymZjk3YAaGpFZRU6OUE8IjTY2mIAAIAlCLAA4IQfDuxV5p/v0ZigdQpyeVVinFrkTdWIh57VpDZtrC4PAAJWYWmFJCnYYZMryG5xNQAAwAoEWAACnvF69da8X2rwkUUaH3xMkrSirJdCr35Utw++0uLqAADHSsslSREhwcyEBQAgQBFgAQhoa5a+opBP/qAbTiwX/NYTq0/jp+iW+2bwjyQA8BGFJZUzsMJDeOsKAECg4l0AgIC0a2uWdrySrpHOTbK7jIqNU2+aYRo9/c+6tXW01eUBAE5RNQOL/a8AAAhYBFgAAkrhkXwtn/eARmuVurrckqRl7j6KGfOobh802OLqAAC1OVbKDCwAAAId7wIABASvx6M3n52hQYcX6UbHEUnSxvIO+q7ngxp/890WVwcAOJPCEzOwwl3MwAIAIFARYAHweyve/Idab3hGNzr3SA5pn7eVMiJu0M3TnlSfYH4NAoCvOzkDKyKU39kAAAQq3gUA8FufrVqmwuWPa1jINskpFRun/m2u0vC0Z3R7bJzV5QEA6qiw5MQMrBBmYAEAEKgIsAD4nR1ffqGv/jVDo51ZCgrxymNsWlbWTx1+8YRu7TvQ6vIAAHWw69AxZe0+ol/0S1ThyRlYBFgAAAQsAiwAfuPg999p9YLpGuP4RN1cZZKkj9w95L38YV076nqLqwMAnJEx1R7+ZvEWrc85rMRWLX7cA4tN3AEACFi8CwDQ7BUePaylz/6Hrq5YoQlBRZKkDeUdtKf7vRp321SLqwMAnIuDhaWSpD2Hi0/ZA4sZWAAABCoCLADNVtHxQi155lcaWvq+JjqOSnZpV0WcPo+7STfeP0t9gxxWlwgAOEcFJ/a9yjvmPmUPLN66AgAQqHgXAKDZKSk6rn8/92sNLnpPtzgOSw7pgDdKGaGjdf1/PqWuoS2sLhEAcB68XlMVWuUdd+sYSwgBAAh4dqsLAIC6cpeW6tU/Tlfuk711S+k/leg4rEPeCL0SdINapG/UrTP/olDCKwCoZvz48WrVqpVuuOGGGs/l5ORo6NChuuiii9SrVy8VFRVZUGFNx8sq5D2xJdahQvePSwjZxB0AgIDFn7EA+Lwyt1tvzv9vJR9eoolBhySHlO8N1/tBV+rqtD/p1pg2VpcIAD5r2rRpmjJlil566aUaz91xxx16/PHHdfnll+vw4cNyuVwWVFjTydlXUuUMrJObuBNgAQAQuAiwAPisouPHtGT+TF16bLkmBuVJQdIRb0u9Zx+iYQ88pVvaxltdIgD4vCFDhmjVqlU1jm/dulXBwcG6/PLLJUnR0dFNXNnpFZwSYO0/WqLScq8kKSKUt64AAAQqlhAC8DlHD+fplcfv1JHf99ItJS+rS1Cejnhb6l/eq1Vyz1pN/O2rakN4BcAPZGZmasyYMYqPj5fNZtOSJUtqtJk/f746duyokJAQDRw4UJ999lmD/OydO3cqLCxMY8aMUd++ffXEE080yPdtCKcGWAcKSqu+DnMRYAEAEKh4FwDAZ+Tu/14r/vafGu7N1K32QskhHfJGaIVjiIbdP0c3t0uwukQAaFBFRUXq3bu3pkyZouuuu67G86+//rrS09O1YMECDRw4UPPmzdOIESO0Y8cOxcbGSpKSkpJUUVFR47UffPCB4uNPH/ZXVFRozZo1ys7OVmxsrK6++moNGDBAw4YNa7gOnqNTlxCe1NLpUJCDv70CABCoCLAAWO7rbZuU9a9HdbVtrW61F0l26XtPK60OSdXo+5/ULdExVpcIAI1i5MiRGjly5Gmfnzt3ru6++25
NnjxZkrRgwQItW7ZML7zwgmbMmCFJys7OPqeffcEFF6h///5KTEyUJI0aNUrZ2dk+EmDVDOTC2f8KAICARoAFwDKfrHxHeRnP6Gpnti50VP61PcfTRmvDRmrs1Cd0S1i4xRUCgHXKysqUlZWlmTNnVh2z2+1KTU3V2rVrz/v7DxgwQIcOHdKRI0cUGRmpzMxM3XvvvbW2dbvdcrvdVY8LCwvP++efSUEtM7DCQ3jbCgBAIOOdAIAm5fV49M4/5yti+z811PWVdOKGV5vKE7W9zWhdd/+jmugjd8ECACvl5+fL4/EoLi6u2vG4uDh99dVXdf4+qamp2rRpk4qKipSQkKBFixYpJSVFQUFBeuKJJzR48GAZYzR8+HBdc801tX6POXPmaPbs2efVn/qoLcCKCGUGFgAAgYwAC0CTKC4u0pIFv9XFP7ynscF7q4Krj9w9VNTzNo266T71djisLRIA/NDKlStP+9zZljCeNHPmTKWnp1c9LiwsrFp62BiYgQUAAH6KdwIAGtVXW7KV9cYcDTXrNNFxVAqWSk2wlpf1Udzw6Ro6dLTVJQKAT4qJiZHD4VBubm6147m5uWrbtm2T1uJyueRqwtmxhaWVAZbDbpPHayRJEeyBBQBAQCPAAtDgvF6v3l/0gkz2y7rKuUXd7ZWb8eZ7w7RSKRowcZbGde9lcZUA4NucTqf69eunjIwMjRs3TlLl79eMjAylpaVZW1xjstmqZmB1aN1C3+YVSWIGFgAAgY53AgAazNEj+Vr298fUsyBDI09ZJphd3l7bo4fpmrse0U2RUZbWCAC+5Pjx49q1a1fV45ycHGVnZys6Olrt27dXenq6Jk2apP79+ys5OVnz5s1TUVFR1V0J/dXJAKtrm7BTAixmYAEAEMgIsACctzUfLNHBVX/XlUHZusV+XAqW3CZIK8t6yTlgsoZdN0lJVhcJAD7oiy++0NChQ6sen9xnatKkSVq4cKEmTJigvLw8PfLIIzp48KCSkpK0fPnyGhu7+5uqACs2TB9sq1xCGRHK21YAAAIZ7wQAnJO8Q7l6f+Hv1OPoal3u/E5yVh7f52mlTEeKBk78b43udrGlNQKArxsyZIiMMWdsk5aW5t9LBmtRWFK59PxncWFVx5iBBQBAYCPAAlAvK97+p46ve0lXOb/UrbYSySlVGLvWlHXTDx1Ga8wdv9LNIaFWlwkAaKaMMSqsWkIYXnU8gj2wAAAIaLwTAHBWX23eoC8WP6M+7s80LHh/1d5Wezyt9Yk9WUnX/0pDL+lvbZEAAL9Q7jUq83glSe1bt1CQ3aYKr+EuhAAABDgCLAC1ys/L1Qf/+wcl5mdqkHOnuttM1d5Wq8p6qPTC8Rp960O6OZh/UAAAGk5puUeS5LDbFBESpLiIEO07WqKoFow3AAAEMgIsAFXK3G4te/XPCv56qYY4t2qizV0122pjWXtta3mpBk1I14iuPawtFADgt0rLKmdfRYQEyWazaeao7vriuyO6JCHK2sIAAIClCLCAAFfmdmv5or+pYttS/Txom8bbC6stEVyrPupw1d26dMjV6mNtqQCAAFBaUTkDKzK0csbVNZfE65pL4q0sCQAA+AACLCAAVZSX6703/qbyre/osqBtutZeUHUXwQLTQqvLL5aj1/UaeeM9muBwWFssACCglJZVD7AAAAAkAiwgYBQXHdcHb/xNnp0rdFnQVo2pFlqF6uOyHirpmKqrJz6oa8MjrC0WABCwSk7sgRVBgAUAAE5BgAX4sX17cpT51gJFHVqrQc6dGmcrrgqtCk2o1pR1V3GHVI2c+KBGR0RaWywAAPpxE3cCLAAAcCoCLMDPbFi7StsyXlan4mwNCM7RzTZP1Z5W+d5wrSu/UMXtr9DIidM0OjLK0loBAPiRkSSVVlRu4s4SQgAAcCoCLKCZyzu4X6uWvCDHno+VZN+lvkF56itVzbTaVRGrDaaHWvYarWHj79A1LpeV5QIAcEYnZ2ARYAEAgFMRYAHNTEV
5uVYvX6TcrKXqXLZdfYN36xc2T1VgVWHs2ljeQV+7eqnz4JuVMnSUulpbMgAAdVZCgAUAAGpBgAX4uIrycmW+/5b2b1yudsU71Cd4t66yF1U+eSK02uuJVlZFV7njB+rnY+/UgPadNMC6kgEAOGcn70IYEUKABQAAfkSABfiY0pJirXn/LR36cqXiSyoDqyvtxZVPnlj9V2yc+rysk/a27Kkul92ggYNHKNHhsK5oAAAayKFjbklSx9YtLK4EAAD4EgIswGJbsz/XljVvy3bwS3U0e9QreJ+G2coqnzwRWB03LmWXtdce14WK6jFEQ66ZqCtahllXNAAAjaSgpFw2m9QrgbvjAgCAHxFgAU0oZ9d2ZWe+q+K92WpTmqOLgr7XxY4juliSTlkpUWBCtamsvb53/Uyte16lK0bfpJ+H8pdoAEBg6NImTOEsIQQAAKcgwAIagdfj0cb1q7Ur60OZQ9sUV7FPFwblqpPjiDqdbHRidpXH2LSjop12ei/Q8age6tA3VZcOGaXBwbxxBwAEpt4JUVaXAAAAfAwBFnAeiouOa+Paj/T99vXy5O9SVNkBJdrz1TkoT/1sbvWTJMeJjxO+88RoZ0Vb5Tnbq2XnSzVoxC90Udt4XWRRHwAA8DVJiSwfBAAA1RFgoVbz58/XH//4Rx08eFC9e/fWc889p+TkZKvLskTu/r3atvFTHcrZqorDuxVamqvW5rAucBxRB8cPusxWebck2VQ1q0qSyoxDOyvilONtp6MtOiqyc1/1HzJGHS/ooI5WdAQAAB9mVDmUStIlzMACAAA/QYCFGl5//XWlp6drwYIFGjhwoObNm6cRI0Zox44dio2Ntbq8BlPmdmvvdzu1e+cWHd73rdxH9slenKfQ8iOKVKFi7McV7ziqOPtxxZ36Qmf171NkXPq2oo32emN01NlOjpiuSugxUH1ShurilmGV+1sBAIAzKnZXqKUkh92m7u3CrS4HAAD4GAIs1DB37lzdfffdmjx5siRpwYIFWrZsmV544QXNmDGjSWspOPKDSkuKVVZernJ3qcrL3Kood6usrEwV5W5VlJerpKhQJccLVFZUqLLiAnncxTJlRVJFiewVJXJWHFeoKVaYShRhL1GkvVhRthJF2orVxWbU5dQfaFe1WVQnHfa21PeeVjrobaWjjtYqb9lOLWK7qPMll+ripEvVKzhYvZrovwkAAP7oSHG5WkqKiwiRK8hx1vYAACCwEGChmrKyMmVlZWnmzJlVx+x2u1JTU7V27dpaX+N2u+V2u6seFxYWNlg9eX+6VF2DDp37N7Crxoypn8r3huuQJ1z53nAdVbiKg6LkCY1RcKt4tU74mbr3TtEF7TspWtIl514JAAA4g6PFZUqQdEFUiNWlAAAAH0SAhWry8/Pl8XgUF1dt0Zzi4uL01Vdf1fqaOXPmaPbs2Y1Sj0f2GsfKjUMe2VUhuzyyy22CVWycKvEGq8Q4VapglZpgueVUmZxyO1qqPDhcckUqKKy1WrSKVWRMvOIu6KQLOnRRTHiEYhqlegAAUFfuCq8kKTKUu/ACAICaCLBw3mbOnKn09PSqx4WFhUpMTGyQ7x390Gr9YLfL5QpRSEiogoKDFSyJt7YAAPgr29mbAACAgEOAhWpiYmLkcDiUm5tb7Xhubq7atm1b62tcLpdcrlo2jmoAbWJr/5kAAAAAACBw1FyfhYDmdDrVr18/ZWRkVB3zer3KyMhQSkqKhZUBAAAAAIBAxQws1JCenq5Jkyapf//+Sk5O1rx581RUVFR1V0IAAAAAAICmRICFGiZMmKC8vDw98sgjOnjwoJKSkrR8+fIaG7sDAAAAAAA0BQIs1CotLU1paWlWlwEAAAAAAMAeWAAAAAAAAPBtBFgAAAAAAADwaQRYAAAAAAAA8GkEWAAAAAAAAPBpBFgAAAAAAADwaQRYAAAAAAAA8GlBVhcA/2OMkSQVFhZaXAkAAJVOjkknxyicn8YY64+XlqvQbVRUUsp7CAB
AvTHW+z+b4eyigX3//fdKTEy0ugwAAGrYu3evEhISrC6j2WOsBwD4KsZ6/0WAhQbn9Xq1f/9+hYeHy2azndf3KiwsVGJiovbu3auIiIgGqtC3BWKfpcDsN32mz/7KF/tsjNGxY8cUHx8vu50dFM4XY/35C8R+02f67K/os2/0mbHe/7GEEA3Obrc3eOIdERHhM78Ym0og9lkKzH7T58BAn60XGRlpdQl+g7G+4QRiv+lzYKDPgcHX+sxY79+IJQEAAAAAAODTCLAAAAAAAADg0wiw4NNcLpdmzZoll8tldSlNJhD7LAVmv+lzYKDPwJkF6v8vgdhv+hwY6HNgCMQ+w3ps4g4AAAAAAACfxgwsAAAAAAAA+DQCLAAAAAAAAPg0AiwAAAAAAAD4NAIsAAAAAAAA+DQCLFhu/vz56tixo0JCQjRw4EB99tlnZ2y/aNEide/eXSEhIerVq5fefffdJqq04dSnzwsXLpTNZqv2ERIS0oTVnr/MzEyNGTNG8fHxstlsWrJkyVlfs2rVKvXt21cul0tdu3bVwoULG73OhlTfPq9atarGebbZbDp48GDTFNwA5syZowEDBig8PFyxsbEaN26cduzYcdbXNedr+lz63Nyv6eeff16XXHKJIiIiFBERoZSUFL333ntnfE1zPsdoGIE41kuBNd4H4lgvBd54z1gfGGO9xHgP30SABUu9/vrrSk9P16xZs7Rhwwb17t1bI0aM0KFDh2pt/+mnn+rmm2/WnXfeqY0bN2rcuHEaN26ctmzZ0sSVn7v69lmSIiIidODAgaqP3bt3N2HF56+oqEi9e/fW/Pnz69Q+JydHo0eP1tChQ5Wdna3p06frrrvu0vvvv9/IlTac+vb5pB07dlQ717GxsY1UYcNbvXq1pk6dqnXr1mnFihUqLy/X8OHDVVRUdNrXNPdr+lz6LDXvazohIUFPPvmksrKy9MUXX+jKK6/U2LFjtXXr1lrbN/dzjPMXiGO9FHjjfSCO9VLgjfeM9YEx1kuM9/BRBrBQcnKymTp1atVjj8dj4uPjzZw5c2ptf+ONN5rRo0dXOzZw4EBz7733NmqdDam+fX7xxRdNZGRkE1XX+CSZxYsXn7HNr3/9a3PxxRdXOzZhwgQzYsSIRqys8dSlzx999JGRZI4cOdIkNTWFQ4cOGUlm9erVp23jD9f0qerSZ3+7po0xplWrVuYf//hHrc/52zlG/QXiWG9MYI/3gTjWGxOY4z1jfe386Xo+FeM9rMYMLFimrKxMWVlZSk1NrTpmt9uVmpqqtWvX1vqatWvXVmsvSSNGjDhte19zLn2WpOPHj6tDhw5KTEw8418+/EVzP8/nIykpSe3atdOwYcP0ySefWF3OeSkoKJAkRUdHn7aNv53ruvRZ8p9r2uPx6LXXXlNRUZFSUlJqbeNv5xj1E4hjvcR4Xxf+cJ7Ph7+M94z1p+dP1zPjPXwFARYsk5+fL4/Ho7i4uGrH4+LiTrsPwMGDB+vV3tecS5+7deumF154QW+//bZeeeUVeb1eDRo0SN9//31TlGyJ053nwsJClZSUWFRV42rXrp0WLFigt956S2+99ZYSExM1ZMgQbdiwwerSzonX69X06dN12WWXqWfPnqdt19yv6VPVtc/+cE1v3rxZYWFhcrlcuu+++7R48WJddNFFtbb1p3OM+gvEsV5ivK+LQBzrJf8a7xnr/Xuslxjv4XuCrC4AwJmlpKRU+0vHoEGD1KNHD/31r3/VY489ZmFlaEjdunVTt27dqh4PGjRI33zzjZ5++mm9/PLLFlZ2bqZOnaotW7bo448/trqUJlPXPvvDNd2tWzdlZ2eroKBAb775piZNmqTVq1ef9k0tgLPzh98NODt/Gu8Z60/PX65nxnv4GmZgwTIxMTFyOBzKzc2tdjw3N1dt27at9TVt27atV3tfcy59/qng4GD16dNHu3btaowSfcLpznNERIRCQ0MtqqrpJScnN8vznJaWpqVLl+qjjz5SQkL
CGds292v6pPr0+aea4zXtdDrVtWtX9evXT3PmzFHv3r31zDPP1NrWX84xzk0gjvUS431dMNb/qDmO94z1/j/WS4z38D0EWLCM0+lUv379lJGRUXXM6/UqIyPjtGurU1JSqrWXpBUrVpy2va85lz7/lMfj0ebNm9WuXbvGKtNyzf08N5Ts7OxmdZ6NMUpLS9PixYv14YcfqlOnTmd9TXM/1+fS55/yh2va6/XK7XbX+lxzP8c4P4E41kuM93XhD+e5oTSn8Z6xPnDHeonxHj7A2j3kEehee+0143K5zMKFC822bdvMPffcY6KioszBgweNMcbcdtttZsaMGVXtP/nkExMUFGSeeuops337djNr1iwTHBxsNm/ebFUX6q2+fZ49e7Z5//33zTfffGOysrLMTTfdZEJCQszWrVut6kK9HTt2zGzcuNFs3LjRSDJz5841GzduNLt37zbGGDNjxgxz2223VbX/9ttvTYsWLczDDz9stm/fbubPn28cDodZvny5VV2ot/r2+emnnzZLliwxO3fuNJs3bzbTpk0zdrvdrFy50qou1Nv9999vIiMjzapVq8yBAweqPoqLi6va+Ns1fS59bu7X9IwZM8zq1atNTk6O+fLLL82MGTOMzWYzH3zwgTHG/84xzl8gjvXGBN54H4hjvTGBN94z1gfGWG8M4z18EwEWLPfcc8+Z9u3bG6fTaZKTk826deuqnrviiivMpEmTqrV/4403zIUXXmicTqe5+OKLzbJly5q44vNXnz5Pnz69qm1cXJwZNWqU2bBhgwVVn7uTt4z+6cfJfk6aNMlcccUVNV6TlJRknE6n6dy5s3nxxRebvO7zUd8+//73vzddunQxISEhJjo62gwZMsR8+OGH1hR/jmrrr6Rq587frulz6XNzv6anTJliOnToYJxOp2nTpo256qqrqt7MGuN/5xgNIxDHemMCa7wPxLHemMAb7xnrA2OsN4bxHr7JZowxDT+vCwAAAAAAAGgY7IEFAAAAAAAAn0aABQAAAAAAAJ9GgAUAAAAAAACfRoAFAAAAAAAAn0aABQAAAAAAAJ9GgAUAAAAAAACfRoAFAAAAAAAAn0aABQAAAAAAAJ9GgAUAAAAAAACfRoAFAAAAAAAAn0aABQA+Ji8vT23bttUTTzxRdezTTz+V0+lURkaGhZUBAICGwFgPAPVnM8YYq4sAAFT37rvvaty4cfr000/VrVs3JSUlaezYsZo7d67VpQEAgAbAWA8A9UOABQA+aurUqVq5cqX69++vzZs36/PPP5fL5bK6LAAA0EAY6wGg7giwAMBHlZSUqGfPntq7d6+ysrLUq1cvq0sCAAANiLEeAOqOPbAAwEd988032r9/v7xer7777jurywEAAA2MsR4A6o4ZWADgg8rKypScnKykpCR169ZN8+bN0+bNmxUbG2t1aQAAoAEw1gNA/RBgAYAPevjhh/Xmm29q06ZNCgsL0xVXXKHIyEgtXbrU6tIAAEADYKwHgPphCSEA+JhVq1Zp3rx5evnllxURESG73a6XX35Za9as0fPPP291eQAA4Dwx1gNA/TEDCwAAAAAAAD6NGVgAAAAAAADwaQRYAAAAAAAA8GkEWAAAAAAAAPBp/x8BMX7gj0RFewAAAABJRU5ErkJggg==", - "text/html": [ - "\n", - "
\n", - "
\n", - " Figure\n", - "
\n", - " \n", - "
\n", - " " - ], - "text/plain": [ - "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" - ] - }, - "metadata": {}, - "output_type": "display_data" + "ename": "ValueError", + "evalue": "zip() argument 2 is longer than argument 1", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mValueError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[19]\u001b[39m\u001b[32m, line 4\u001b[39m\n\u001b[32m 1\u001b[39m x = np.linspace(\u001b[32m0\u001b[39m, np.pi, \u001b[32m201\u001b[39m)\n\u001b[32m 2\u001b[39m m = np.arange(\u001b[32m1\u001b[39m, \u001b[38;5;28mlen\u001b[39m(fornberg) + \u001b[32m1\u001b[39m)\n\u001b[32m 3\u001b[39m y_fornberg = - fornberg[\u001b[32m0\u001b[39m] - \u001b[32m2\u001b[39m*np.sum(\n\u001b[32m----> \u001b[39m\u001b[32m4\u001b[39m \u001b[43m[\u001b[49m\u001b[43ma_\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m \u001b[49m\u001b[43mnp\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcos\u001b[49m\u001b[43m(\u001b[49m\u001b[43mm_\u001b[49m\u001b[43m*\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43ma_\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mm_\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mzip\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mfornberg\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m1\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mm\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstrict\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m, axis=\u001b[32m0\u001b[39m\n\u001b[32m 5\u001b[39m )\n\u001b[32m 6\u001b[39m 
y_drp1 = - drp_stencil1[\u001b[32m0\u001b[39m] - \u001b[32m2\u001b[39m*np.sum(\n\u001b[32m 7\u001b[39m [a_ * np.cos(m_*x) \u001b[38;5;28;01mfor\u001b[39;00m a_, m_ \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mzip\u001b[39m(drp_stencil1[\u001b[32m1\u001b[39m:], m, strict=\u001b[38;5;28;01mTrue\u001b[39;00m)], axis=\u001b[32m0\u001b[39m\n\u001b[32m 8\u001b[39m )\n\u001b[32m 10\u001b[39m fig, ax = plt.subplots(\u001b[32m1\u001b[39m, \u001b[32m2\u001b[39m)\n", + "\u001b[31mValueError\u001b[39m: zip() argument 2 is longer than argument 1" + ] } ], "source": [ "x = np.linspace(0, np.pi, 201)\n", "m = np.arange(1, len(fornberg) + 1)\n", - "y_fornberg = - fornberg[0] - 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(fornberg[1:], m)], axis=0)\n", - "y_drp1 = - drp_stencil1[0] - 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(drp_stencil1[1:], m)], axis=0)\n", + "y_fornberg = - fornberg[0] - 2*np.sum(\n", + " [a_ * np.cos(m_*x) for a_, m_ in zip(fornberg[1:], m, strict=True)], axis=0\n", + ")\n", + "y_drp1 = - drp_stencil1[0] - 2*np.sum(\n", + " [a_ * np.cos(m_*x) for a_, m_ in zip(drp_stencil1[1:], m, strict=True)], axis=0\n", + ")\n", "\n", "fig, ax = plt.subplots(1, 2)\n", "ax[0].plot(x, x**2, 'k')\n", @@ -1433,7 +1429,7 @@ "\n", " beta = k*h\n", " xs, ys = np.meshgrid(beta, alpha)\n", - " for ii, (axis, zs) in enumerate(zip(ax.flatten(), level_sets)):\n", + " for ii, (axis, zs) in enumerate(zip(ax.flatten(), level_sets, strict=False)):\n", " r = courant[ii]\n", " cb = axis.pcolormesh(xs, ys, zs, norm=norm, shading='gouraud')\n", " axis.set_title(f'{r = :.3g}')\n", @@ -1704,21 +1700,32 @@ ] }, "outputs": [ + { + "ename": "NameError", + "evalue": "name 'y_fornberg' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mNameError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m 
\u001b[39m\u001b[32mIn[31]\u001b[39m\u001b[32m, line 5\u001b[39m\n\u001b[32m 3\u001b[39m fig, ax = plt.subplots(\u001b[32m1\u001b[39m, \u001b[32m2\u001b[39m)\n\u001b[32m 4\u001b[39m ax[\u001b[32m0\u001b[39m].plot(x, x**\u001b[32m2\u001b[39m, \u001b[33m'\u001b[39m\u001b[33mk\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m----> \u001b[39m\u001b[32m5\u001b[39m ax[\u001b[32m0\u001b[39m].plot(x, \u001b[43my_fornberg\u001b[49m, label=\u001b[33m'\u001b[39m\u001b[33mFornberg\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m 6\u001b[39m ax[\u001b[32m0\u001b[39m].plot(x, y_drp1, label=\u001b[33m'\u001b[39m\u001b[33mDRP stencil 1\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m 7\u001b[39m ax[\u001b[32m0\u001b[39m].plot(x, y_drp1, label=\u001b[33m'\u001b[39m\u001b[33mDRP stencil 2\u001b[39m\u001b[33m'\u001b[39m, ls=\u001b[33m'\u001b[39m\u001b[33m:\u001b[39m\u001b[33m'\u001b[39m)\n", + "\u001b[31mNameError\u001b[39m: name 'y_fornberg' is not defined" + ] + }, { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "184ec5ee95cc403d8cc5cb755f1b2f84", + "model_id": "657e77c5d694452ebf779e993e27113a", "version_major": 2, "version_minor": 0 }, - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABLAAAAGQCAYAAAC+tZleAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAx3ZJREFUeJzs3Xd4U2Ubx/Fvku69d0tL2atlTxmypwUUlCEbERUEQUCU5UCGgOKrCMhUBAQZsoegLNl7F8rq3nsmef8IBGoZBdqmlPtzXb2anPOcc+5EbJJfnqHQarVahBBCCCGEEEIIIYQoppSGLkAIIYQQQgghhBBCiMeRAEsIIYQQQgghhBBCFGsSYAkhhBBCCCGEEEKIYk0CLCGEEEIIIYQQQghRrEmAJYQQQgghhBBCCCGKNQmwhBBCCCGEEEIIIUSxJgGWEEIIIYQQQgghhCjWJMASQgghhBBCCCGEEMWaBFhCCCGEEEIIIYQQoliTAEsIIYQQQgghhBBCFGsSYAkhhBBCCCGEEEKIYk0CLCGEEEIIIYQQQghRrEmAJYQQQgghhBBCCCGKNQmwhBBCCCGEEEIIIUSxJgGWEEIIIYQQQgghhCjWJMASQgghhBBCCCGEEMWaBFhCCCGEEEIIIYQQoliTAEsIIYQQQgghhBBCFGsSYAkhhBBCCCGEEEKIYk0CLCGEEEIIIYQQQghRrEmAJYQQQgghhBBCCCGKNQmwhBBCCCGEEEIIIUSxJgGWEEIIIYQQQgghhCjWJMASQgghhBBCCCGEEMWaBFhCCCGEEEIIIYQQoliTAEsIIV5AN27cQKFQsGTJEkOXIoQQQpQYkyZNQqFQ5NqWk5PDxx9/jLe3N0qlkqCgIABSUlIYOHAgbm5uKBQKPvzww6IvWAghXiISYAkhBLBkyRIUCsVDf8aOHWuwulasWMGcOXMMdn0hhBDiRfbf13czMzM8PDxo3bo13333HcnJyU88x6JFi5gxYwavv/46S5cuZcSIEQB89dVXLFmyhHfffZfly5fTu3fvwn44QgjxUlNotVqtoYsQQghDW7JkCf369WPKlCn4+fnl2lelShUCAwMNUleHDh04d+4cN27cyLVdq9WSmZmJsbExKpXKILUJIYQQxd1/X9+zs7OJiIhg79697Ny5Ex8fHzZu3Ei1atUAXW+rnJwczMzM9Od488032b9/P3fu3Ml17nr16mFkZMT+/fuL9DEJIcTLysjQBQghRHHStm1batWqZegynujet8hCCCGEeLL/vr6PGzeOv/76iw4dOtCpUycuXryIubk5RkZGGBnl/ogUFRWFnZ1dnnNGRUVRqVKlAqtRo9GQlZUlr+9CCPEIMoRQCCHyQaFQMGnSpDzbfX196du3r/7+vaEKBw4cYOTIkTg7O2NpaUnnzp2Jjo7Oc/zWrVtp0qQJ1tbW2NjYULt2bVasWAFA06ZN2bx5Mzdv3tQPffD19QUePQfWX3/9xSuvvIKlpSV2dna89tprXLx4MVebe/N7BAcH07dvX+zs7LC1taVfv36kpaXlartz504aNWqEnZ0dVlZWlC9fnk8++eTpn0AhhBCimHn11Vf57LPPuHnzJr/88guQew6se6+1e/bs4fz58/rX4r1796JQKAgJCWHz5s367fd6S2dmZjJx4kTKlCmDqakp3t7efPzxx2RmZua6vkKh4P333+fXX3+lcuXKmJqasm3bNgBCQ0Pp378/rq6umJqaUrlyZRYtWpTr+Ht1rF69mi+//BIvLy/MzMxo3rw5wcHBeR7v4cOHadeuHfb29lhaWlKtWjW+/fbbXG0uXbrE66+/joODA2ZmZtSqVYuNGzcWyPMthBDPS3pgCSHEAxITE4mJicm1zcnJ6anP88EHH2Bvb8/EiRO5ceMGc+bM4f3332fVqlX6NkuWLKF///5UrlyZcePGYWdnx8mTJ9m2bRs9evRg/PjxJCYmcufOHWbPng2AlZXVI6+5a9cu2rZtS+nSpZk0aRL
p6enMnTuXhg0bcuLECX34dU+3bt3w8/Nj6tSpnDhxgoULF+Li4sK0adMAOH/+PB06dKBatWpMmTIFU1NTgoODOXDgwFM/H0IIIURx1Lt3bz755BN27NjBoEGDcu1zdnZm+fLlfPnll6SkpDB16lQAKlasyPLlyxkxYgReXl589NFH+vYajYZOnTqxf/9+Bg8eTMWKFTl79iyzZ8/mypUrrF+/Ptc1/vrrL1avXs3777+Pk5MTvr6+REZGUq9ePX3A5ezszNatWxkwYABJSUl5Jov/+uuvUSqVjBo1isTERKZPn07Pnj05fPiwvs3OnTvp0KED7u7uDB8+HDc3Ny5evMimTZsYPnw4oHvdb9iwIZ6enowdOxZLS0tWr15NUFAQa9eupXPnzgX87AshxNORAEsIIR7QokWLPNueZapAR0dHduzYof8WV6PR8N1335GYmIitrS2JiYkMGzaMOnXqsHfv3lzDBe5dr2XLlnh6ehIfH0+vXr2eeM3Ro0fj4ODAoUOHcHBwACAoKIjq1aszceJEli5dmqt99erV+fnnn/X3Y2Nj+fnnn/UB1s6dO8nKymLr1q3PFOIJIYQQxZ2Xlxe2trZcu3Ytzz5LS0t69erFwoULUalUuV6Le/Xqxaeffoqnp2eu7b/88gu7du3i77//plGjRvrtVapUYciQIRw8eJAGDRrot1++fJmzZ8/mGoo4cOBA1Go1Z8+exdHREYAhQ4bw1ltvMWnSJN555x3Mzc317TMyMjh16hQmJiYA2NvbM3z4cM6dO0eVKlVQq9W88847uLu7c+rUqVzDIR98jzN8+HB8fHw4evQopqamAAwdOpRGjRoxZswYCbCEEAYnQwiFEOIB//vf/9i5c2eun2cxePDgXMtwv/LKK6jVam7evAnowqHk5GTGjh2bZ66L/y7fnR/h4eGcOnWKvn376sMrgGrVqtGyZUu2bNmS55ghQ4bkuv/KK68QGxtLUlISgP4N7oYNG9BoNE9dkxBCCPEisLKyytdqhPnx+++/U7FiRSpUqEBMTIz+59VXXwVgz549udo3adIkV3il1WpZu3YtHTt2RKvV5jpH69atSUxM5MSJE7nO0a9fP314BbrXc4Dr168DcPLkSUJCQvjwww/zzOV17z1HXFwcf/31F926dSM5OVl/zdjYWFq3bs3Vq1cJDQ0tkOdICCGelfTAEkKIB9SpU6dAJnH38fHJdd/e3h6A+Ph4AP03vVWqVHnuawH6YKx8+fJ59lWsWJHt27eTmpqKpaVlvmq0sbGhe/fuLFy4kIEDBzJ27FiaN29Oly5deP3111Eq5fsPIYQQJUNKSgouLi4Fcq6rV69y8eJFnJ2dH7o/Kioq1/3/rnwcHR1NQkIC8+fPZ/78+fk6R0G85wgODkar1fLZZ5/x2WefPfK6np6ejzyHEEIUNgmwhBDiOajV6oduV6lUD93+LMMRC8uTajQ3N+eff/5hz549bN68mW3btrFq1SpeffVVduzY8cjjhRBCiBfFnTt3SExMpEyZMgVyPo1GQ9WqVZk1a9ZD93t7e+e6/+BQwHvHg26IYp8+fR56jmrVquW6XxDvOe5dd9SoUbRu3fqhbQrqORJCiGclAZYQQuSDvb09CQkJubZlZWURHh7+TOfz9/cH4Ny5c499Q5jf4YSlSpUCdHNp/NelS5dwcnLK1fsqv5RKJc2bN6d58+bMmjWLr776ivHjx7Nnz56HzhcmhBBCvEiWL18O8MjQ5mn5+/tz+vRpmjdv/kxTAjg7O2NtbY1arS6w19kH33M86pylS5cGwNjYWF7fhRDFlowBEUKIfPD39+eff/7JtW3+/PmP7IH1JK1atcLa2pqpU6eSkZGRa9+D35haWlqSmJj4xPO5u7sTGBjI0qVLcwVt586dY8eOHbRr1+6pa4yLi8uzLTAwECDPUuBCCCHEi+avv/7i888/x8/Pj549exbIObt160ZoaCgLFizIsy89PZ3U1NTHHq9SqejatSt
r167l3LlzefZHR0c/dU01atTAz8+POXPm5Pky7t57DhcXF5o2bcpPP/300C/nnuW6QghR0KQHlhBC5MPAgQMZMmQIXbt2pWXLlpw+fZrt27c/8+p8NjY2zJ49m4EDB1K7dm169OiBvb09p0+fJi0tTb9iYM2aNVm1ahUjR46kdu3aWFlZ0bFjx4eec8aMGbRt25b69eszYMAA0tPTmTt3Lra2tkyaNOmpa5wyZQr//PMP7du3p1SpUkRFRfHDDz/g5eWVa2UlIYQQorjbunUrly5dIicnh8jISP766y927txJqVKl2LhxY54FVZ5V7969Wb16NUOGDGHPnj00bNgQtVrNpUuXWL16Ndu3b3/iXJtff/01e/bsoW7dugwaNIhKlSoRFxfHiRMn2LVr10O/YHocpVLJjz/+SMeOHQkMDKRfv364u7tz6dIlzp8/z/bt2wHdQjaNGjWiatWqDBo0iNKlSxMZGcmhQ4e4c+cOp0+ffubnRQghCoIEWEIIkQ+DBg0iJCSEn3/+mW3btvHKK6+wc+dOmjdv/sznHDBgAC4uLnz99dd8/vnnGBsbU6FCBUaMGKFvM3ToUE6dOsXixYuZPXs2pUqVemSA1aJFC7Zt28bEiROZMGECxsbGNGnShGnTpuWZJDY/OnXqxI0bN1i0aBExMTE4OTnRpEkTJk+ejK2t7TM/biGEEKKoTZgwAQATExMcHByoWrUqc+bMoV+/flhbWxfYdZRKJevXr2f27NksW7aMdevWYWFhQenSpRk+fDjlypV74jlcXV05cuQIU6ZM4Y8//uCHH37A0dGRypUrM23atGeqq3Xr1uzZs4fJkyfzzTffoNFo8Pf3Z9CgQfo2lSpV4tixY0yePJklS5YQGxuLi4sL1atX1z9/QghhSAptcZpRWAghhBBCCCGEEEKI/5A5sIQQQgghhBBCCCFEsSYBlhBCCCGEEEIIIYQo1iTAEkIIIYQQQgghhBDFmgRYQgghhBBCCCGEEKJYkwBLCCGEEEIIIYQQQhRrEmAJIYQQQgghhBBCiGLNyNAFiJJHo9EQFhaGtbU1CoXC0OUIIYQQaLVakpOT8fDwQKmU7++el7zWCyGEKG7ktb7kkwBLFLiwsDC8vb0NXYYQQgiRx+3bt/Hy8jJ0GS88ea0XQghRXMlrfcklAZYocNbW1oDuD4eNjY2BqxFCCCEgKSkJb29v/WuUeD7yWi+EEKK4kdf6kk8CLFHg7g0lsLGxkTe1QgghihUZ7lYw5LVeCCFEcSWv9SWXDAwVQgghhBBCCCGEEMWaBFhCCCGEEEIIIYQQoliTAEsIIYQQQgghhBBCFGsyB9ZL5p9//mHGjBkcP36c8PBw1q1bR1BQkH6/Vqtl4sSJLFiwgISEBBo2bMiPP/5I2bJlC7wWtVpNdnZ2gZ9XlCzGxsaoVCpDlyGEEEIIIYQQwoAkwHrJpKamEhAQQP/+/enSpUue/dOnT+e7775j6dKl+Pn58dlnn9G6dWsuXLiAmZlZgdSg1WqJiIggISGhQM4nSj47Ozvc3NxkQkYhhBBCCCGEeElJgPWSadu2LW3btn3oPq1Wy5w5c/j000957bXXAFi2bBmurq6sX7+eN998s0BquBdeubi4YGFhIaGEeCStVktaWhpRUVEAuLu7G7giIYQQQgghhBCGIAGW0AsJCSEiIoIWLVrot9na2lK3bl0OHTr0yAArMzOTzMxM/f2kpKRHXkOtVuvDK0dHx4IrXpRY5ubmAERFReHi4iLDCYUQQgghhBDiJSSTuAu9iIgIAFxdXXNtd3V11e97mKlTp2Jra6v/8fb2fmTbe3NeWVhYFEDF4mVx79+LzJkmhBBCCCGEEC8nCbDEcxs3bhyJiYn6n9u3bz/xGBk2KJ6G/HsRQgghhBBCiJebBFhCz83NDYDIyMhc2yMjI/X7HsbU1BQbG5tcP6JgTJo0icDAQEOXIYQQRU6r1XL48GFDlyGEEEKIQpSZoyYuNcvQZYgXhMy
BJfT8/Pxwc3Nj9+7d+tAkKSmJw4cP8+677xq2uGKgb9++LF26NM/2q1evUqZMGQNUJIQQJde8efMYOnQoI0eO5JtvvjF0OUIIIYR4Clqtlvi0bCISM4hMziAyMYOIpAwikzKISMwgLjEJRVIolhnhlHGxYvKH7xu6ZPECkADrJZOSkkJwcLD+fkhICKdOncLBwQEfHx8+/PBDvvjiC8qWLYufnx+fffYZHh4eBAUFGa7oYqRNmzYsXrw41zZnZ+enPk9WVhYmJiYFVdZTy87OxtjY2GDXF0KIx7l69SqjRo0CwMfHx8DVCCGEEOJBWq2W2NQswhMyCE1IJzwxnbCEdMIT7wVU6WQlx+CsjsJTEYunIgYPRQzlFLE0u3vbWXF34S8TuJxUGpAASzyZBFgvmWPHjtGsWTP9/ZEjRwLQp08flixZwscff0xqaiqDBw8mISGBRo0asW3bNszMzAxVcrFiamr60OGUf//9N6NHj+b06dM4ODjQp08fvvjiC4yMdP+LNW3alCpVqmBkZMQvv/xC1apVmThxIs2aNWPXrl2MGTOGCxcuEBgYyOLFiylfvnyu8//000988cUXxMbG0qFDBxYsWICtra1+/8KFC/nmm28ICQnB19eXYcOGMXToUABu3LiBn58fK1eu5IcffuDw4cPMmzePXr16MXLkSJYtW4ZKpWLgwIFERESQmJjI+vXrC+9JFEKIx8jJyaF3796kpaXRvHlzPvjgA0OXJIQQQrxUUjJzCE9IvxtOZRCWkE5YQsbdkEr3Y5MTj7ci6u5PNKUVMbyiiMFDEYuHIhYLo8w8aUM2cNvYiFiFAucs0BhZoLX1ppx7FYM8TvHikQDrJdO0aVO0Wu0j9ysUCqZMmcKUKVOKsKoXW2hoKO3ataNv374sW7aMS5cuMWjQIMzMzJg0aZK+3dKlS3n33Xc5cOAAAOHh4QCMHz+eb775BmdnZ4YMGUL//v31bQCCg4NZvXo1f/75J0lJSQwYMIChQ4fy66+/AvDrr78yYcIEvv/+e6pXr87JkycZNGgQlpaW9OnTR3+esWPH8s0331C9enXMzMyYNm0av/76K4sXL6ZixYp8++23rF+/PlfAKYQQRW3atGkcPnwYW1tbFi9ejFIp03UKIYQQBUWj0RKTksnt+DRux+lCqrCE+z2owhLSScrIwZwMvBXR+Cii8FFEUUURRdu7t71V0ZgbPX7eqtOmJly2dqSxqRtutqXA1ovdykxGh20n0K4sy1suRGluD7JYk3gKEmAJg9NqtaRnq4v8uubGqqde3W7Tpk1YWVnp77dt25Zy5crh7e3N999/j0KhoEKFCoSFhTFmzBgmTJig//BVtmxZpk+frj/2XoD15Zdf0qRJE0AXMrVv356MjAx9r7eMjAyWLVuGp6cnAHPnzqV9+/Z88803uLm5MXHiRL755hu6dOkC6OYyu3DhAj/99FOuAOvDDz/Ut7l3nnHjxtG5c2cAvv/+e7Zs2fJUz4cQQhSkEydO6IP/uXPn4u3tbdiChBBCiBfMvbmnbselcTs+jTvx6dyOu/v77v2sHA2gxZEk/BTh+CkjqHEvnFJE4W0adX+I36Ouo1CisPEiyt6THeZmqM1t6ePdCmy9wNaLGf9O4HTMGWY0GUsb3zYA+MZdwjzqH8zMHcHCoQieDVHSSIAlDC49W02lCduL/LoXprTGwuTp/hdo1qwZP/74o/6+paUl7733HvXr188VhjVs2JCUlBTu3Lmjn7+lZs2aDz1ntWrV9Lfd3d0BiIqK0h/n4+OjD68A6tevj0aj4fLly1hbW3Pt2jUGDBjAoEGD9G1ycnJyDTEEqFWrlv52YmIikZGR1KlTR79NpVJRs2ZNNBpN/p8QIYQoIBkZGfTu3ZucnBy6du1Kr169DF2SEEIIUSylZuZwMzbtbi8qXSh1526PqjvxaaRm3e8cYEMKpRUR+CoiqKGM0AVWJhH4KSOxIu3xFzKzA/tSYO8L9r5sUKRyJCu
eoDKvUbt0WzAyISrmHNM2v4UjjvSp3lN/aG33OliZWmNtbK3fVt6+PId7HH7qTgRC3CMBlhBPwdLS8plXHLS0tHzo9gcnU7/3xzy/IVJKSgoACxYsoG7durn2qVSqfF1fCCGKg3HjxnHhwgVcXV2ZN2+evLkVQgjxUktMz+ZWbBo3YlO5GZvKjdg0/e/o5Mxcbc3IxF8RTllFOC0VEfgZR1DOKJJSRGCjfVxPKoWux5SjP9j7gYMf2PsSY27LgrC9xOWkMqPJDH3rf/eNY1PoUXy96lPbSLcglZ+tH819mlPatjRqjRqVUvcZZHiN4XmvJq/t4jlJgCUMztxYxYUprQ1y3YJQsWJF1q5di1ar1f9RPnDgANbW1nh5eT33+W/dukVYWBgeHh4A/PvvvyiVSsqXL4+rqyseHh5cv36dnj17PuFM99na2uLq6srRo0dp3LgxAGq1mhMnThAYGPjcNQshxNPYtWsXc+bMAeDnn3/GycnJsAUJIYQQhezeUD99QBWTxq24e4FVGnGpeeeYsiWFMopQXlWFUcU4nIomEfhq7uCYE4mC/8xz/OBda3dw8AfH0uBY5u5tXWi1PfRvtoVso6l3U14r8xoAxpmJrPhHt4jKhPoTsDbR9aJq49sGXxtf6nvU15/a0tiSOc3mFOhzI8SjSIAlDE6hUDz1UL7iZOjQocyZM4cPPviA999/n8uXLzNx4kRGjhxZIJMPm5mZ0adPH2bOnElSUhLDhg2jW7du+tUQJ0+ezLBhw7C1taVNmzZkZmZy7Ngx4uPj9atMPswHH3zA1KlTKVOmDBUqVGDu3LnEx8fLNyNCiCIVFxenn6/v3XffpX379gauSAghhCg46VlqQmJSuR6TwrUo3e/r0anciE0lOSPnIUdocSWehspQAs2iqGYagb8iDPfsW1hmx+Zu+mDGZe4ATmXvh1OO/rrbDqXB1IosdRbzTs/jcvxlZpd7FxOVrgfV9YTr7Lq1C0tjS32AZWtqy7sB7+Jl7YVKcf9L/ybeTWji3aSAnyEh8u/FTQ2EKCY8PT3ZsmULo0ePJiAgAAcHBwYMGMCnn35aIOcvU6YMXbp0oV27dsTFxdGhQwd++OEH/f6BAwdiYWHBjBkzGD16NJaWllStWpUPP/zwsecdM2YMERERvP3226hUKgYPHkzr1q3zDD0UQojCotVqGTJkCGFhYZQvX56ZM2cauiQhhBDiqWm1WiKTMrkWncL16BSuRafevZ1KaEL6I4+zJo0G1pHUtoigsuoOpdQ3cE67hnH23WF/GuC/h9t4gXM5cCr/wO/yYHm/9/LluMtsv7Edt6Rsurnr5ts1Vhqz6vIqkrKSuJpwlcqOlQFo7NUYC2MLarjUyHWZoYFDn/t5EaKgKbRarfbJzYTIv6SkJGxtbUlMTMTGxibXvoyMDEJCQvDz89OvsieKB41GQ8WKFenWrRuff/65ocvJRf7dCFEyLV++nLfffhsjIyMOHTqUa7GJgva41ybx9OT5FEK8jLLVGm7EpHIlMoXgqBR9b6rr0Sm5Jk7/L0czeMUhntrmEVRU3cErOwT75KsYp4Q+/ACFStdzyrk8OJW7/9upHJha5Wp6KuoUJ6JO0LF0R5wtnAH489qffLL/EwKdA1nebrm+7dLzSzFVmdKiVAuczEvecH15bSr5pAeWEC+pmzdvsmPHDpo0aUJmZibff/89ISEh9OjRw9ClCSFeAjdu3OC9994DYNKkSYUaXgkhhBBPI0et4UZsGlcjk7kSmcKVqGSuRiYTEpNKtvrh/T9USgU+DhZUsddQzzKMSoTgk3UV26QrqOKCUcRlP/xiNp7gWhlcKt3/7VQWjExzNdNqtdxJuUNE/EVqu9XWb596ZCoXYi/gYelBG782AAQ6B9KlbBcCnQNznaNP5T7P/qQIUQxIgCXES0qpVLJkyRJGjRqFVqulSpUq7Nq1i4oVKxq6NCFECadWq+nduzfJyck0bNiQsWPHGrokIYQQLyG1Rsv
NWF2PqquRyVyJ0v2+Hp1Klvrhq4Jbmqgo42pNGWcrqtimU011E9+sYOyTLqKMOAO3bz78YqY2d0OqSg+EVRXB3P6hzbPV2WRrsrEwtgDgdPRpem/tjYOZA3u77dXPW9vUqykelh7Ym90/j7eNN5MbTH6OZ0aI4kkCLCFeUt7e3hw4cMDQZQghXkIzZsxg//79WFtbs3z5cpl7TwghRKGLTcnkUkQyF8OTuBCexKXwZIKjU8jKeXhQZW6soqyrFWVdrCnnYklV62TKaa7hmHQJRcQZuHUaUiIefjG7UuAeAO7VwK2aLrCy9YJ8Lpb0w6kfWHJ+Ce9Ue4cBVQcAUNGxIhZGFnhZeZGcnYyNiW6I3LuB7z79kyHEC0oCLCGEEEIUmRMnTvDZZ58B8N133+Hn52fgioQQQpQkOWoN12NSuRiexMXw5Lu/k4hKznxoezNjJWVdrCnrakU5V2vKulhRwTYH95SLKMMOQOhxOHIc0mIecrRCNy/VvbDKPQDcqj6yV9V/ZWuy+eHUD5yIPMGPLX7U97ayNrEmPSedC7EX9G1NVabsf2s/xkrjp35OhCgpJMASQgghRJFIS0ujZ8+e5OTk0LVrV/r0kbk4hBBCPLuEtCwu3A2qLoUncTEiiSuRj+5V5etoQQU3Gyq621DR3ZoKbjZ4WStRRp2D0MNw5xicPg5x1/IerDTWDflzrwbugbqwyrUymFjmq9ZsTTZno8+Smp3KK16vALqVAbdc30JYahinok/RwKMBAG392tLAowF+trm/5JHwSrzsJMASQgghRJEYM2YMly5dwt3dnZ9++kk/f4cQQgjxJLEpmZwNTeRcaOLd30mEJqQ/tK2liYoK7jZUcLO+G1bpbluaGkFiKNz+F24ehgNHIeIsaB4ywbqDP3jWBK9aut9uVfNMrP44Gq0GtVatD53+vv03I/aOwN/WXx9gAQyqNgiVQkV5+/L6bU7mTiVylUAhnpcEWEIIIYQodFu3buX7778HYMmSJTg6Ohq4IiGEEMVVdHKmPqg6G5rI+dBEwhIzHtrW28Gcim42VHC3oZK7LrDytrdAqVSARg1RF+H2Tjj2L9w6DIm38p7EwhE8a90Nq2qARw2wcHjm+v936n+svryaYdWH0bVcVwBqu9XG0cyRsvZlydZk64Ot18u9/szXEeJlIwGWEEIIIQpVTEwM/fv3B2DYsGG0atXKwBUJgM6dO7N3716aN2/OmjVrDF2OEOIlFZeaxanb8Zy5o+tVdS40kYikh4dVpZ0sqeJpS1VPW6p42lLJwwZb8weG1WWlQdgJOH9IF1bdPgKZiblPolDqJlb3qa8LrLxq6SZdf4ZewRqthjPRZ/g3/F8GVxuMUqEEQKvVEpcRx7HIY/oAy9bUlj3d9kjvYyGegwRYQgghhCg0Wq2WwYMHExERQaVKlfj6668NXZK4a/jw4fTv35+lS5cauhQhxEsiI1vN+bAkTt9O4NTdn1txaXnaKRTg72xFFQ8bfWBVycMGa7P/zAGVnQ7XD0DIPrixD0JP5B0OaGwJ3rV1gZV3XV1gZWr9zI8hW52NsUpXh1qr5t1d75KSnUJDj4ZUda4KQOeynannXo8A54D/PC4Jr4R4HhJgCSGe2Y0bN/Dz8+PkyZMEBgayd+9emjVrRnx8PHZ2doYuTwhRDCxYsIB169ZhbGzMr7/+irm5uaFLEnc1bdqUvXv3GroMIUQJpdVqCYlJ1QdVp24ncDE8iWy1Nk9bf2dLArzsdGGVly2V3G1081X9V04m3Dl6P7C6cxTUWbnbWLmBTz1dYOVTD1yrgOr5P/aejT7LF4e/wNLYkkWtFwG6SdVb+bYiLTtNH2oBeFp54mnl+dzXFELkJgGWEPnQt29f/TfURkZGODg4UK1aNd566y369u2LUqnUt/X19eXmzZsAmJub4+/vz/Dhwxk4cKC+zb2g5x4XFxcaNWrEjBkzKF269FPXN2nSJNavX8+pU6ee8RE+G29vb8LDw3Fyyv8
kk/Pnz2fFihWcOHGC5ORkCbuEKMEuXLjAhx9+CMDUqVMJDAw0aD0vkn/++YcZM2Zw/PhxwsPDWbduHUFBQbna/O9//2PGjBlEREQQEBDA3LlzqVOnjmEKFkK89JIysjl5K4HjN+M5dTuB07cTSEzPOzm6k5UJgd52BHjZEehjRzUvu9zDAB+kUUP4Kbj2ly60un0Ycv4zvNDaA/xeAd9XwLcR2Ps+03DAB8Wkx3Ag9ABl7MtQ2bEyAHZmdlyIvYCR0oi07DQsjC0AmNxg8nNdSwiRfxJgCZFPbdq0YfHixajVaiIjI9m2bRvDhw9nzZo1bNy4ESOj+/87TZkyhUGDBpGWlsbvv//OoEGD8PT0pG3btrnOefnyZaytrbl69SqDBw+mY8eOnDlzBpVKVdQP75moVCrc3Nye6pi0tDTatGlDmzZtGDduXCFVJoQwtIyMDHr06EF6ejqtWrVixIgRhi7phZKamkpAQAD9+/enS5cuefavWrWKkSNHMm/ePOrWrcucOXNo3bo1ly9fxsXFBYDAwEBycnLyHLtjxw48PDwK/TEIIUq20IR0jt2I49iNeI7djOdSRBLa/3SuMjVSUsXTlkBvO/2Pl73544fSJYXpAqvg3XB9L6TH5d5v6XI/sPJrDA6lnzuwUmvUqJT333/POz2PVZdX0b18d32A5W3tzcwmM6npWlMfXgkhipYEWELkk6mpqT6s8fT0pEaNGtSrV4/mzZuzZMmSXD2srK2t9W3HjBnD9OnT2blzZ54Ay8XFBTs7O9zd3ZkwYQI9e/YkODiY8uXL81979+7l448/5vz58xgbG1O5cmVWrFjBnj17mDxZ983PvTcDixcvpm/fviQkJDBq1Cg2bNhAZmYmtWrVYvbs2QQE6Mbj3+u59dFHH/HZZ58RHx9P27ZtWbBgAdbWurkBNBoNM2fOZP78+dy+fRtXV1feeecdxo8fn2cIYX7c640hw1aEKNnGjh3L6dOncXZ2ZunSpbl6qoona9u2bZ7XjAfNmjWLQYMG0a9fPwDmzZvH5s2bWbRoEWPHjgUo0F65mZmZZGZm6u8nJSUV2LmFEMWfWqPlYngSx2/Gc/RGHMdvxhP+kFUBSzlaUNPHnuql7KnubUd5N2uMVU/4+5+dDjcP6kKra39B1IXc+01tdEFV6aa6307lnjuwuker1TJu/zj23dnHqg6r8LL2AqCxV2PORJ/Bz9YvV/vWvq0L5LpCiGcjAZYQz+HVV18lICCAP/74I1eAdY9Go2HdunXEx8djYmLy2HPdmxcmKysrz76cnByCgoIYNGgQv/32G1lZWRw5cgSFQkH37t05d+4c27ZtY9euXQDY2toC8MYbb2Bubs7WrVuxtbXlp59+onnz5ly5cgUHB93SwNeuXWP9+vVs2rSJ+Ph4unXrxtdff82XX34JwLhx41iwYAGzZ8+mUaNGhIeHc+nSpWd/0oQQJd6WLVv49ttvAViyZMlT99QUj5eVlcXx48dz9WJVKpW0aNGCQ4cOFco1p06dqv+yRAhR8mVkqzlxM54jd8OqEzfjSc1S52pjpFRQ2cOGWr4O1CplT01fe1yszfJ3gcRQuLINLm/VzWWVa1igAjxrgH9zKNMcPGuC6hFDDJ9Ctiab01GniUiLoEPpDrorKRSEp4STlJXEwbCDdCvfDdAFWI29Gj/3NYUQBUsCLGF4Wi1k5119pNAZWxTItzcVKlTgzJkzubaNGTOGTz/9lMzMTHJycnBwcHhowHVPeHg4M2fOxNPT86G9r5KSkkhMTKRDhw74+/sDULFiRf1+KysrjIyMcn1I3L9/P0eOHCEqKgpTU1MAZs6cyfr161mzZg2DBw8GdCHbkiVL9D2uevfuze7du/nyyy9JTk7m22+/5fvvv6dPnz4A+Pv706hRo2d5qoQQL4HIyEh9r6Bhw4bRrl07A1dU8sTExKBWq3F1dc213dXV9am+YGjRogWnT58mNTUVLy8vfv/9d+rXr//QtuPGjWP
kyJH6+0lJSXh7ez/bAxBCFDv3Aqt/r8fy7/U4Tt1OIEutydXG2tSIGqXsqVXKnlq+DgR422Jhks+Pk1otRJzRBVaXt0D46dz7rT2gzKvg/yqUbgYWDs/9mNQaNZnqTP1wvyvxV+i3vR9Wxla09m2NsVIXin1Q/QOMlEZUdar63NcUQhQuCbCE4WWnwVcGmIvjkzAwsXzu02i12jzj+EePHk3fvn0JDw9n9OjRDB06lDJlyuQ51svLC61WS1paGgEBAaxdu/ahPbUcHBzo27cvrVu3pmXLlrRo0YJu3brh7u7+yLpOnz5NSkoKjo6Oubanp6dz7do1/X1fX199eAXg7u5OVFQUABcvXiQzM5PmzZvn78kQQrzUNBoNffr0ISoqimrVqjFt2jRDlyQe416v3fwwNTXVfxkihHjxpWepOXFLF1gdfkRg5WZjRt3SDvoeVuVcrVEpn+LL35xM3cTrl7foelslhT6wUwHedaBcG92PS8UCGxYIsPzCcn48/SNvln+TYTWGAVDBvgKlbEpR2bEyKVkp2JvZA1DLrVaBXVcIUbgkwBLiOV28eBE/v9zj452cnChTpgxlypTh999/p2rVqtSqVYtKlSrlardv3z5sbGxwcXHJFSI9zOLFixk2bBjbtm1j1apVfPrpp+zcuZN69eo9tH1KSgru7u4PnWvqwVX/jI1zd8lWKBRoNLo3MLLcvRDiaXz77bds374dMzMzfvvtN8zM8jmURDwVJycnVCoVkZGRubZHRkbKcE0hXjAarQal4v4cURdiL5CRk0Flp8qYqnSh8e2k21yOv4yLhQvVnKvp226+vpksdRav+ryKralu+og7yXc4H3seFwsXqrtU17c9FXmWi+FJ3Iyw5PiNNE7dTiBbnQMKDWiNAAVuNmbU93ekXmkH6pV2xMfB4vGTrT9MVhoE74Tz6+HqDshKub/P2ELXw6p8WyjbGqycn/r5+i+tVsuXh7/kVNQpfmzxI84WunNaGVuRnJXM2Ziz+rYqpYo/g/58+sckhCg2JMAShmdsoesNZYjrPqe//vqLs2fPPnZ1LW9vb7p37864cePYsGFDrn1+fn65wqQnqV69OtWrV2fcuHHUr1+fFStWUK9ePUxMTFCrc89LUKNGDSIiIjAyMsLX1/dpHpZe2bJlMTc3Z/fu3Y8dAimEECdPnmTMmDEAzJ49O09gLwqOiYkJNWvWZPfu3QQFBQG63m+7d+/m/fffN2xxQrzEbibdJDg+GDcrN/3KddmabMbtG0diZiLfNvtWP5zt57M/8/3J7wkqG8TE+hP15+i9pTdZmiy2dtmqn1D87zt/M+3oNNr6tWW683R92+lHpxOXEUdlp8r6AOtw+GEmHZpEY6/GvF9pKgeCY9h3NYbj6o9RmMSSeuNdNOmlAHByvUymwzJ8LKryw6sL9IHV2H1j2XY0klG1RlHZSfc4biTeYMfNHXhZedGu9P2h4dcTr0N2Bm7h57G4tBmu7IDs1PtPirW7rodV+Xa6CdiN8/fFRlp2GvGZ8dib2uufszPRZ5h3eh6ulq7650yhUHAi6gRX469yJuYMzX10owaaejdlRbsVVHSsmOu8El4J8WKTAEsYnkJRIEP5CltmZiYRERGo1WoiIyPZtm0bU6dOpUOHDrz99tuPPXb48OFUqVKFY8eOUavW03dTDgkJYf78+XTq1AkPDw8uX77M1atX9df19fUlJCSEU6dO4eXlhbW1NS1atKB+/foEBQUxffp0ypUrR1hYGJs3b6Zz5875qsPMzIwxY8bw8ccfY2JiQsOGDYmOjub8+fMMGDDgqR8HQEREBBEREQQHBwNw9uxZrK2t8fHx0U8sL4R4saSmpvLWW2+RnZ1NUFAQ77zzjqFLeuGlpKTo/04C+r/xDg4O+Pj4MHLkSPr06UOtWrWoU6cOc+bMITU1VT//mBCiYIQkhnAl/goelh5UddbNkZSRk8HbW98mOj2aLV22YG6k67G+6fom5p2eR/fy3fU
BlpHCiL9u/UW2JpvEzER9GGOkNCJHm0N6Tnqu6/nY+JClzsoVtNzrTVXKplSutvU96pOclYyVsRUA4YnpXArVYqcoz4ELxmzeuU/f1tzHBiOlmmZlvWhVtir1SjtyMj6bzw6At70NpRzvvxc/F3OOm0k3yVTfX3n0asJV5p6cS3WX6roAKzsDrm5n3PGvuKBJ438RUTRO103E/q+jDx/ZGlPFvjw/dVihHxo45dAUriVcY1iNYdR0rQnAqahTfHX4K7ysvZjVdJb+egO2D+Bc7DnmvjqXpt5NAchUZ7IvdB9eVl65noch1YagVCip6VJTv83ezF4/RFAIUXJIgCVEPm3btg13d3eMjIywt7cnICCA7777jj59+jxxefhKlSrRqlUrJkyYwJYtW5762hYWFly6dImlS5cSGxuLu7s77733nv5DYteuXfnjjz9o1qwZCQkJLF68mL59+7JlyxbGjx9Pv379iI6Oxs3NjcaNG+eZ+PdxPvvsM4yMjJgwYQJhYWG4u7szZMiQp34M98ybNy/XSlaNG+tWeLlXsxDixfPhhx9y+fJlPDw8WLhwoXzDXQCOHTtGs2bN9PfvTaDep08flixZQvfu3YmOjmbChAlEREQQGBjItm3bnurvuxAvsyx1FhnqDGxMbABdODL54GTCU8P5qeVPmKh0c5L+ee1PFpxdQPfy3fUBlqnKlJDEEDLUGcSmx+p7SvlY+xDoHIiH1f25XRUKBePrjsdEZYK1yf3pIrqU7UJr39a5tgGse21dnlpb+bailW+rPNsn1P2Cg8GxzP8rhn1Xr3AtOhUwB3RBtrmxirqlHWhUxolGZX+hvKt1rr/PXvYdaVmqJRpt7rmvPqv3GfEZ8ZS2La3f5mbhRpcynfFSa2HjMN0QwcxELN1csDExxsrCBaoHQeUgUnPiSdo7gjSVUa55rS7FXeJszFmSMpNy/Xe4GHeRLHXuVbjtzewxVZmS9sBCT2XtyvJZvc/wt/PP8/wIIV4OCq1WqzV0EaJkSUpKwtbWlsTERGxsbHLty8jIICQkBD8/P5kbReSb/LsRovhas2YNb7zxBgqFgt27d+cKXYqTx702iacnz6d4EWi1WsJSw7iZeJM67nUwUuq+u194diFzT87ljXJv8Gm9T/Vt666oS3pOOps7b8bHxgeAbTe28dvF32jm3Yy+Vfrqz30k/AjWJtb42/nrw66ieDzXY1LZezmavZejOHw9LtfE60oFVPOyuxtYOVHdxw5TI9XzXzj2GpxeCWdWQcLN+9ttPKFKF6jcGTxq6MOqtOw0ItIiUKLE19ZX3/xoxFHiM+IJdAnExcIFgMTMRM5En8HG1IYA5wB922x1NkZKI/lCRDwVeW0q+aQHlhBCCCGeya1btxg0aBAAY8eOLbbhlRCi5ItMjeRk1EmsTaxp6NkQAC1aXlv/GpnqzFyhlIOZAxqthpj0GP3xCoWCUbVGYWlsiZ2ZnX57G982tPFtk+d6ddzrFO4Duis9S82/12PZezmKPZejuRWXlmu/l705Tcs706iMM/VLO2JrYfyIMz2ljCQ4+zuc/g3uHL2/3cQKKr0GAW9CqUbwkFEIFsYWuXpv3VPbrXaebbamtrzi9Uqe7caqAnocQogSRQIsIYQQQjy1nJwcevToQUJCArVr1841NFgIIQrTgdADXI6/TOcynfXzHO29vZcvDn/BK56v6AMspUJJGbsyZKozScm+vxpey1ItaezVGEczx1zn7Va+W5E9hse5GZvKnku6wOrf67Fk5tzvZWWsUlDXz5Gm5Z1pWt4Ff2fLguulpNVC2Ak4thjOrYV7w/cUSvBvrgutyrcDk+dfCEkIIZ6FBFhCCCGEeGqTJk3iwIEDWFtb89tvv2FsLN+WCyEKVkJGAkcijqDWqmnr11a/fdrRaYQkhlDevrw+rKrgWIFqztWo4FAh1zl+a/9bnoDH2sQaa3LPPWVIGo2W03cS2HEhkp0XIgmOSsm138PWjKYVXGhW3oU
G/o5YmhbwR7iMJDi7Go4vgYiz97c7lYcab0PVN8Ba5tcTQhieBFhCCCGEeCq7du3iq6++AmDBggX4+/s/4QghhHi8uIw4TkWdooJDBf0k6Odjz/PR3x/ha+ObK8Bq4tWE8vblsTKx0m8LcA7g13a/5jlvcZ1DKTNHzcFrsew4H8nui5FEJd9f8c9IqaC2rwNNyzvTrIILZV2sCudxxF2Hw/Ph5C+QlazbpjKFykFQsx/41Ms1CbsQQhiaBFhCCCGEyLfIyEh69eqFVqtl8ODBdO/e3dAlCSFeMBqthrCUMP3qfQCf7PuEA2EHGF93PG9WeBOASo6VqOhQkSpOVdBoNSgVuvmWPqr1kUHqfl6JadnsuRzFjgsR/H05mtQstX6flakRTcs707KSK03Lu2BrXki9WrVaCPkHDs+Dy1uBu+t5OZWDWv2hWnewcCicawshxHOSAEsIIYQQ+aLRaOjVqxeRkZFUqVKFOXPmGLokIcQLJiI1gs4bOpOtyeZQj0MYK3VBTaBLIBGpEZiqTPVt7c3sWd1xtaFKLRCRSRlsOxfB9vMRHAmJI0dzfwF4VxtTWlZypWUlN+qVdiiYFQMfRZ2jm9fqwLcQdf7+9jItod4QKP3qQydkF0KI4kQCLCGEEELky9dff82uXbswNzdn1apVmJubG7okIUQxtj90P79e/JUA5wCGBAwBwMXCBZVShVqr5nbybf1qde9Ue0ff5kUXkZjB1nPhbDkbzrGb8WjvZ1aUc7WiVSU3WlZypaqnLUplIQ/Ry8nUrSS4fzbE39BtM7aAwB5Q5x1wLle41xdCiAIkAZYQQgghnujAgQNMmDABgP/9739UqlTJwBUJIYqTO8l3OBB6gDZ+bbA1tQV081rtD91PYmaiPpxSKpT81v433C3dMVLe/yhSXOeqyq+IxAy2nNWFVsdv5Q6tavjY0baKOy0rueLrZFk0BWWlwYmlcOA7SA7TbbNwhHpDofYAMLcvmjqEEKIASYAlhBBCiMeKjY3lrbfeQq1W07NnT/r27WvokoQQBvbgnFQAw/YM42r8VaxNrGlXuh0A9d3rM7rWaOq61811rLe1d5HWWlgikzLYfOZ+T6sH1SxlT7uq7rSt4oaHXRH2Vs3JhGOLYd9MSI3WbbN2hwYfQM2+YFJEAZoQQhQCCbCEEM/sxo0b+Pn5cfLkSQIDA9m7dy/NmjUjPj4eOzs7Q5cnhCgAWq2W/v37c/v2bcqWLcuPP/74wveUEEI8u9CUUL749wtuJ9/mz6A/9X8Pmno1xcbEBkvj+wGJs4Uzb1d+21ClFoqkjGy2nY1gw+lQDl6LzdXTqta90KqqG+62RTzEWqOGs7/Dni8h4ZZum10paPQhBPYEI9PHHi6EEC8CmalPiHzo27cvCoUChUKBsbExrq6utGzZkkWLFqHRaHK19fX11be1sLCgatWqLFy4MFebvXv36tsoFApcXV3p2rUr169ff6b6Jk2aRGBg4LM+vGfm7e1NeHg4VapUyVf7uLg4PvjgA8qXL4+5uTk+Pj4MGzaMxMTEQq5UCPGsvvvuOzZu3IiJiQmrV6/G2tra0CUJIYrQ9cTrXIy9qL/vYObAsYhj3Ey6SXBCsH77B9U/YEmbJTTxbmKIMgtVZo6abeciePeX49T6Yhcfrz3DgWBdeFWzlD0TO1bi33HNWfNuA/o38iv68CpkH/zUGNa9owuvrNygw2z44LhuZUEJr4QQJYT0wBIin9q0acPixYtRq9VERkaybds2hg8fzpo1a9i4cSNGRvf/d5oyZQqDBg0iLS2N33//nUGDBuHp6Unbtm1znfPy5ctYW1tz9epVBg8eTMeOHTlz5gwqVSGuQlOAVCoVbm5u+W4fFhZGWFgYM2fOpFKlSty8eZMhQ4YQFhbGmjVrCrFSIcSzOHbsGKNHjwZg1qxZBgnKhRCGs/ryaj7/93MaeDTgp5Y/AWBuZM7UV6ZSyqYUZezK6NuWtJ6ZGo2WwyFxbDgVypaz4SR
l5Oj3lXWxIqi6J50CPPB2sDBckQm3YMdncGG97r6ZLTQaoZuc3cSAdQkhRCGRHlhC5JOpqSlubm54enpSo0YNPvnkEzZs2MDWrVtZsmRJrrbW1ta4ublRunRpxowZg4ODAzt37sxzThcXF9zd3WncuDETJkzgwoULBAcH52kHul5bderUwdLSEjs7Oxo2bMjNmzdZsmQJkydP5vTp0/oeXffqSUhIYODAgTg7O2NjY8Orr77K6dOn9ee813Nr+fLl+Pr6Ymtry5tvvklycrK+jUajYfr06ZQpUwZTU1N8fHz48ssvAd0QQoVCwalTp/L1HFapUoW1a9fSsWNH/P39efXVV/nyyy/5888/ycnJefIJhBBFJikpiTfffJPs7Gw6d+7M0KFDDV2SEKIQXUu4xncnvuNC7AX9tjpudTBWGmOiMkGjvd/jvEWpFpS1L1viQiuAGzGpzNh+iYbT/uKtBf+y8uhtkjJycLMx453Gpdk8rBE7RjTmvWZlDBde5WTB3zPg+9q68EqhhNoDYdgpXYAl4ZUQooSSHlhCPIdXX32VgIAA/vjjDwYOHJhnv0ajYd26dcTHx2NiYvLYc91bjj4rKyvPvpycHIKCghg0aBC//fYbWVlZHDlyBIVCQffu3Tl37hzbtm1j165dANja6lb/eeONNzA3N2fr1q3Y2try008/0bx5c65cuYKDgwMA165dY/369WzatIn4+Hi6devG119/rQ+pxo0bx4IFC5g9ezaNGjUiPDycS5cuPfuT9h+JiYnY2Njk6sEmhDAsrVbLO++8w7Vr1yhVqhQ///xzifygKoS4b+HZhWy6vonU7FQqOepWGfW19WXfm/tyzWtVEqVm5rD5bDhrjt3hyI04/XYbMyPaVXXntUBP6vo5oFQWg7+DYSdh/XsQdV53v1QjaDsN3PI3nYMQQrzI5BOjKDbSstMAXdf0ex+UstXZZGuyMVIaYaIyydPWzMhMvwJOtiabbHU2KqUKU5XpY9sWpAoVKnDmzJlc28aMGcOnn35KZmYmOTk5ODg4PDTguic8PJyZM2fi6elJ+fLl8+xPSkoiMTGRDh064O/vD0DFihX1+62srDAyMso1nG///v0cOXKEqKgoTE11z8fMmTNZv349a9asYfDgwYAuZFuyZIl+XpvevXuze/duvvzyS5KTk/n222/5/vvv6dOnDwD+/v40atToWZ6qPGJiYvj888/1tQghiof58+ezcuVKjIyMWLlyJfb2sty6ECWFRqthzZU1bAnZwswmM3EydwKgY+mOpGan0sCjQa72JTW80mq1HL0Rz+/HbrP5bDhpWWoAlApoXM6ZN2p606KSC6ZGxWRah+wM2DsVDs4FrRosHKHNNKj6OsgXDEKIl4QEWKLYqLtCt8Ty393/xsFM1zto8fnFzD05l65luzKpwSR926arm5Kek862rtvwtPIEYOWllUw/Op12fu2Y1niavm2btW2Iz4xnXad1lLG/P1dDQdFqtXl6JowePZq+ffsSHh7O6NGjGTp0KGXK5L22l5cXWq2WtLQ0AgICWLt27UN7ajk4ONC3b19at25Ny5YtadGiBd26dcPd3f2RdZ0+fZqUlBQcHR1zbU9PT+fatWv6+76+vrkmZXZ3dycqKgqAixcvkpmZSfPmzfP3ZDyFpKQk2rdvT6VKlZg0aVKBn18I8WxOnDjBsGHDAJg6dSr16tUzcEVCiIKkVChZe3UtF2IvsP3GdnpW7AlAA88GNPBs8ISjX3wRiRmsPXGH34/d5kZsmn67n5Mlb9Tyokt1L9xszQxY4UNEX4Hf+0DU3eGdVbpC2+lg6WTYuoQQoohJgCXEc7p48SJ+fn65tjk5OVGmTBnKlCnD77//TtWqValVqxaVKlXK1W7fvn3Y2Njg4uLyxJW9Fi9ezLBhw9i2bRurVq3i008/ZefOnY/8cJmSkoK7uzt79+7Ns8/Ozk5/29jYONc+hUKhX1nx3rDGgpacnEybNm2wtrZm3bp
1eWoQQhhGQkICr7/+OllZWbz22mt89NFHhi5JCPEc0rLTWHFpBfvu7OPn1j9jpNS99R9YdSChyaG0LNXSwBUWDY1Gy/7gGH759ya7L0Wh1mgBsDRR0b6aO91qeVOzlH3xHCp95nf4czhkp4KlC3ScAxXaG7oqIYQwCAmwRLFxuMdhQDeE8J5+lfvRq2Iv/Ruue/Z22wvohgXe82aFN+latisqZe6u3tu6bsvTtqD89ddfnD17lhEjRjyyjbe3N927d2fcuHFs2LAh1z4/P79cYdKTVK9enerVqzNu3Djq16/PihUrqFevHiYmJqjV6lxta9SoQUREBEZGRvj6+j7Nw9IrW7Ys5ubm7N69+7FDIJ9GUlISrVu3xtTUlI0bN2JmVsy+5RTiJaXVaunXrx8hISH4+vqyePHi4vlhTgiRb0ZKI5aeX0pCZgIHww7S2KsxwEsTXMWmZLLm+B1WHLnFzQd6W9XxdaBbbW/aVnHD0rSYfhzKToetY+DEUt1931eg689g7WrYuoQQwoCK6V9sYShqtZpJkybxyy+/EBERgYeHB3379uXTTz8t9A8yFsZ5V0wxVhljrMrbO+ehbZXGGCvz1/ZZZGZmEhERgVqtJjIykm3btjF16lQ6dOjA22+//dhjhw8fTpUqVTh27Bi1atV66muHhIQwf/58OnXqhIeHB5cvX+bq1av66/r6+hISEsKpU6fw8vLC2tqaFi1aUL9+fYKCgpg+fTrlypUjLCyMzZs307lz53zVYWZmxpgxY/j4448xMTGhYcOGREdHc/78eQYMGPDUjyMpKYlWrVqRlpbGL7/8QlJSEklJSQA4OzujUhWTeSaEeAnNnj2b9evXY2Jiwu+//y7zXgnxgsnIyWDz9c1cjLvIp/U+BcBEZcL7ge9jZmRGLdenf//xItJqtRy7Gc+v/95ky9kIstS6XuXWpkZ0relFz7o+lHV9fK93g0uJgt/ehNDjgAKafAxNxoBS3icJIV5uEmCJXKZNm8aPP/7I0qVLqVy5MseOHaNfv37Y2trq50R5WW3btg13d3eMjIywt7cnICCA7777jj59+qBUPn5y+EqVKtGqVSsmTJjAli1bnvraFhYWXLp0iaVLlxIbG4u7uzvvvfce77zzDgBdu3bljz/+oFmzZiQkJLB48WL69u3Lli1bGD9+PP369SM6Oho3NzcaN26Mq2v+v7377LPPMDIyYsKECYSFheHu7s6QIUOe+jGAbm6dw4d1Pe3+OyfYvV4fQoiid/DgQcaMGQPogqxnCdqFEIaVkJnAlH+noNFqeKvCW/jb6RZ96V6hu4ErKxopmTn8ceIOv/57i8uRyfrtVT1t6VXPh44BHliYvAAffaKvwK9dIeEWmNnB64ugTMHPRSqEEC8ihVar1Rq6CFF8dOjQAVdXV37++Wf9tq5du2Jubs4vv/ySr3MkJSVha2tLYmIiNjY2ufZlZGQQEhKCn5+fDB0T+Sb/boQoPNHR0VSvXp3Q0FDefPNNVqxYUSKHDj7utUk8PXk+De96wnWuxF+hjV8b/bZpR6bhZulG57KdsTF5Of673I5LY8nBG6w+epvkzBwAzIyVdArwoFe9UlTzsjNsgU8j4hwsew3SYsChNPT4HZwKfgEiIUoqeW0q+V6AryFEUWrQoAHz58/nypUrlCtXjtOnT7N//35mzZr1yGMyMzPJzMzU3783JEwIIUTxplar6dWrF6GhoZQvX5758+eXyPBKiJLmQuwFum/qjrmROfU96mNragvAmDpjDFxZ0dBqtRwOiWPxgRB2Xojk7pzslHaypHf9UnSp4YWt+Qu2QEz4GVjWCdLjwa0a9F4nqwwKIcR/SIAlchk7dixJSUlUqFABlUqFWq3myy+/pGfPno88ZurUqUyePLkIqxRCCFEQvvrqK3bs2IG5uTlr1qx54mqoQgjDScxM1AdVFR0qUtGhIu6W7qRkp+i3l3SZOWr+PB3Oov0hXAi//4Vp43LO9GvoS5Oyzii
VL2AIHxcCv3TVhVdetaHnGjC3M3RVQghR7EiAJXJZvXo1v/76KytWrKBy5cqcOnWKDz/8EA8PD/r06fPQY8aNG8fIkSP195OSkvD29i6qkoUQQjyD3bt3M3HiRAB+/PFHqlSpYuCKhBAPE5oSysSDE4lMjWTda+swUhqhUChY3m45pipTQ5dXJBLTs/nl35ssPnCDmBRdr38zYyVdanjRr4Fv8Z+U/XFSY+CXLpAaBa5VoddaMHs5AkkhhHhaEmCJXEaPHs3YsWN58803AahatSo3b95k6tSpjwywTE1NMTV9Od5ACSFESRAWFkaPHj3QarUMGDDgkX/fhRCGZ2dqx6W4S6Rmp3I+9jwBzgEAL0V4FZGYwaIDIaw4fIuUu/Nbudua8XZ9X96q442dhYmBK3xOGjWsHQhx18HOB3qtkfBKCCEeQwIskUtaWlqeFfVUKhUajcZAFQkhhChIOTk5vPnmm0RFRVGtWjXmzp1r6JKEEA+4nXSbfaH76FGxBwCWxpZ8/crX+Nn64WnlaeDqikZwVDI//X2d9adCyVbrJrgq72rNkKal6VDNA2PV41d/fmHs+wau7wEjc+ixGqzdDF2REEIUaxJgiVw6duzIl19+iY+PD5UrV+bkyZPMmjWL/v37F+h1ZPFL8TTk34sQBWf8+PHs27cPa2tr1qxZg7m5uaFLEkLcFZMeQ9CGILI0WVR1qkpV56oANPJsZODKisap2wn8sCeYHRci9dvq+DrwblN/mpZ3LlmLTNw+Anun6m53mAUuFQ1bjxBCvAAkwBK5zJ07l88++4yhQ4cSFRWFh4cH77zzDhMmTCiQ8xsb61aESUtLkw9NIt/S0tKA+/9+hBDP5o8//mD69OkA/Pzzz5QtW9bAFQkhHuRk7kQbvzbEpsdiZWJl6HKKzPGb8Xy7+yr/XInWb2tZyZUhTfypWcregJUVEo0atowGrQaqdYfAHoauSAghXggKrXRtEAUsKSkJW1tbEhMTsbGxybM/PDychIQEXFxcsLCwKFnfpokCpdVqSUtLIyoqCjs7O9zd3Q1dkhAvrMuXL1O7dm2Sk5MZMWIEs2bNMnRJRepJr03i6cjzWTAOhh3kfyf/xw8tftCvJJilzsJE9YLP7ZRPx27E8e3uq+y7GgOASqmgc3VPhjQpTRmXF3hi9ic5vhT+HAamNvDBCbByNnRFQpQI8tpU8kkPLFHk3Nx04/ujoqIMXIl4UdjZ2en/3Qghnl5KSgpdunQhOTmZV155hWnTphm6JCFeemqNmmlHpnE98TpLzy9lWI1hAC9FeHUkJI5vd1/hQHAsAEZKBV1rePFeszL4OFoYuLr/yE6H8DMQfhoSbkJKJKizQakCa3dwKA2lm4Kjf/7Ol5EIu6fobjcdK+GVEEI8BQmwRJFTKBS4u7vj4uJCdna2ocsRxZyxsTEqlcrQZQjxwtJqtQwcOJALFy7g7u7O6tWrZTiuEAai0WpQoEChUKBSqvi03qfsvrWb/lUKdq5Rg8tK1QU9mcmQlQbZqZCdTnBUMhtPhXE5MgVzlDRSmVO3oi9dG1bBw90DzIpJeJWRBOf/gIubIORvUGc9+RjnitDkY6jcGR43uuD0KkiLAccyUGdwwdUshBAvAQmwhMGoVCoJJoQQopDNmTOHVatWYWRkxO+//y69GYUwkMtxl5lyaAq9K/emjW8bAGq71aa2W20DV/aUNBpIDoPYYIi9pvtJCtUFVimRkBIFWSkPPbQMMBLgwU5mwXd/AMxswd4X7EqBUznwrgNetcHCoTAf0X2Jd2D/bDi9MvdjsHQBz5q6XlbWbqAyBU02JIZCxBm4dQiiL8KafnB4HnT/BaxcHn6Nk8t1v2sPApV8mSCEEE9DAiwhhBCihPrnn38YPXo0ALNmzaJhw4YGrkgUN2lpaVSsWJE33niDmTNnGrqcEm37je2ciTnD/07+j1alWqFUKA1d0pOlxekCmntD6KIuQtx1yEl/8rHGFqhNrInNNiY6Q0U
6pmhR4GxlgrudOaYKja6HVmaSrsdTdqpueF34ad3Pg5zKgV8TqNQJfBqAqoA/wmSl6lYEPDwf1Jn3rxnwJlTooLv9uF5V6Qlw+Cc48C3cPgxLO0GfP/MODww/rXs+VSZQrVvBPgYhhHgJSIAlhBBClEBhYWF069YNtVpNjx49eP/99w1dkiiGvvzyS+rVq2foMkosrVarX6xmSMAQMtWZvF3p7eIZXqmzdUHVzQNw58jdOZ9uPbyt0ljXU8qxjK5Xkq03WLuCle4nGju+3x/OiiO3yFbr1otqX82dj1qWw9f5EasrZqVC/E3dPFPxNyDinC4Mir0KMVd0P0cXgI0n1OwHtfqDpePzP+5b/8L6d3XBHECpRrqhgH6NHx9aPcjcDpqOgaqvw5IOut5Yy4Ng4C4wfmDV7RN3e19VaF90vcqEEKIEkQBLCCGEKGGysrJ44403iIyMpEqVKsyfP19WfBV5XL16lUuXLtGxY0fOnTtn6HJKFI1Wwy8XfuFczDmmNZ6GQqHARGXC6NqjDV3afTlZuoDo5kG4dRBuH4HstLzt7H3BPUD341oVnMqArc9De0GlZOYw/+9rLNx/lLQsNQCvlHXi49YVqOpl+/h6TCzBtZLu50GpsXD7X7i8BS5t1g1X3POFrrdTw+FQ/z0weca5s04sgz8/BK1aF4x1mA1lW+U/uPovR3/ouwkWtYbIc3D6N13QBpCdAWdX625X7/1s5xdCiJecBFhCCCFECTN69GgOHjyIjY0Nf/zxB5aWloYuSTylf/75hxkzZnD8+HHCw8NZt24dQUFBudr873//Y8aMGURERBAQEMDcuXOpU6dOvq8xatQoZsyYwcGDBwu4enEj8Qazj88mR5tDUJkgGng2MHRJOgm3IXgnXN2lm5z8v3NVmdmBT33wqQeeNcCtKpjbP/G0Go2WNSfuMGP7ZaKTdUPwArztGNO6PA3KOD1fzZaOuh5LFdpD+1lwfj0cmgsRZ3VB1plV8PrPuoDtaeydBnu/0t2u0lUXXpk9IWTLD0d/eOUj2DYWDs6FGn10KxZGntcNkbRw0q1aKIQQ4qlJgCWEEEKUICtWrOC7774DYPny5ZQtW9bAFYlnkZqaSkBAAP3796dLly559q9atYqRI0cyb9486taty5w5c2jdujWXL1/GxUU3eXRgYCA5OTl5jt2xYwdHjx6lXLlylCtXTgKsQlDarjSjao/CSGFEfY/6hitEq4XQE3BhPVzdAdGXcu+3dNYNlfOpD6UagnMFUD7d8MYjIXFM2XSec6FJAJRytGBsmwq0qeJW8D0/jUwhoDtUfUO3SuCOz3RDDBe2gI7fQmCP/J3n2OL74VXjj6HZJ8/e6+phqveGvV/rhiVe2gSVXtMNjQRdwKWURYyEEOJZSIAlhBBClBBnzpxh4MCBAIwfP55OnToZuCLxrNq2bUvbtm0fuX/WrFkMGjSIfv36ATBv3jw2b97MokWLGDt2LACnTp165PH//vsvK1eu5PfffyclJYXs7GxsbGyYMGHCQ9tnZmaSmZmpv5+UlPQMj6rk0mg1LD2/lHZ+7XC1dAWgZ8WeBipGA6HH4MIG3U/i7fv7FErwqgNlW0CZluBW7akDq3tux6Xx9dZLbD4bDoC1qREfNC9Dnwa+mBoVckCjVOrmm/J/FTa8D5c3w/qhuscX8Objjw35B7aM0t1uNl4331VBM7WCOoPgnxmwfw5U7HT/v4Otd8FfTwghXhISYAkhhBAlQEJCAl27diU9PZ2WLVsyefJkQ5ckCklWVhbHjx9n3Lhx+m1KpZIWLVpw6NChfJ1j6tSpTJ06FYAlS5Zw7ty5R4ZX99rLv6lH++bYNyy7sIx9oftY2GqhYSZpj7wAp36F8+t080TdY2IF5VrrVtPzb5avIYGPk5KZww97glm4P4SsHA1KBbxZx4eRLcvhZGX6nA/iKVk4wJu/wuaP4NjPusnYzWyh/CPC38wUWDsQNDlQ5XVoXIhzktV5B/bNgrA
TkByuG74JYCcBlhBCPCsJsIQQQogXnFqtpmfPngQHB+Pj48OKFStQqWSISkkVExODWq3G1dU113ZXV1cuXbr0iKOez7hx4xg5cqT+flJSEt7e8kH8ntfLvc6m65voWLpj0YZXaXFwbq0uuAo7eX+7iTWUbwOVgqBM89wr4T0jrVbLpjPhfL7pAlF357mqX9qRCR0rUdHd5rnP/8wUCmg3E9RZcHI5/DkcSjV4+HxWh76HlEiw94PXvi/YYYP/ZeUM9qV0wwhjrt7vgWXnU3jXFEKIEk4CLCGEEOIFN3HiRLZs2YKZmRl//PEHTk7POWmyeKn07dv3iW1MTU0xNS3i3jXFXHJWMtYm1gD42fqxtctWLIyfcTW8p6HVwo19cGyRblU+dZZuu9IIyrWBgLegTAswNiuwS16PTmHixvPsuxoD6Oa5Gt+uIi0ruRaPFU6VSl2IdfMgxF2D3Z9D+5m526REwQHd/IA0n1Agod4TOZbVBVixV+/3wLKVAEsIIZ6VBFhCCCHEC2zNmjV8+eWXACxYsICaNWsauCJR2JycnFCpVERGRubaHhkZiZubm4GqermsubKGb098y6LWiyhrr1soodDDq6w0OLsaDv8EURfub3etAoE9oVo3sCzY8DojW80Pe4KZ9/d1stQaTIyUDG3qz5Am/pgZF7NensZm0GEWLHsNji7UTejuWeP+/n9mQHYqeNSAyp2LpiansnB1O8Ree6AHlvRcFEKIZyUBlhBCCPGCOnv2rL73zMiRI+nVq5dhCxJFwsTEhJo1a7J7926CgoIA0Gg07N69m/fff9+wxb0E1Bo1f177k4TMBP68/icja4588kHPI+E2HF0Ax5dCRoJum7GFbrLyGn3APaBQhsL9fSWaz9af41ZcGgCNyzkzpVNlfJ0sC/xaBaZ0U6jcRbdC4Yml9wMsjQbO/q67/eqnhTt08EGO/rrfocch8+7CB7ZeRXNtIYQogSTAEkIIIV5AcXFxBAUFkZqaSvPmzZk2bZqhSxIFKCUlheDgYP39kJAQTp06hYODAz4+PowcOZI+ffpQq1Yt6tSpw5w5c0hNTdWvSigKj0qp4ttm37Lp+qbCXWkw+jLs+wbOrgGtWrfNrhTUGQzVe4G5XaFcNj41i883X+CPE7qJ4N1szJjYsRJtqrgVj+GCTxLwli7AurpLN9xSodD1WEuPB2NL8GtcdLU46nrnceeY7reFI5gU4wBQCCGKOQmwhBBCiBdMTk4Ob775JtevX8fX15dVq1ZhZCQv6SXJsWPHaNasmf7+vQnU+/Tpw5IlS+jevTvR0dFMmDCBiIgIAgMD2bZtW56J3UXByFRncirqFHXd6wJgZ2ZHr0qF1OMx4iz8MxMubAC0um1+jaHuu7rVBJWFM3RPq9Wy9VwEEzacIyYlC4UC+jbw5aNW5bEyfYH+vvg2ApUpJN2B6EvgUhFuHtDt86kLKuOiq8XpboB1L4C0leGDQgjxPF6gVyMhhBBCAHzyySfs3LkTCwsL1q9fj6Ojo6FLEgWsadOmaLXax7Z5//33ZchgEcjIyeD9v97nWMQxvmnyDc1LNS+cC4We0M3TdHnL/W0VOkDjUeBRvXCueVdUUgafbTjH9vO6edXKuljxdddq1CxlX6jXLRQmFuD3CgTvgqs7dQHWjf26faUaFm0tVq66FSGzknX3Zf4rIYR4LhJgCSGEEC+Q3377jRkzZgCwePFiAgICDFyRECWbicoEVwtXTFWm2JraFvwFYq7C7ilwcePdDQrdJOONR4Fr5YK/3gO0Wi3rToYyceN5kjNyMFIqGNqsDO8188fUqJhN0v40yrTUBVjBO6HBB7rVCUHXO6soKRS6ebDCT+nuywqEQgjxXCTAEkIIIV4QJ0+eZMCAAQCMHTuWbt26GbgiIUo+pULJ5AaTGVB1AKVtSxfciZMjYO/XcGKZboiZQglVu+mCq3tDzwpRXGoWn/xxlm3nIwAI8LLl667VqOhuU+jXLnRlW8K2MXD
zkG4C9bQYMDLXrUBY1JzK3g+wpAeWEEI8FwmwhBBCiBdAdHQ0QUFBpKen07ZtW7744gtDlyREiZWek86um7vo6N8RACOlUcGFVzmZcOh/unmuslN128q1heYTwLVSwVzjCXZfjGTM2rPEpGRipFQwomU53mlcGiOVskiuX+gc/cGhNMRdhw3v6bZ51wYjEwPU8kAYKXNgCSHEc5EASwghhCjmsrOz6d69O7du3aJs2bKsWLECleoFHt4jRDGm1qgZsXcEB0IPcCflDu8GvFtwJ7+yHbaN1QUrAF61oeUUKNWg4K7xGCmZOXyx6QIrj94GdHNdze4eSBXPQhgaaWh134Wto3UTuQOUKuLhg/c4+t+/LT2whBDiuUiAJYQQQhRzo0aNYs+ePVhZWbF+/Xrs7OwMXZIQJZZKqaKuW11ORJ6gnnu9gjlpwm3YMgqubNPdt3LTBVfVuunmSSoCp24nMOy3k9yKS0OhgAEN/RjVujxmxiU0DK87WDeH2I7xEHsNqnQxTB0PDge1kzmwhBDieUiAJYQQQhRjCxcu5LvvvgNg+fLlVKpUNEOMhHiZ9avSj/al2+Ni4fJ8J9Ko4ehC3STtWSmgNIb67+nmuTK1Lphin1SCRsvP+0OYtu0SORotnnbmzHwjgPr+L8Hqpb4NYfBe0GqLLCjMw7kC2JUCCwcwszNMDUIIUUJIgCWEEEIUU//88w9Dhw4FYPLkyQQFBRm2ICFKsMPhh6npWhMjpe7t8XOHV1GXYOP7cOeo7r53Pej0HTiXf85K8y8uNYuPVp9iz+VoANpXdeerLlWxNTcushqKBUOFVwBGpvD+MVCqDFuHEEKUABJgCSGEEMVQSEgIXbt21c9/9dlnnxm6pIKRmcyNsGOExV6kssYI26w0yEjgfPItlqQG46ZV8ZHSCdRZoM6Gyp2hziBDVy1KuD239jB8z3AaeDTgu1e/w0T1HJN9azRweB7smgTqTDCxhpaToWY/UBbdJOn/Xo9l+MqTRCZlYmKkZGLHSvSo44NCQpSiZ4jJ44UQogSSAEsIIYQoZpKTk+nUqRMxMTHUrFmTRYsWvXgfOlOiuHV9F7tu7sI0LY6eKWkQcwXS4xnp6cZVExPmRUTRMD0DgEQzM7a5u1AhMwvCTtw/j3uAgR6AeJlotBrMjMzwsvbCWPkcvZMSQ2H9uxDyt+5+2VbQ8Vuw8SiYQvNBo9Hy/Z5g5uy6gkYL/s6WfN+jBhXdbYqsBiGEEKIwSIAlhBBCFCNqtZoePXpw7tw53N3d2bBhAxYWFoYu6/FyMjlzcQ1HQnbwanISpSMuQnI4t8zNmO3mgl9WNj1Dw/XN/dWg1SjRelQHK18ws8Pf2IQxWVE4m9pD7SqgMtb9OJYx3OMSL43mpZqz0nYl3jbezx4WX9ioGzKYkQjGFtDqC6jVv0iHjSWmZzNy1Sl2X4oC4PWaXkx5rTIWJvKWXwghxItPXs2EEEKIYmT8+PFs2rQJMzMz1q9fj6enp6FLykOjziE0ZBfeoWfh+l64c5T/Odlw0MIcy5g4SienAAoqWfnQVmVBORcfqB8ETuXA3pcZD5m82hXoVcSPQ7zccjQ5qLVqTFWmAJS2K/1sJ1Jnw86J8O//dPc9akCXBeBUtOHrlchk3ll+nJCYVEyMlHwZVIU3ankXaQ1CCCFEYZIASwghhCgmli1bxrRp0wBYtGgRderUMXBFD8hKg+BdRFz4g+7Jx0hHy4Gbd7g32KqJWoWl0o5SAR2g3GvgWgUHUyumG7RoIR7tx9M/8vftv5neZDqlbZ8xvEoMhTX94PZh3f0Gw6D5BF3vwSK05Ww4o34/TVqWGk87c+b1qklVL9sirUEIIYQobBJgCSGEEMXAoUOHGDRIN1n5+PHjeeuttwxcEaSnxbHn6LcYhR6n1fVjkJOOK6D09kShVBJSrjnlynUA38b0cPSnx4s2T5d4aaVkpbD2ylpiM2K5Enfl2QKskH3wex9IiwVTW+j8I1RoX/DFPoZ
ao2XG9svM+/saAA38HZn7VnUcrUyLtA4hhBCiKEiAJYQQQhjYrVu3CAoKIisri86dOzNlyhTDFaPVwq1/4dSvbArZwhR7S8pnZtEqJx3sfFBU7MQi7+p4+bfF2NTScHUK8RysTKz4vePv7Li5gzZ+bZ7+BCeWwaYRoMkBt2rQbRk4+BV8oY+RnJHN+ytO8veVaAAGNy7Nx63LY6QqupUOhRBCiKIkAZYQQghhQKmpqbz22mtERUUREBDAsmXLUCqL/gNoeNhxNhydTfU7Z6kbfQOA1koFv9hY0tipGur2v6HyqA4KBUX7MV2IwuFs4UzPij2f7iCNGnZNhINzdfcrd4GgH8DYvOALfIzQhHQGLDnKpYhkzI1VTH+9Gh0Dim6lQyGEEMIQJMASQgghDESj0fD2229z6tQpXFxc2LBhA1ZWVkVXgFYLNw/Avz+yOPogv9lY0UKRRl0TK6gUhE1gDzb41AcDBGovkszMTExNZcjWi2B98Hr8bP0IcA54+oOz0mDtQLi8WXe/yVhoOrZIVxkEOHMngQFLjxGdnImztSmL+tSW+a6EEEK8FCTAEkIIIQxk4sSJ/PHHH5iYmLBu3TpKlSpVJNfNSI9ny4EvqRd8AI+ICwC8ZWzENWtHWlYKggZjwUSGBz7K1q1bWblyJfv27eP27dtoNBosLS2pXr06rVq1ol+/fnh4SG+Y4uZG4g2++PcL1Bo1v3X4jQoOFfJ/cHoCrOgOt/8Flamu11XV1wut1kfZfj6C4StPkpGtoYKbNT/3rY2nXdH2/hJCCCEMRQIsIYQQwgCWLl3KF198AcD8+fNp0KBB4V80LQ4Oz+OTK8vZaWZE38wkPjK2gIA38avzDj+7PMUH+ueg1mhJSs8mMT2bhLu/7/1k5WjIUWvI0WjJVmuo7mNPk3LORVLXk6xbt44xY8aQnJxMu3btGDNmDB4eHpibmxMXF8e5c+fYtWsXn3/+OX379uXzzz/H2bl41C7AztSO5j7NSchMoLx9+fwfmBIFy7tA5Fkws4Ueq8GnXuEV+hBarZaF+0L4autFtFpoUs6Z73tUx9qsaFc7FEIIIQxJAiwhhBCiiO3du1e/4uAnn3xCnz59CvV6MTGXsD3xC8bHFkNWCkHmZpx3dsG7bFtoNhXM7Qv8mulZaq5EJnM9JoU7cenciU/ndnwad+LTCUtIJ0ejzdd5BjTyKzYB1vTp05k9ezZt27Z96Dxl3bp1AyA0NJS5c+fyyy+/MGLEiKIuUzyCnZkd0xpPI1OdiSK/w/4SbsGyIIi7BpYu0HsduFUp1Dr/S6PRMmXTBZYcvAFA73qlmNixkkzWLoQQ4qWj0Gq1+XsHKUQ+JSUlYWtrS2JiIjY2NoYuRwghipXLly9Tv3594uPj6d69OytWrCi8SduTwvhpxwcsSL7MJ7FxdElJBbeqaBqNRFuhAyojkwK5TExKJqdvJ3AxPImL4clcjEjiRkwqT8qoLE1U2JobY2thgq25ETZmxpgZqzBSKTBWKjFSKWhYxol2Vd2fu0Z5bSpYL9LzqdVq8x9YPSj+BixuB0mhYOsDb68HR/+CLu+xstUaPl5zhnUnQwH4tH1FBjTye7bHI4QQJdyL9Nokno30wBJCCCGKSExMDO3btyc+Pp569eqxePHiwgmvUmPgnxlwbBHmlqZkOtrzr4MHXTp+AeVao3yOD79arZY78ekcCYnj6I04jtyI43p06kPbOlmZUNbFGm8Hc7zsLfCyv//b2doU4xLQgyQrK4uQkBD8/f0xMpK3VcXRJ/s/wd7MnvcC38PSOJ9zuyWGwtKOuvDKqRz0Xg+2noVa539lZKt5f8UJdl2MQqVU8M0bAQRVL9oahBBCiOJE3mkJIYQQRSAzM5POnTtz7do1/Pz82LBhA+bmBTv5clpKFEt3jaDx1X1UTk0E4HX7mpSp3In6NYY882qCKZk57L8aw55LUfxzNZrwxIw8bcq6WFHF05YKbtZUdLehorsNztYld2W+tLQ
0PvjgA5YuXQrAlStXKF26NB988AGenp6MHTvWwBUKgAuxF9h0fRNKhZIOpTtQybHSkw9KiYJlnXTDBx1KQ58/wdqt8It9QHJGNgOXHuNwSBymRkp+6FmD5hVdi7QGIYQQoriRAEsIIYQoZFqtlv79+7N//35sbW3ZvHkzLi4uBXeBnCw4sZRZx2exysKIE9YmLLAJhBaTsPBvxrNMD38jJpXdl6LYcymKwyGxZKvvjwc0Uiqo6mVLHV8Havs6UMvXHjuLghmO+KIYN24cp0+fZu/evbRp00a/vUWLFkyaNEkCrGKikmMl5rWYx9X4q/kLr9LiYNlrEBsMtt7w9sYiD69iUzLps/gI50KTsDY1YmGfWtQt7VikNQghhBDFkQRYQgghRCGbPHkyK1aswMjIiLVr11KxYsUCOa9Wo0F9eRNGOyZAfAj9jFQcM/Wka6W30TYcj0KleqrzhSaks+l0GH+eCeNcaFKufaUcLWhW3oVmFVyo4+uAucnTnbukWb9+PatWraJevXq55iOqXLky165dM2Bl4r8aejakoWfDJzfMSoVfukDUBbByg7c3gJ134Rf4gPDEdHouPMz16FQcLU1Y2r8OVTxti7QGIYQQoriSAEsIIYQoRL/88guTJ08G4Mcff6R58+YFct7rIX8x7Z+xVEqIYHh8Ili64Nl0DOuqv43iKSZnj0nJZPOZcP48Hcaxm/H67SqlgnqlHWhW3oVXK7hQ2tmqQOouKaKjox/aiy41NVUm2C4G0nPSUSqUmKryOYxVo4E/BkPYSTB30IVXRTxhe3hiOm/N/5cbsWl42pmzfEAd+f9OCCGEeIAEWEIIIUQh2bdvHwMGDADg448/ZuDAgc9/0oxE+Hs6N88s5qCLI6dtrBlQqQ9WjceAqRX5iU7UGi37rkaz8shtdl2MJOfucoEKBdT1c6BjgAdtq7jjYPlyDQt8GrVq1WLz5s188MEHAPrQauHChdSvX9+QpQlgyfklrL+6nvH1xtPYq/GTD9g1ES5tApUJvPUbuFQo/CIf8GB45e1gzm+D6uFlb1GkNQghhBDFnQRYQgghRCG4evUqQUFBZGVl0bVrV6ZOnfpc59NqNMQcW4Dz3zMgNZqmwFBVRTq8OgUr73wMj0L3Ifn3Y3dYdfQ2oQnp+u0BXrZ0CvSkQzV3XG3MnqvOl8VXX31F27ZtuXDhAjk5OXz77bdcuHCBgwcP8vfffxu6vJeaRqthy/UthKWGkZaT9uQDji+Fg9/pbr/2A/jUK9wC/0PCKyGEECJ/JMASQgghClhsbCzt27cnLi6OOnXqsGzZMpTPuAIgQMTtQ4zb/QHROSmsTYvG1LEsijZf827ZFk88VqvVcuxmPAv3XWfnhUjudrbCxsyILjW8eLOONxXcbJ65tpdVo0aNOHXqFF9//TVVq1Zlx44d1KhRg0OHDlG1alVDl/dSUyqUrO64mi3Xt9CqVKvHN77+N2weqbvddBxUe6PwC3xAVFKGhFdCCCFEPkmAJfIIDQ1lzJgxbN26lbS0NMqUKcPixYupVauWoUsTQohiLz09nU6dOnH16lV8fHzYsGEDFhbP+IE0JxP2z8Zy/yxuujuSojLiQsOhVG86GZ4wz1W2WsOWs+H8vD+EM3cS9dvr+DnwVh1v2lZxx8z45Z6I/Xn5+/uzYMECQ5chHsLcyJyu5bo+vlFiKPzeFzQ5UPUNaDKmSGq7Jz41i14/H+ZGbBpe9hJeCSGEEE8iAZbIJT4+noYNG9KsWTO2bt2Ks7MzV69exd7e3tClCSFEsadWq+nZsycHDx7Ezs6OrVu34ubm9kznunlxHaV2fwUxV7AGZpj44fbqJDw96zz2uOSMbFYcvsWSgzcIT8wAwMRISdcanvRr6Ec5V+tnqkfktmXLFlQqFa1bt861ffv27Wg0Gtq2bWugyl5u8Rnx2Jvl4z2LOgfWDoT0OHAPgE7f6yaBKyIpmTn0XXyEK5EpuNqYSnglhBBC5IMEWCKXadOm4e3
tzeLFi/Xb/Pz8DFiREEK8GLRaLSNGjGDdunWYmJiwYcMGKlWq9NTn0aQn8MWGN1mTcYefU6KobekCbb+mZuUuj/2AnZiWzaIDISw+EEJSRg4ATlamvF2/FD3r+uBolc/V2ES+jB07lq+//jrPdq1Wy9ixYyXAMoC07DQ6ru9IZcfKTH1lKg5mDo9uvHcq3DoIJtbw+mIwLrq53zKy1QxcepTTdxKxtzDmlwF18XaQ8EoIIYR4EgmwRC4bN26kdevWvPHGG/z99994enoydOhQBg0aZOjShBCiWJs1axZz584FYNmyZTRunI+Vz/7r2h6UGz8AoxS0Ntac8KtH7U5LwfzRPUpiUzL5eX8Iyw7dJCVTF1yVcbHincal6RTogamRDBMsDFevXn1oQFmhQgWCg4MNUNHTuXz5Mt27d891/7fffiMoKMhwRT2nY5HHSM5KJjQlFFsT20c3vPYX7PtGd7vTt+DoXzQFAjlqDe+vOMG/1+OwMjViWf+6lJVekUIIIUS+SIAlcrl+/To//vgjI0eO5JNPPuHo0aMMGzYMExMT+vTp89BjMjMzyczM1N9PSkoqqnKFEKJYWLlyJaNGjQJg5syZuYKB/EhLiUK75wssjy8F4CM7H9pVG0Gt6v0feUxcahbz/r7G8kM3Sc9WA1DBzZphzcvSprIbSmXRDYd6Gdna2nL9+nV8fX1zbQ8ODsbS0tIwRT2F8uXLc+rUKQBSUlLw9fWlZcuWhi3qOTX2aszmzpuJSotCpXxEcJscAX8MBrRQsx9UecI8WQVIq9Xy2YZz7LoYhamRkp/71KKq12OCNiGEEELkIgGWyEWj0VCrVi2++uorAKpXr865c+eYN2/eIwOsqVOnMnny5KIsUwghio2///5b//dx2LBhjBw58qmOP3lqMeNOzKJeWgqTAGoPxLLFZGqZWj20fWpmDgv3hbBg33V9j6uqnrZ88GoZWlR0leCqiLz22mt8+OGHrFu3Dn9/XQ+e4OBgPvroIzp16mTg6p7Oxo0bad68+QsRvD2Jl7UXXtZeD9+p1cL6oZAaDa5VoM3UIq3tf3uC+e3IbZQKmPtWdeqWdizS6wshhBAvumdf01uUSO7u7nmGRFSsWJFbt2498phx48aRmJio/7l9+3ZhlymEEMXC+fPnCQoKIisriy5dujBr1iwU+Z0IOjsDto1Ds3UMYUothywsSerxG7T/Bh4SXmXmqFlyIITG0/cwe9cVUjJzqOxhw+K+tdn4fkNaSa+rIjV9+nQsLS2pUKECfn5++Pn5UbFiRRwdHZk5c+Zzn/+ff/6hY8eOeHh4oFAoWL9+fZ42//vf//D19cXMzIy6dety5MiRZ7rW6tWrn7rXYHGTnpP+5EYnl8O13WBkBq8vAmPzwi/srrXH7zBzxxUAJnWqTKvKz7a4gxBCCPEykx5YIpeGDRty+fLlXNuuXLlCqVKlHnmMqakppqYyObAQ4uUSFhZG27ZtSUhIoEGDBvzyyy+oVPmbbyo7/AzG64ZA1HlqAjOsqtCo9Wwsrd3ztNVotGw8HcbMHZe5E6/7kO7raMGo1uVpV8VdQisDsbW15eDBg+zcuZPTp09jbm5OtWrVnm3us4dITU0lICCA/v3706VLlzz7V61axciRI5k3bx5169Zlzpw5tG7dmsuXL+Pi4gJAYGAgOTk5eY7dsWMHHh4egG7Y/8GDB1m5cmWB1G0IwfHB9NzSk6AyQYytM/bhIXLiHdg+Xnf71U/BuXyR1bf/agxj1p4B4J0mpXm7vm+RXVsIIYQoSSTAErmMGDGCBg0a8NVXX9GtWzeOHDnC/PnzmT9/vqFLE0KIYiMpKYl27dpx+/ZtypUrx8aNGzE3f3JvDq1Gw8odw1h+ZzcrYiKws3CCoB9oXa71Q9sfvxnPlE0XOH07AQAXa1OGtyhLt1reGKukE7WhKRQKWrVqRatWrQr83G3btn3sSoazZs1i0KBB9OvXD4B58+axefNmFi1axNixYwH0c1w
9zoYNG2jVqhVmZkW3Cl9B23FzB2k5aUSlRT08vNJqYfNHkJkEXnWg3tAiq+1adArv/nqcHI2WTgEejGldociuLYQQQpQ0EmCJXGrXrs26desYN24cU6ZMwc/Pjzlz5tCzZ09DlyaEEMVCVlYWXbt25fTp07i4uLBt2zYcHfMxl01KNJnrh7Ai/Ty3TYz53TeQQV1WgZVLnqZhCelM23aJDafCALA0UTG0WRn6N/TD3ERWFSwudu/eze7du4mKikKj0eTat2jRokK7blZWFsePH2fcuHH6bUqlkhYtWnDo0KGnOtfq1asZPHjwE9sV5wVb3g14l1qutbAxtXl4g0ub4co2UBrDa9/DoyZ4L2CJ6dkMWnqM5IwcapWyZ8Yb1aTHpBBCCPEcJMASeXTo0IEOHToYugwhhCh2NBoNgwYNYteuXVhaWrJ582b8/PyefODVnbD+XcxSo5luZsmJgNfo0fp/oMzdiyotK4d5f19n/j/XyMjWoFDAGzW9GNW6PC7WL24PmZJo8uTJTJkyhVq1auHu7p7/uc8KQExMDGq1GldX11zbXV1duXTpUr7Pk5iYyJEjR1i7du0T2xbnBVsUCgV13Os8fGdmCmz9WHe74bAiGzqo1mj54LeTXI9JxcPWjB971cTUSMJnIYQQ4nlIgCWEEELk05gxY1i2bBkqlYrVq1dTq1atx7bPzEhk9sae1LxxjJZp6eBSiYpdF1LRtXKudlqtlh0XIpny5wVCE3TzXNXxdWBCx0pU8bQttMcjnt28efNYsmQJvXv3NnQpz8zW1pbIyMh8tR03blyuFTaTkpLw9vYurNIKzj8zICkU7ErBK6OK7LJfb73IP1eiMTdWsaBPLZytZa5QIYQQ4nlJgCWEEELkw4wZM/Sry/3888+0a9fu8QfEXWflH2/yq3E6m5wcqO/aBqtWX+ZZ+exWbBqT/jzPX5eiAPC0M+eTdhVpV9WtSHv1iKeTlZVFgwYNDHJtJycnVCpVnvApMjISN7fCWd2uOC7YEpEawdDdQ+lQugP9KvfL+/9L/A349wfd7bbTwMSiSOpae/wOC/aFADDzjQAqe0gILYQQQhQEmQFWCCGEeIKlS5fy8ce6YUgzZsygT58+jz/gwkb4qQk97lymaUYOX1V+B6v2s3KFV5k5aubuvkrL2X/z16UojFUK3mvmz66RTWhfrWiHpImnN3DgQFasWGGQa5uYmFCzZk12796t36bRaNi9ezf169c3SE2GsCVkC1fjr/L37b8f/v/LrkmgzgK/JlCuTZHUdCkiifHrzwIw7NUytK+Wd2VRIYQQQjwb6YElhBBCPMamTZsYMGAAAKNGjWLUqEcPQ8rOTGXr5nfoeOZPFICxd13mvr4IbL1ytTt4LYbx684REpMKQAN/R6a8VoUyLlaF9jhEwcrIyGD+/Pns2rWLatWqYWxsnGv/rFmznuv8KSkpBAcH6++HhIRw6tQpHBwc8PHxYeTIkfTp04datWpRp04d5syZQ2pqqn5VwpfB6+Vex9bEFmcL57w7bx2G8+tAoYTWX0ERBMLJGdkM/eUEGdkampRz5sMW5Qr9mkIIIcTLRAIsIYQQ4hEOHDhAt27dUKvVvP3220ybNu2RbdXxIQxc34UTyhzSra3oXrU/NJ8AqvvBRmJ6Nl9vvchvR24D4GxtymcdKtFRely9cM6cOUNgYCAA586dy7WvIP5bHjt2jGbNmunv35t/qk+fPixZsoTu3bsTHR3NhAkTiIiIIDAwkG3btuWZ2L0kszGxoWu5rnl3aLWwe4rudmBPcKtS6LVotVrG/nGW6zGpuNuaMbt7oKw4KIQQQhQwCbCEEEKIhzh37hwdOnQgPT2d9u3bs3DhQpTKR4y8v7wV1bohvGqiJtjeDteGI6HeR7ma7LwQyafrzxKZlAlAr3o+fNymAjZmxg87oyjm9uzZU6jnb9q0KVqt9rFt3n//fd5///1CreOFFPI33NwPKhNoOrZILrn835tsPhOOkVL
B9z1q4GBpUiTXFUIIIV4mEmAJIYQQ/3Hz5k1at25NQkICDRo0YPXq1XmGiAHkZGeQ/tdkrA/pJop+27EG7drMwtm9ur5NTEomkzaeZ9OZcAD8nCz5uktV6pZ2LJoHIwpVcHAw165do3Hjxpibm6PVaqU3XRH49sS3VHCoQBOvJpgZmd3fodXCX1/obtfqn2f4bmE4cyeBzzddAGBs2wrULGVf6NcUQgghXkYSYAkhhBAPiI6OplWrVoSFhVG5cmX+/PNPLCzyrl4WH3eN0X++iTY9gZ8Ao7rvomg5BWcjXc8LrVbLupOhTNl0gYS0bFRKBYMbl2Z487KYGauK+FGJghYbG0u3bt3Ys2cPCoWCq1evUrp0aQYMGIC9vT3ffPONoUssscJSwlh4diFKhZI93fbkDrCCd8Odo2BkDo1GFnotaVk5DF95imy1ljaV3RjQyK/QrymEEEK8rGQVQiGEEOKulJQU2rdvz5UrV/Dx8WH79u04ODjkbRh2irjlr3FWk8ZZU1Outp8Kbb+Gu+FVRGIG/ZYcZeTq0ySkZVPJ3YYN7zVkTJsKEl6VECNGjMDY2Jhbt27lCji7d+/Otm3bDFhZyadSqOhXuR8dSnfAwew//3/un637Xas/WBf+fGBfbL5IyN15r6Z1rSa974QQQohCJD2whBBCCCArK4suXbpw9OhRnJyc2LFjB56ennkbnvoNNn2If04GM419cWs1jbJl2gC6XlcbT4fx2fpzJGXkYGKkZHjzsgxuXBpjlXxnVJLs2LGD7du34+WVe4ha2bJluXnzpoGqejm4WroystZDelfdOaab+0ppDPXfK/Q6dl2IZMXhWwB880YAthYyn50QQghRmCTAEkII8dLLycmhZ8+e7Ny5E0tLS7Zs2UL58uVztcnOTuO7dd3peuUgvjk5ULY1r3SZD+Z2AMSnZvHphnNsvjvXVYCXLd90C6SMi1VRPxxRBFJTUx86tDQuLg5TU1MDVCT0va+qdQPbh4TPBSg6OZMxa88AMLCRHw3KOBXq9YQQQgghQwiFEEK85DQaDYMHD2bNmjWYmJiwbt06ateunbtRciSzfnmVJek3GOHqRE7jj+Gtlfrw6q9LkbSa849+FbIRLcqx9t0GEl6VYK+88grLli3T31coFGg0GqZPn06zZs0MWFnJdivpFsHxwXlXaIwJhkubdbcbDi/UGrRaLWPXniE2NYsKbtaMal3+yQcJIYQQ4rlJDywhhBAvLa1Wy4gRI1i8eDEqlYqVK1fSsmXL3I1uH4XVvRmQFskhd3eGVRmEUf1RAKRk5vDl5gv8duQ2AGVcrJjdLZCqXrZF/VBEEZs+fTrNmzfn2LFjZGVl8fHHH3P+/Hni4uI4cOCAocsrsZZdWMaqy6voV6UfI2s+MIzw2M+AFsq2BufCDZR+P3aH3ZeiMFEpmfNmoMxrJ4QQQhQRCbCEEEK8tCZOnMh3330HwKJFi+jcuXOu/df2z8T/r69Bk42TU3nWvrYMlXMFAI7eiGPk6lPcjktHoYABDf0Y1bq8fJh9SVSpUoUrV67w/fffY21tTUpKCl26dOG9997D3d3d0OWVWDmaHExVpgQ4B9zfmJUKJ3/V3a4zuFCvH5WUwRebLwDwUatyVHCzKdTrCSGEEOI+CbCEEEK8lGbOnMnnn38OwPfff8/bb7+t35eTnc5XazuzLuMOC42V1CzdCYJ+QGVqTY5aw3d/BfP9X1fRaMHTzpyZbwRQ39/RUA9FFLHs7GzatGnDvHnzGD9+vKHLealMajCJcXXHoXxwFoxzayEzEex9wf/Vwr3+n+dJysihqqctAxr5Feq1hBBCCJGbBFgvgIsXL7Jy5Ur27dvHzZs3SUtLw9nZmerVq9O6dWu6du0qE8YKIcRTmD9/PqNHjwbgq6++4r33HlixLC0O1ereJKdeQW1pwZWqnajZfgEoFNyOS+PDVac4fjMegK41vJjUqRLWZrL62MvE2NiYM2fOGLqMl5ap6oH3PFo
tHFmgu11rACgLb3rX7ecj2HI2ApVSwdddq2IkK4sKIYQQRUpeeYuxEydO0KJFC6pXr87+/fupW7cuH374IZ9//jm9evVCq9Uyfvx4PDw8mDZtGpmZmYYuWQghir3ffvuNIUOGADB27FjGjRt3f2fURVjQDMWN/UxJymJBpSG81WEhKBRsPB1Gu2/3cfxmPNamRnz3VnW+6RYg4dVLqlevXvz888+GLuOlotFq8m4MOwkRZ0BlCtV7Fdq1E9Oz+Wz9OQDeaVyayh4yz50QQghR1KQHVjHWtWtXRo8ezZo1a7Czs3tku0OHDvHtt9/yzTff8MknnxRdgUII8YL5888/6d27N1qtlqFDh/LVV1/p9/1zeA7nj3zPu/HRYFcK8x6rqOtSkZTMHCZtPM+a43cAqOFjx7dvVsfbwcJQD0MUAzk5OSxatIhdu3ZRs2ZNLC0tc+2fNWuWgSormbRaLe3+aEcpm1JMaTAFV0tX3Y7TK3W/K3YAC4dCu/7XWy8RlZyJn5Mlw5qXLbTrCCGEEOLRJMAqxq5cuYKx8ZO/2a9fvz7169cnOzu7CKoSQogX019//cUbb7yBWq2md+/ezJ07F4VCAVotIXs/54Obq9HYmFPZqiaNu/0Olo6cuZPAsN9OciM2DaUC3n+1LMNeLSNDhwTnzp2jRo0agO71+kEKhcIQJZVod1LuEJoSSmRaJHZmdrqN6mw4t0Z3O+CtQrv28Ztx/HbkFgBfd6kqCzUIIYQQBiIBVjGWn/AKIC0tDQsLi3y3F0KIl82///5Lp06dyMzMJCgoiEWLFqFUKiEnE/78EL/TKxhgb0ucSwXqd1mH1sSChf9cZ9q2S+RotHjYmjHnzerU8Su8Hh7ixbJnzx5Dl/BScbd0Z03HNdxKvnV/DqzgXZAWC5YuULpZoVxXrdEyaaNu1cHutbypW1oWaxBCCCEMRb5CfkE0b96c0NDQPNuPHDlCYGBg0RckhBAviJMnT9K2bVtSU1Np2bIlK1euxMjIiNiYy6QtbQ+nV4BCyQd1P2Fi922kqo0ZtOwYX265SI5GS7uqbmwd3ljCK/FQwcHBbN++nfT0dEA31E0UPCOlEeUdytOyVMv7G+8NH6z6BqgK5zvZ34/d5mxoItZmRoxuU75QriGE+H979x0eVZX/cfw9kzKT3hMISegC0kKXIjWCoCBgQWwIig0EjLo/2F11rai7IhZWXF3FsiqWBaVIka6C1ChSBQKEkgbppM7c3x+RrEhASsidZD6v55knmXvv3Pkcx+FMvnPOuSIi50YFrBrCbrfTpk0bZs+eDYDT6eRvf/sbPXr0YNCgQSanExFxTT/99BMJCQlkZ2fTvXt35syZg81mY9fu+Yz86nr+WpyM0xYEt36O5Yr72HIoh2te/ZZvdqTj7Wnl2WGtmHFLe4J8NcJVTnXs2DH69evHZZddxqBBgzh69CgAd911Fw8//LDJ6dxAUQ7s+rr897Y3X5KnyCks5e+LdwEwsV9Twv11xWcREREzqYBVQyxYsICnnnqKMWPGcMstt9CjRw/eeust5s+fz/Tp082OJyLicrZv305CQgLHjx+nS5cuLFy4sHyh7R3zOPHl/WRa4Re7L1l3fIHRuC9vrd7HTTPXcji7kAZhvsx5oBu3dqmv9YykUg899BBeXl4cPHgQX9//Leg/YsQIFi1aZGKy2qfEUcKrm19lZcpKHE5H+cbdS8BRDOGXQZ3Wl+R5X132C8cKSmgS6c+obg0uyXOIiIjIudMaWDXIuHHjOHToEC+88AKenp6sXLmSbt26mR1LRMTl7Nq1i759+5KRkUH79u1ZtGgRgQEBsPrvsPwZ2gGvRbag1fVvY3jVYez7G/lmRzoA17apy9ThrQmwa9SVnNmSJUtYvHgxMTExp2xv2rQpBw4cMClV7bTj+A7e2voWofZQVt60snzjznnlP5tfC5egyLwnPY/3vt8PwOPXXo6XLtwgIiJiOhWwaoisrCzuvvtuli1bxptvvsm
qVavo378/L774Ig888IDZ8UREXMaePXvo27cvaWlptG3blqVLl+Lr48HfPx7AqL0biQToch/d+z/L5sN5PPjRtxzOLsTbw8pjgy/nti5xGnUlf6igoOCUkVcnHT9+HJtNU82qko+nD8OaDMPH06f8vVlaBL98U76zxbVV/nyGYfDkvO2UOQ0SWkTR87KIKn8OEREROX8qYNUQrVq1omHDhmzZsoWGDRsyduxYZs+ezQMPPMCCBQtYsGCB2RFFREyXnJxM3759OXLkCC1btmTp0qWE2uGJj6/iv5YCkqIi+CD+USyd7uLf3ybz/NflVxmsH+bLjFva06pekNlNkBriyiuv5P333+fpp58GwGKx4HQ6efHFF+nT59JcEc9dXRZyGU91f+p/G/athNICCKwH0e2r/Pm+23OMNb9klhe1r21R5ecXERGRC6MCVg1x33338Ze//KX8su+/GjFiBN27d2f06NEmJhMRcQ0HDx6kb9++pKSk0Lx5c5YtW0aENRfevpG7cw+woW4U49o9SF6rUTzywSaWbk8D4JrWdXn+ek0ZlPPz4osv0q9fPzZu3EhJSQl/+tOf2LZtG8ePH+e7774zO17tVjF98Joqnz5oGAZ/X7wTgFuviKN+mF+Vnl9EREQunApYNcRjjz1W6faYmBiWLl1azWlERFzL4cOH6du3L/v376dp06YsX74c//xt8PkYKMwiNiiWLwd9RLK1IUNnfEdyZkHF6IrbrtBC7XL+WrVqxe7du3n99dcJCAggPz+f4cOHM27cOOrWrWt2vFqj1FnKidITBNl+HR3pdPzv6oPNq3764OJtafx4KAdfbw/G9WlS5ecXERGRC6cClgs7ePAgcXFx53z84cOHqVev3iVMJCLielJTU+nbty979+6lUaNGLF++nI07Z/BC8hzeduTTPLo9jPyEpfudPPLZd5wocRAdZGfm7R1oExNsdnypQYYPH86sWbMIDAzk/fffZ8SIEfzlL38xO1attvv4bm5ecDPNQprx+ZDP4eiPcOIY2AKhfvcqfS6H0+ClJbsAuKtHQ8L9tZaZiIiIK9ElVVxYp06duPfee9mwYcMZj8nJyeGtt96iVatWfPHFF9WYTkTEfOnp6fTr14/du3cTFxfH8mXLqLf7febt+oQcDyufx7XGMWo+z3+bxQP/2cyJEgfdGocx78EeKl7JeZs/fz4FBQUAjB49mpycHJMT1X4p+SkABNoCyzfsW1H+s2FP8Kja72G/TDrML+n5BPl4cfeVjar03CIiInLxNALLhe3YsYNnnnmGq666CrvdTocOHYiOjsZut5OVlcX27dvZtm0b7du358UXX2TQoEFmRxYRqTZpaWn069eP7du3U69ePVZ8s5j6m5+Dn2bzD6uFOa0Hcm3Cv7nzw62s+SUTgHt6NuJPA5rh6aHvb+T8NW/enClTptCnTx8Mw+DTTz8lMDCw0mPvuOOOak5XO13d4Gp6RPcgtyS3fMO+leU/G/Wu0ucpKXPy8je7AbivV2OCfLQmnoiIiKuxGIZhmB1CKvfTTz/RsmVLSkpKWLhwIWvWrOHAgQMUFhYSHh5Ou3btGDBgAK1atTI76ilyc3MJCgoiJyfnjB/sRUQuxslpgzt27CA6Opp5894nI+kpBhxMAosHXDuNn+sM474PN3EoqxAfLw9euKENQ9pGmx1dTFIVfdN3333Hww8/zN69ezl+/DgBAQGVrp9msVg4fvz4xUZ2aab09SUn4IX64CiB8RshvGmVnfqDtft57MttRATYWP1oH3y8Pars3CIiUj30d2jtpxFYLqxdu3akpqYSERHBo48+yoYNGwgLCzM7loiIqY4ePUrfvn3ZuXMnMTExfP3FDP4vaRIpVgPvwFD6XPcOc3KbMvmN7ykucxIX6subt3egRV19kJGL0717d9atWweA1Wpl9+7dREZGmpzKjaSsKy9eBdaDsKpbYL24zMHrK/YAMKFvExWvREREXJTmULiw4OBg9u3bB8D
+/ftxOp0mJxIRMdfhw4fp3bs3O3fuJDY2lnWzX6Ll8gfpUpBHtNNC9OCZ/G1bJA/N/pHiMie9m0Uwb3wPFa+kSgwfPpzc3PKpbO+++y4BAQEmJ6rdCkoLmLJmCm9vfRun4YS9v65/1ag3VOGVQ+duOUxabjF1Au2M6HTuF88RERGR6qURWC7s+uuvp1evXtStWxeLxULHjh3x8Kj8W8GThS4Rkdrq0KFD9OnThz179lC/fn3Wv/UQkd88AM5S/hzWgf0Jr/Lnr/NYn7wfgPF9mvDQVZfhYa26P3TFvZ1cxD0wMJAxY8YwcOBAfHx8zI5Va+3N3sv8ffMJ9wnn7tZ3X5L1rxxOgzdXlX+GuvvKhnh76rtdERERV6UClgv717/+xfDhw9mzZw8TJkxg7Nix+rZXRNzSwYMH6dOnD/v27aNBg/qMey6eN3a+zOPOUiyXD2Vbpxe498NtpOYW4W/z5KWb2jKgZR2zY0sto0Xcq1e4TzgT2k0ov1OUC6lby39v2LPKnmPp9lT2ZRYQaPfk5s4afSUiIuLKVMBycVdffTUAmzZtYuLEiSpgiYjb2b9/P3369GH//v00b9KQ9//amruK92AEBnBto2v5JfwRHn97CyUOJ40i/PjX7R1pEulvdmyphWbOnEliYiILFizAYrHw17/+9YyLuKuAdfGi/aMZ22Zs+Z19KwEDguIgoGqK04Zh8Mavo6/u6NoAf5s+FouIiLgy9dQ1xLvvvmt2BBGRapecnEzv3r05ePAgnVs2YvW4WGzJK/m/oCC8W93E53mj+HjONgD6Xx7FSze1JcDuZXJqqa26deumRdzNcmhj+c+YjlV2ynX7jvNjSjY2Tyt3dm9QZecVERGRS0MT/UVExCXt3buXXr16cfDgQfr2a8yiMT7Y0raALYiBCW/z0S/D+Hj9QSwWeKT/Zcy8rYOKV1JtkpOTiYiIMDvGORk2bBghISHccMMNp+2bP38+zZo1o2nTprz99tsmpDuz5Jxk8kvyy+9UFLA6Vdn5Z67aC8BNHWMJ97dV2XlFRETk0tAILBERcTm7d++mX79+HDp0iGE3NCJtkI1Hi4t5wxrHzt7/ZvSXuWTmZxNo9+SVke3o00yjYOTS++mnn2jVqhVWq5WcnBy2bt16xmPbtGlTjcnObuLEiYwZM4b33nvvlO1lZWUkJiayYsUKgoKC6NChA8OGDSMsLMykpP/jNJxc/9X1lDpLWTx8EdGHNpTvqKIC1rYjOazanYHVAmOvbFQl5xQREZFLSwUsERFxKVu3buWqq64iLS2NyVfHcUf7AkZhp9AWwH+avMwzn2ZS5jRoXieAN2/vQP0wP7Mji5uIj48nNTWVyMhI4uPjsVgsGIZRsf/kfYvFgsPhMDHpqXr37s3KlStP275+/XpatmxJvXr1ABg4cCBLlixh5MiR1ZzwdLnFufh6+ZJXkkdESSGcyASrF9RpXSXnf2t1+dpX17SJJi7Mt0rOKSIiIpeWphCKiIjLWL9+Pb169SI9LY1/3RzH1C7ZtCgu5C2fy6lrf4W/fZNPmdNgcNto/vtANxWvpFr9dtpgcnIy+/btIzk5ueJ28v6+ffvO+ZyrV69m8ODBREdHY7FYmDt37mnHzJgxgwYNGmC32+nSpQvr16+vkvYcOXKkongFUK9ePQ4fPlwl575YwfZgvr35W9aOXIvXkaTyjXXbgJf9os+dnlfEgq1HAbhHo69ERERqDI3AEhERl7B69WquueYaykoLGPt8U64uPQZlkNtpAn/dm8DWI7lYLTBlYAvuvrJhpVd/E7mU6tevX+nvF6OgoIC2bdsyZswYhg8fftr+2bNnk5iYyMyZM+nSpQvTp09nwIAB7Nq1q2IB+fj4eMrKyk577JIlS4iOjq6SnGbx9fKFKp4++PEPKZQ6DDrUD6F1TFCVnFNEREQuPRWwRETEdIsXL2bYsGH4WYq47dnGfBNuY2JJBM/FTmDk+iZkncgn1M+b10e2o1uTcLPjipv
66quvzvnYIUOGnNNxAwcOZODAgWfcP23aNMaOHcvo0aMBmDlzJgsWLOCdd95h8uTJACQlJZ1zrt+Kjo4+ZcTV4cOH6dy5c6XHFhcXU1xcXHE/Nzf3gp7zglThAu6lDif/+eEAAHd0rZoipIiIiFQPFbBERMRUc+bMYcSIETQKdLDi7gg8T2RysLQuV4bcyaBV9XEapbSuF8TM2ztQL9jH7LjixoYOHXrK/crWwDqpKtbAKikpYdOmTUyZMqVim9VqJSEhgbVr1170+Tt37szPP//M4cOHCQoK4uuvv+axxx6r9NipU6fy5JNPXvRznquPd37M9mPbuabBIK5I21a+sW78RZ938bZU0vOKiQiwMbBV3Ys+n4iIiFQfrYElIiKm+fDDD7nxxhvp1sTCxvuDqWsvIiwgjraezzB9Y2ucBtzQIYbP7uuq4pWYzul0VtyWLFlCfHw8X3/9NdnZ2WRnZ7Nw4ULat2/PokWLquT5MjMzcTgcREVFnbI9KiqK1NTUcz5PQkICN954IwsXLiQmJqai+OXp6clLL71Enz59iI+P5+GHHz7jFQinTJlCTk5OxS0lJeXCG3YOvj/yPXP3zOVg2mYoKwRPO4Q2vOjzfrC2fPTVyM5xeHvqY7CIiEhNohFYclbPP/88U6ZMYeLEiUyfPt3sOCJSi7z55pvcf//9XDMylPSEKHalZtAyJJ47CiaxfqcVT6uFJwZfzm1X1Nd6V+JyJk2axMyZM+nRo0fFtgEDBuDr68s999zDjh07TEx3qm+++eaM+4YMGXJO0x1tNhs2m60qY53VjZfdSKuwVsQ7fv2oGt4UrB4Xdc59Gfn8kHwcqwVu7hRbBSlFRESkOqmAJWe0YcMG3nzzTdq0aWN2FBGpZV566SUefeQRnu5r41j3IPZbrXxepwX3HphEZpGViAAb/7y1PZ0ahJodVaRSe/fuJTg4+LTtQUFB7N+/v0qeIzw8HA8PD9LS0k7ZnpaWRp06darkOVxVz5ie9IzpCWteKt8Q0eKizzl7Y/mosV6XRRCtEZ0iIiI1jsZOS6Xy8/O59dZbeeuttwgJCTE7jojUEoZh8Le//Y2/Tn6Ej6/34S9X2ng24xi3OZoze3cimUVW2scFM//BHipeiUvr1KkTiYmJpxSX0tLSePTRR8+4EPr58vb2pkOHDixbtqxim9PpZNmyZXTt2rVKnsPlpe8s/xnZ/KJOU+pw8sWmQwDc3DnuYlOJiIiICTQCSyo1btw4rrnmGhISEnjmmWfMjiMitYDD4WDChAl8/vm/eOpPUYzwKsSwevFx2EO8sTsegFu7xPHE4JZam0Zc3jvvvMOwYcOIi4sjNrZ8OlpKSgpNmzZl7ty553ye/Px89uzZU3E/OTmZpKQkQkNDiYuLIzExkVGjRtGxY0c6d+7M9OnTKSgoqLgqYW2UX5LP0YKj1PWri3/Gr1MxL3IE1rIdaWTmlxDub6Nv88gqSCkiIiLVTQUsOc0nn3zC5s2b2bBhwzkdb+qltUWkRiguLuaOO+7gp+/+S+unG/O+zYvLsk6wsngSc1Ia4e1h5ZmhrbhJ69JIDdGkSRN++uknli5dys6d5aOEWrRoQUJCwnmt2bZx40b69OlTcT8xMRGAUaNGMWvWLEaMGEFGRgaPP/44qampxMfHs2jRotMWdq9NNqdvZtyycTQPacZnGbvLN17kCKzPNpaPvrqhQwxeHiqQi4iI1EQqYMkpUlJSmDhxIkuXLsVut5/TY6r70toiUrPk5eUxfPhwHHtWsPY2X94rPMHXnoHMyLuf3QWNqBtkZ+ZtHWgbG2x2VJHzYrFY6N+/P/3797/gc/Tu3RvDMM56zPjx4xk/fvwFP0dNc6LsBEG2IOp6B4GjGDx9ILjBBZ/vWH4xq3ZnAHBDh3pVlFJERESqm76CklNs2rSJ9PR02rdvj6enJ56enqxatYpXX30VT09PHA7HaY+p7ktri0jNkZGRQb9+/Yg5torFt/kSbLcwvCyOtD0
Ps7ugBV0ahjLvwR4qXkmN8Mknn5zzsSkpKXz33XeXME3tdXWDq/n25m+ZFje4fEPEZWC98I+s8348QpnToE1MEE0iA6oopYiIiFQ3FbDkFP369WPr1q0kJSVV3Dp27Mitt95KUlISHh6nX8LaZrMRGBh4yk1E5MCBA/S8sgcNex4m9K4YPDwsrPXtQ0LGw2Q5IxnTvSEf3t2FcH+b2VFFzskbb7xBixYtePHFF9mxY8dp+3Nycli4cCG33HIL7du359ixYyakrD08M34p/+Ui17+as+UwAMPaafSViIhITaYphHKKgIAAWrVqdco2Pz8/wsLCTtsuInIm27Zt47pB/ZnUr4B/t4phu8VCwIkOzDo2CruXB69c34br4vXHpNQsq1at4quvvuK1115jypQp+Pn5ERUVhd1uJysri9TUVMLDw7nzzjv5+eefa/U6VdXi5ALukRdewNqbkc+Ph3LwsFoY3Da6ioKJiIiIGVTAEhGRKrV27VpG3TCID64uoWusJ3GZ2Xxq9GbWsZHEhvrw5m0duTxaIzWlZhoyZAhDhgwhMzOTb7/9lgMHDlBYWEh4eDjt2rWjXbt2WC9iupvA1B+mkl+az93HdtAQIKLZBZ/ry19HX/W6LEKjPUVERGo4FbDkD61cudLsCCJSQ3z99df8+S8388nNFtoHeHLCI4D/HJvED0YLrmwazmsj2xHs6212TJGLFh4eztChQ82OUSstT1lOakEqI/PzyzcE17+g8xiGwfytRwG4Ll6jr0RERGo6FbBERKRKvP/++7z34QRs42P4R2kpz2VYGVPwMPuMaB7o3ZiH+zfDw2oxO6aIuLgH2z1IZt5h6n315/INQTEXdJ6dqXnsyyjA29NKvxaazikiIlLTqYAlIiIXxTAMnnn6aTIWPMvb1/hxOwYl+DD0xERKvaJ548a2DGxd1+yYIlUqJCQEi+X0gqzFYsFut9OkSRPuvPNORo8ebUK6mm1I4yGQvgOcTrAHgf3Cphwv/HX0Ve/LIvC36SOviIhITafeXERELlhpaSkPPnAf8Uc/4rGBdnA6uOVwfV4uuJvY8DDevL0DTaN02XqpfR5//HGeffZZBg4cSOfOnQFYv349ixYtYty4cSQnJ3P//fdTVlbG2LFjTU5bA2WnlP8MirughxuGwYKfygtY17RRAV1ERKQ2UAFLREQuSF5eHqPuuA5b9/30aeCHs7SMqaUjeavoGhJaRDFtRDyBdi+zY4pcEt9++y3PPPMM99133ynb33zzTZYsWcIXX3xBmzZtePXVV1XAOg8nSk9wtOAowcd2EQYQHHtB59lxNI99mZo+KCIiUpvoMjkiInLejh49yu2DulK/+z5+jvDj0cgI7i55iLed1/JQQjP+dXtHFa+kVlu8eDEJCQmnbe/Xrx+LFy8GYNCgQezbt6+6o9VoO4/vZOiXQ7lj73/KN1zg+leLtqUC5Vcf1PRBERGR2kEFLBEROS/bt2/n4WEdeLfbQf52Ipv2hQ4KDt/CBu8r+PeojkxMaIpVi7VLLRcaGsq8efNO2z5v3jxCQ0MBKCgoICBAU2jPR7GjmCBbEMHGrxuCLmwE1jfb0wAY0LJOFSUTERERs+krKREROWcrV67kP9Nu4P3+JXhaLWwua8K2/YmERcXw1e0daBjuZ3ZEkWrx2GOPcf/997NixYqKNbA2bNjAwoULmTlzJgBLly6lV69eZsascbpGd+Xbm7/F+Hf/8g0XMIXwcHYh24/mYrVAn2YRVZxQREREzKICloiInJOP//MBX+34C9uH1+H7tAyO5XdgculY+rdtwAvXt8bXW12KuI+xY8dy+eWX8/rrr/Pf//4XgGbNmrFq1Sq6desGwMMPP2xmxBrNknOo/JcLGIG1bEf56KsO9UMI87dVZSwRERExkf7aEBGRszIMg+kvPEXzn1+kWZdIfrZYeMfaiTWO+/nztZczpnsDLBZNGRT30717d7p37252jNrHUQp55VcQvJAC1tJfpw9
edbkWbxcREalNVMASEZEzKikp4YkHb+N2z4Vc3sSTbpn57My5mp8Zzn/ubs8VjcLMjihiGofDwdy5c9mxYwcALVu2ZMiQIXh4eJicrOb6dNenJB36loF2b64sMcDv/KYA5haVsm7fMQASdPVBERGRWkUFLBERqdSxY8d4+M/9aNLsOC1yPThqhHB3ySN4R7Vj3q3tqRvkY3ZEEdPs2bOHQYMGcfjwYZo1awbA1KlTiY2NZcGCBTRu3NjkhDXTxtSNfH1oBS28vLjSJwKs53e9oTW7Myl1GDQK96NRhP8lSikiIiJm0FUIRUTkNDt37GD6A235sWsZs8OCmOnTiCHFzxLfuRef3HOFilfi9iZMmEDjxo1JSUlh8+bNbN68mYMHD9KwYUMmTJhgdrwaa0iTISTW7UuHoqILmj64encGAH2aR1Z1NBERETGZRmCJiMgpli9eQOa7t/B0C2iYBcs94ng9/xH+dkMnbux4YZe0F6ltVq1axbp16wgNDa3YFhYWxvPPP691sS5Cj3o96LFnLZSUnvcVCA3DYPUv5QWsnpfp6oMiIiK1jQpYIiJSYcY/H6Plz69wU3MLZYaVfZnD2ec/hE/u7UTrmCCz44m4DJvNRl5e3mnb8/Pz8fb2NiFRLZJzsPzneY7A2puRz9GcIrw9rXRuEPrHDxAREZEaRVMIRUSEsrIy/vzXIXzi/QUftYkkzQjgttI/c6DRrcyf0FPFK5Hfufbaa7nnnnv44YcfMAwDwzBYt24d9913H0OGDDE7Xo21L3sfGTkHcQIE1juvx67anQlAl4ah+HhrIX0REZHaRgUsERE3l5uby4y7OnOL7XuKPSwctvowzPFn2vcczKzRnQn102gSkd979dVXady4MV27dsVut2O32+nWrRtNmjRh+vTpZserkRxOB9d9eR19nfvIslrBL/y8Hn9y/aueTTV9UEREpDbSFEIRETeWvGc3PzzZm4mNC8ABdxyJYFbpOJ65+UqublXH7HgiLis4OJgvv/ySPXv2sGPHDgBatGhBkyZNTE5WcxWUFRBsCya3KJtApxN8Qs75sUWlDn5IPgZo/SsREZHaSgUsERE3tXDh+3y880mea1aMs9TCP8puYnnQrXx6Wwddfl6kEomJiWfdv2LFiorfp02bdqnj1DqB3oGsuXkNzhcalE8ROI8C1qYDWRSVOokMsHFZlP79EhERqY1UwBIRcTOGYfDRS5NZ4/U5SWG+/J+fF7YDtxDW7jrmXNdKa8eInMGWLVvO6TiLxXKJk9RiTifWwuzy38+jgLVuX/noq26Nw/TfX0REpJZSAUtExI0UFhbywSODuD1kM1fZPJhos5OeNoZ7ho3kpk7nd8UvEXfz2xFWcokU5wBG+e/24HN+2A/7jgNwRaOwqs8kIiIiLkEFLBERN7Fv307+++pAHoko/0Pv+5I25OY8zCtje9IyWlcZFBFzbUjdwJxt79MmwJ+biwzwPLcLSBSVOkhKyQagiwpYIiIitZYKWCIibmDF4o+ZvudvHIz3o9+RAlYUDGZ3s3v5+KZ2BNq9zI4nIsIvWb8w79BKinzs3Gyxn/PjNh/MosRRvv5VgzDfS5hQREREzGQ1O4CIiFw6hmHw1SsPc/ny+2lsKcPHafC0MYLAq//CG7d3UvFKRFxGu8h2JNYfzDX5Bee1/tXJ6YNdGmn9KxERkdpMI7BERGqposJCvvpzf64P/AkPH7glzZdd9nuZcustdGoQanY8EZFTtAhrQYuw9nBiBkQGn/PjfkguX8D9ikb6d01ERKQ2UwFLRKQW2vPLj/xt3gg6NjiBRxZ87ujJ/LhHeG/kFYT728yOJyJSucKs8p/nOAKrqNTB5oPZAHRpqPWvREREajMVsEREapl1899j74Y/82OjULYbgRw8PpCYHg/x76ua4WHV9BoRcU05xTkU5R3B32LB7xwLWD8fzqGkzEm4vzeNI/wucUIRERExkwpYIiK1hNPp5Kvn7qRf4Tyu8HKSdty
P74pu5PrbH6J7k3Cz44mInNUL619g3qF5PBLgz6hzLGBt+XX0Vfu4EK1/JSIiUstpEXcRkVogLe0gDz/fhv6OrwjwcrLWcTm77S8zc/zjKl6JuLlhw4YREhLCDTfccMr27OxsOnbsSHx8PK1ateKtt94yKWE5q8WKJxYscM5TCLeklE85jI8LvmS5RERExDVoBJaISA23/dv5vLA5kc31bNjzQqmX2gNr3yn8s3dzrJoyKOL2Jk6cyJgxY3jvvfdO2R4QEMDq1avx9fWloKCAVq1aMXz4cMLCzFlL6pkez/DMwT2Q+/W5F7B+HYHVLvbcr1ooIiIiNZNGYImI1GDLX59I7KI7mFiQTViZkwMFg7ni7uk80LeFilciAkDv3r0JCAg4bbuHhwe+vr4AFBcXYxgGhmFUd7xTVSziHvyHhx7NKeRoThFWC7SJCbq0uURERMR0KmCJiNRA2Vlp/Ocv7embOYsATwelhQ1pVfoUM8b/nY4NdCl5kZpi9erVDB48mOjoaCwWC3Pnzj3tmBkzZtCgQQPsdjtdunRh/fr1Vfb82dnZtG3blpiYGB599FHCw02ecnweVyFM+nX0VbM6gfjZNKlARESktlMBS0Skhln33Wfc8Ulv/t2okEyrlTfKBvNzv/d57d7hhPh5mx1PRM5DQUEBbdu2ZcaMGZXunz17NomJiTzxxBNs3ryZtm3bMmDAANLT0yuOObmG1e9vR44c+cPnDw4O5scffyQ5OZmPPvqItLS0Kmvb+fpyz5c845HHOrvt3ApYKdkAtNP6VyIiIm5BX1eJiNQQhmGw7J+JtEr9EGtcKCUWTx7xupPEMU/QJibY7HgicgEGDhzIwIEDz7h/2rRpjB07ltGjRwMwc+ZMFixYwDvvvMPkyZMBSEpKuugcUVFRtG3bljVr1py22DuUTzEsLi6uuJ+bm3vRz/l7PxxdxzxfT2ILvbniHApY/1v/KrjKs4iIiIjr0QgsEZEaIPt4Kl8/3JmEjHeo41HC3an+dDb+j9ceelHFK5FaqqSkhE2bNpGQkFCxzWq1kpCQwNq1ay/6/GlpaeTl5QGQk5PD6tWradasWaXHTp06laCgoIpbbGzsRT//7/WL7sH9WTm0LS7+wxFYpQ4nPx3OBqBdnBZwFxERcQcqYImIuLily/7FbZ/2xVL3EABvlQ3COehzpt11BwF2L5PTicilkpmZicPhICoq6pTtUVFRpKamnvN5EhISuPHGG1m4cCExMTEVxa8DBw5w5ZVX0rZtW6688koefPBBWrduXek5pkyZQk5OTsUtJSXlwht2Bv3C2/JAdg7xZYCX71mP3ZOeT1GpkwCbJ43C/ao8i4iIiLgeTSEUEXFRhtPJ8pfvYod1OQdCA3nVI4TVpeMZN/EhYkLO/sediMhJ33zzTaXbO3fufM7TD202GzabrQpTVeK3C7hbzn4V1e1HyqcwtogO1BVXRURE3IQKWCIiLijr8F62v3wd/fxT6AXss4RTN+YxHrnrOjz0x5qIWwgPD8fDw+O0hdXT0tKoU6eOSakunRP5qZRZLdh8gvmjUtn2o+UFrMvrBl76YCIiIuISNIVQRMTFfPjpZJ7/fADd/FMoMTx4uexmbr9uEf93w1AVr0TciLe3Nx06dGDZsmUV25xOJ8uWLaNr164mJrs0ntn5Pt3rx/KJ7x9PjT45AuvyaBWwRERE3IVGYImIuIiSwgK+en4IrzRIpSjYh8Yl0aR4P8Sf7h2tta5Eaqn8/Hz27NlTcT85OZmkpCRCQ0OJi4sjMTGRUaNG0bFjRzp37sz06dMpKCiouCphreIoKf/p6XPWwwzD0AgsERERN6QCloiIC9i3eTl5H4/lBr9MirP8WeQZR2CX13i6Z0ezo4nIJbRx40b69OlTcT8xMRGAUaNGMWvWLEaMGEFGRgaPP/44qampxMfHs2jRotMWdq8NngrtzJNbFmFt0/2sxx3JKSKnsBRPq4WmUf7VlE5ERETMpgKWiIiJnA4
HU2fexPWZ39HWr5Bcw5e9BaN44cHHiA4++ygEEan5evfujWEYZz1m/PjxjB8/vpoSmcezKKf8F9/Qsx6349fpg00i/bF5elzqWCIiIuIiVMASETHJsYM7+fvsm1gQabDbM4AHDtVjW/tn+et1A3RVLRFxP7+9CuFZaPqgiIiIe1IBS0TEBGvee5IWv7zBeHsJ3zrq4FnQmOD7P2RMdLjZ0URETLGw8DA7QoLpWXqcTmc5Tgu4i4iIuCcVsEREqtHRo7v54u1bGO/4BTxhR0ksQ40JTJp4N54eujCsiLivNWVZzA8OJLw48+wFLI3AEhERcUsqYImIVJNFnz3P33PeJzvGyoDDXqwp7Eu70S/xSJNYs6OJiJjuSs8QwjP3cXm9iDMeU1BcxsHjJwBooQKWiIiIW1EBS0TkEivKy2LF1Ou52nsTc6Mi2IuNd0Lu44n/exK7lxYgFhEBGOQVzqCsbPCNPuMx+zIKAAj39ybEz7uakomIiIgr0HwVOc3UqVPp1KkTAQEBREZGMnToUHbt2mV2LJEaac68aRyY2p6B3puwAJ2PXsaf2v+HqeOeUfFKROQ87c3IB6BRhL/JSURERKS6qYAlp1m1ahXjxo1j3bp1LF26lNLSUvr3709BQYHZ0URqjMK8LB55qQdPHnuH5ZFlpDmDeSXgUW55bB5XtY83O56IiMspMwxKAYfhPOMxJwtYjVXAEhERcTuaQiinWbRo0Sn3Z82aRWRkJJs2baJnz54mpRKpOdZ9+TYRPzxL35AiFlvCWesRRevBbzGxUzuzo4mIuKzHivYwv2Ecj2RvZdQZjvlfAcuv+oKJiIiIS1ABS/5QTk4OAKGhoZXuLy4upri4uOJ+bm5uteQScTWZGQdZ8Pod3E4SVm8D//wghnr15K/jX8XmqemCIiIXa296+WjwxpEagSUiIuJuNIVQzsrpdDJp0iS6d+9Oq1atKj1m6tSpBAUFVdxiY3VFNXE/H3/0OCPnDmROzFHKLAZzSrtw8PqFPD1phopXIiLn4DF7I747kMLNQS0r3e9wGiRnlhewmmgKoYiIiNtRAUvOaty4cfz888988sknZzxmypQp5OTkVNxSUlKqMaGIubLSD7NgSm8G7nmNEg+DXIsnL4Xdz+CnFtGpTeVFXxEROZ2vxYNAp4HNWvkEgUNZJyhxOLF5WokO9qnmdCIiImI2TSGUMxo/fjzz589n9erVxMTEnPE4m82GzWarxmQi5jOcTt5+ayKDD83lGlsuOGHYoXpcMfwlrmjVxux4IiK1zsn1rxqG++FhtZicRkRERKqbClhyGsMwePDBB5kzZw4rV66kYcOGZkcScSm7fvyOp9Y8wE8BTjr6FlFUGMGKeuOYMHYSVv1RJSJyQb4pPcbu4CC6FaYSX8l+rX8lIiLi3lTAktOMGzeOjz76iC+//JKAgABSU1MBCAoKwsdHQ/bFfZWVFDP3xbEMLP2axhH+bDf8+MCzDYn3f8ToulFmxxMRqdGWlR1nfkgQfkXplRewKq5AqAKWiIiIO1IBS07zxhtvANC7d+9Ttr/77rvceeed1R9IxAV88uU/aLjhbW7wPAoW6JceRGz0WMbeOc7saCIitcIVnkH4HU+mWb3Kr3r8vwKWX3XGEhERERehApacxjAMsyOIuIzj6YeZ+v5NLAnP4caoIppn+vKZ1zBueeRletm19puISFW5ziuS645lgW/l627uy/h1CqFGYImIiLglFbBERCphOJ3Mfe1PdMuczY1+xSyyRLGbMHYNnsHdHa8wO56IiFtxOg2OnygBIDJAXx6IiIi4IxWwRER+Z+6iN8ne8Dp3Og6CBxQWRDDqxEAeHv8CFosWaRcRqW4FJWWcHCAe6ONlbhgRERExhQpYIiK/OnY0hXffuYMP62UQFu3g2hQbCx39GDDhVR6JiDA7nohIrfaXwj0sbBBLYtZP3P67fblFZQB4eViweVqrP5yIiIiYTgUsEXF7htPJF9MfpmfWZzzomcf
ysroEF/mwvte/uKPPULPjiYi4BScGZRYLzkr25RWVAhBo99JIWBERETelApaIuLX/zP0HW/e+x9S8g1g8YF9ZJMNKb+bucU/ojyQRkWo02d6ASdt/xL9x89P25RaWj8AKsOujq4iIiLvSpwARcUt7tm1i80eTeLnJcYrDrfQuCyC9oCfXTHqdsWGVX8JdREQunSCLF0EOB1i9T9tXMQJL61+JiIi4LRWwRMStZB9PZ8kr47mGldzkVczh3CA2WyIwrpjBHb2uMzueiIhUIq9II7BERETcnT4FiIhbcJSV8eSbd7LBezPveKbi53CwpbQ+9SPG89At95gdT0TE7a0uy2JvUACditJp9bt9ub+OwAqwaQSWiIiIu1IBS0RqvaWfv03o5ldIiSvikLed14IiaF4ymJETn6edl/4ZFBFxBV+XZjI/NIRHClNPK2CdHIEV6KN/s0VERNyVPgWISK311dJZeC+fydW2HeANE4758pZPHPeNfIe4uvXNjiciIr/R3iMQa9Z+GkcHn7Yvt/DXEVh2jcASERFxVypgiUits+unjby+dDxrwvJ5OCIbR46FBSUdqD/kOf7ZvovZ8UREpBI3ekdxY+ZxaB8HwJ70PDYdyOLGDrHknhyBpQKWiIiI21IBS0RqjdRD+1k1cxKDPb6jV6AXKy1hrPKIILrLKwwZdL3Z8URE5GwM45S7f5nzMz8kHyc2xPd/a2BpEXcRERG3pU8BIlLjZRw7yrPvj2FY4TZGeOYAUP94HcZGDGXCxKdNTiciIhciNbcIgIPHT/xmDSyNwBIREXFXKmCJSI1VkJ/L3FceYb/vSpaF2sgoshOdYmdj1M3cdP8TdPL0MDuiiIicoyeL9rE4LoYHs39mJJDz67pXGXnFv1kDSx9dRURE3JU+BYhIjZOXl81n//wTV+Uv5VaP42TkW1njX5e6RS2J/vMHjPT1NzuiiIicpyKc5HlYKTGcOJ1GRdEqI7+YPE0hFBERcXtWswOIiJyr4qIinp1+O9d/0p0U3yXEehwn3RnIYstwPr5hNf+YOAc/Fa9ERE4xbNgwQkJCuOGGG07bl5ycTJ8+fbj88stp3bo1BQUFJiQs97B3HPNTjjA0sBn5JWU4f10SKz23+H9TCLWIu4iIiNtSAUtEXF5JcTEfTXuUlGdac3XhYo56W1np68csj8F4TNjMbX/9NyEh4WbHFBFxSRMnTuT999+vdN+dd97JU089xfbt21m1ahU2m62a0/1PuNWb+mVlBHnYKkZfQfkIrJOLuKuAJSIi4r40DltEXFZObhbPzRrLZYVbuaswFTwhq9CPIemx3DvyNeJiGpsdUUTE5fXu3ZuVK1eetn3btm14eXlx5ZVXAhAaGlrNyc4s5zcFrCPZhRSVOgEI9NFHVxEREXelEVgi4nKyj2fw4TN38d+ZnVgYtIuPwq2kOf342Hk1hfes5dlHF6p4JSK1wurVqxk8eDDR0dFYLBbmzp172jEzZsygQYMG2O12unTpwvr166vkuX/55Rf8/f0ZPHgw7du357nnnquS816o78uy+TjAn53FmacUsI7mFFX87m9TAUtERMRd6VOAiLiM/Qd/Yf77U7ip5Adus+ZSUgYLiqKJPhFHyT3/ZKSKViJSyxQUFNC2bVvGjBnD8OHDT9s/e/ZsEhMTmTlzJl26dGH69OkMGDCAXbt2ERkZCUB8fDxlZWWnPXbJkiVER0ef8bnLyspYs2YNSUlJREZGcvXVV9OpUyeuuuqqqmvgefiqLJMF4aE8cuIIUb8pYJ3k5+2Bp4e+exUREXFXKmCJiOl2b/+Rz+f+HwvqpNA0vITxqbkccoSwyp7AWzdMJSQswuyIIiKXxMCBAxk4cOAZ90+bNo2xY8cyevRoAGbOnMmCBQt45513mDx5MgBJSUkX9Nz16tWjY8eOxMbGAjBo0CCSkpJMK2C1tvpRUnCQ+nUDSS88vSAXoPWvRERE3Jq+xhIR06xZ+iVzpyQQN7sfdznWc8J
q4aiHN2/7jiTk/7Zy61/eVvFKRNxWSUkJmzZtIiEhoWKb1WolISGBtWvXXvT5O3XqRHp6OllZWTidTlavXk2LFi0qPba4uJjc3NxTblXtVu+6TEvPpLd/g1OmEJ4UYNf3riIiIu5MnwREpFo5HQ7e+vApVuZ9SXMjmydsWQDsKorltrzuPHD3y/j4+JqcUkTEfJmZmTgcDqKiok7ZHhUVxc6dO8/5PAkJCfz4448UFBQQExPDZ599RteuXfH09OS5556jZ8+eGIZB//79ufbaays9x9SpU3nyyScvqj3no7ICVqCPRmCJiIi4MxWwRKRanDhRwNyZf6Plsa/p5J/O69FR7HH60Sk1CufldzDo5vto6+FhdkwRkVrnm2++OeO+P5rCeNKUKVNITEysuJ+bm1sx9fBS0AgsERER+T19EhCRS+qHLat4f8WTtHMc5O6CDPCCwiIvEtL96dn2XgaNvtPsiCIiLik8PBwPDw/S0tJO2Z6WlkadOnWqNYvNZsNms13S53iuOJllsdHcl7Od3KLuAHhYLTicBgCBWgNLRETEramAJSJVzul0svizdzCSPuBE6F5WR4bwS5kXg/P8WUFXOt3yBC83b212TBERl+bt7U2HDh1YtmwZQ4cOBcr/fV22bBnjx483N9wlkGs4SPf0pNAoqxiBVT/Ml30ZBYBGYImIiLg7fRIQkSqTlnmElz+aRNvc7YwsSwEbFJ2w8N+CEBo64/F+6DVuDgk3O6aIiMvIz89nz549FfeTk5NJSkoiNDSUuLg4EhMTGTVqFB07dqRz585Mnz6dgoKCiqsS1iYTvGO588BWIhpdxpz95QWsJhH+vylgaQSWiIiIO1MBS0Qu2polc0ld+RaHwneyIMyXTO9ihh/15JuS1nh3Gs1Hw0eZHVFExCVt3LiRPn36VNw/uc7UqFGjmDVrFiNGjCAjI4PHH3+c1NRU4uPjWbRo0WkLu9cG0VYb0SWl4OlTMQKrSaQ/S7aXT6EM9NHHVhEREXemTwIickFSjh7g9c8eofvxXQzhAHjDkRMezA+041NYn5SR/+UaTRMUETmr3r17YxjGWY8ZP358rZwyeDa5hWUANI3yr9imEVgiIiLuTQUsETkvS7/8D/nr3mNdvWQWBtnxt+QxKNPKmpJmHKtzDfNvTcTHx8/smCIiUsNsdORyyN+PlkXHyD05AisioGJ/oNbAEhERcWv6JCAif2jt5uV8supFbso6wFXWI2CD6Hwbm+zeHC9pyi/XT6VPm45mxxQRkRrs89J0FkSEkViQQomjAwBxYb54Wi2UOQ1dhVBERMTNqYAlIpXKzEhjyfsvEpu5mjcb5PBjqI2ORh4dczxZWdKCwvpDWXjrg3h7X9rLqouIiHtobvUl90QhEeHlo3g9rBYC7Z5EBdo5nF1IsK8KWCIiIu5MBSwRqVBUVMiMjx9jZ84q/p51gFssxWCDw/n+5Bs2dtOZI7c/y4AmLcyOKiIitcyd3tHcmfYt6Y3igPIpgxaLhSmDmrNxfxZtYoLNDSgiIiKmUgFLxM2VFBez6LN/UbZ9Pl09tzOvfgDHQjz4qdhCg/ww1tKO+u3vZm6fgWZHFRERN1BU5gAgyKd8xNW1baK5tk20mZFERETEBaiAJeKGykpLmT37ZVZk/heLdzZvpR8F7/J9g/K82WEJY0/MBHrcPJkRHh7mhhUREbdSVHJqAUtEREQEVMAScRu5ednM+/yf2H9ZQ3fPbQz0zOPFuHo4LV5s9/Aj+UQzChsk8MAtD+IfEGh2XBERcTN/Lz7Aypi6DCj8BWhNoApYIiIi8hsqYInUYocPJrP6i5nsL1zFV3XyuLKwkBe9jwGQ6/Chf4YfwUEdqPvgU1weHG5yWhERcWfHjFIOenmR4ygGUAFLRERETqEClkgts2T1f/l68zv0yDrCYPYz0uIgyebNRx51+Mnbzrzi9hTG9WbgLRP5e1Cw2XFFREQAuM87mhEHf+Zg9CBAUwhFRETkVCpgidR
wh4/s5/uvPsTj4LfEW/fwdbTBNyG+XEY23tkO9pRFsrO0Ofd5dWDMTX/Cx8fX7MgiIiKnaWD1oUFxCeml5YsyqoAlIiIiv6UClkgNU1ZayqpFn3Fo81fMj9jOAR8Hy9MO4+9tANDthD8HLb6klLVlba8H6NpnEE1MziwiInKuCku1iLuIiIicTgUsERdXVlrKlwvfZU3yHEKKM3iw4Aj9rAUAfOZdl0KrF4u9w/HIbUhxdBd6XHcXN8Y1NDm1iIjI+fnJkc9RXx8Ol+UAEGhXAUtERET+RwUsEReTX5DLp4vepGzHFpoW7KOd1wEi/JwsqxNJXGkpoYUFnDC82VDSkG5ZcdzddjCDb70VD0+9nUVEpOb6qDSVBVERXJF1FIAGYZryLiIiIv+jv3hFTLZu4woOrF2GJfUnGhgHeS+mgNV+dv7sc5w+ZfkAXFZo5/J8C5ElMcxvPom+g2+jl58/vUzOLiIiUlUaWX3oWFiEpdATiwVaxwSZHUlERERciApYItVozy8/s3XNYk6kJGEv3cs/G+SQ7WFh3bEUvH6dKbGpJJDNdm9+dkbyEb0Ja9WPXtfczGwtvi4iIrXYPd71uCf1e54ujaBxhD8BmkIoIiIiv6EClsgl4CgrY8sPq9i7eQVG+nb2+R1gaXgR/U8UMOV4FgCGJ7xsrUeZxcI3lmhKiuqRH9yCZpf3YlXvwXh720xuhYiIiDnaxgSbHUFERERcjApYIhchNy+b77//mpzd23Bk7iG45CjvxR0j2W7wyZFUOpaWgQfM8/TlY69wdtq82O8I55eyOmR4x3GbR1MG9ruD+jG6TqCIiMhJ8bGaPigiIiKnUgFLKjVjxgz+/ve/k5qaStu2bXnttdfo3Lmz2bFMkXYkhWUbvmTP0STqZp4g7MRxwozjHAjK4eU6XnQqKuJfxzLAAtjgPY8oCq02dnvaKDwRSbKzLke96jHWI45BV99Gg/otaGB2o0RERFzM9OKDfBtdh+DjaQzRCCwRERH5HRWw5DSzZ88mMTGRmTNn0qVLF6ZPn86AAQPYtWsXkZGRZserMkVFhWzfs4X05L3kH0mhOOswx0oPkRRwlGBHKaOPlxDtkU2UNZ+50XXY4ePNa14Z9PYuBCDJ8KbMUofDHl5sLY0hxRlOtnddrigN46bY9nQbfj2BAcG0NLmdIiIiNUFKWTG7bN608yyjed0As+OIiIiIi1EBS04zbdo0xo4dy+jRowGYOXMmCxYs4J133mHy5MnVmiUn6xhZ+ccpLC7EWgaUOSgrLSa/KJ/MogyMMie+xR4U5udQUpDL7uI95DhyiTlhI6jYibWskCxrFj+EZhHscDA600GQ9QTBlkIm1PMnyW7jlbQMbjhRXpTaZLPxVkgUcaWltM47WpGjYbGTYsPgx9L6pJfVodSvLt6RDZjetAVXdrwab28brav1v4yIiEjtMrQkjOszd/KDV09snh5mxxEREREXowKWnKKkpIRNmzYxZcqUim1Wq5WEhATWrl1b6WOKi4spLi6uuJ+bm1tleTJeuoK/xlnYZrMxIzWdnoVFAHzvY2dynUiaFZfw+ZHUiuNH1Y1ks5+dlwoz6O8sBCtsttl4I6i8KBWf87+iVJCz/Kp+KRZftpeGkOkM4FCZHz2OOwm0hPBp1EjCYprSvG1XXohrWGVtEhERkdOFnPCgVVERuWEhZkcRERERF6QClpwiMzMTh8NBVFTUKdujoqLYuXNnpY+ZOnUqTz755CXJ48CKh2GU/26xAFBqeGA4PfB1OvEyLKQ7AzlheFPo9CLihBctHBb2FcfydbEPJXhzzLDR53gp/pYgZoffhG9IJEHh0dxVpw5PN2hGSFDYKc95yyVpiYiIiJxNcZkTgCAfL5OTiIiIiCtSAUsu2pQpU0hMTKy4n5ubS2xsbJWcO3TCKv5ulOFj98HPxx+8bXgB3YEfKjn+H1XyrCIiIlLdDngUUeBjJ9soMDuKiIiIuCAVsOQU4eHheHh4kJaWdsr
2tLQ06tSpU+ljbDYbNpvtkuSJiKz8OUVERKR2+doni3XBkdxoHGGI2WFERETE5VjNDiCuxdvbmw4dOrBs2bKKbU6nk2XLltG1a1cTk4mIiEhtFuHwomVxMYHYzY4iIiIiLkgjsOQ0iYmJjBo1io4dO9K5c2emT59OQUFBxVUJRURERKra9YXhdMhNYm2TOLOjiIiIiAtSAUtOM2LECDIyMnj88cdJTU0lPj6eRYsWnbawu4iIiIiIiIhIdVABSyo1fvx4xo8fb3YMERERERERERGtgSUiIiIi5pvrc4zb60axwZlidhQRERFxQSpgiYiIiIjpUj1KSLLbyKHQ7CgiIiLigjSFUERERERMN6AwhJuz95FfN9rsKCIiIuKCNAJLREREREzX0GGn34lCIi3+ZkcRERERF6QCloiIiIiIiIiIuDRNIRQRERER0x32KMZpt5FlnDA7ioiIiLggFbCkyhmGAUBubq7JSURERMqd7JNO9lFycS5FX/8F6WwIDmfYiWR66DOEiIicJ/X1tZ/F0KsrVezQoUPExsaaHUNEROQ0KSkpxMTEmB2jxlNfLyIirkp9fe2lApZUOafTyZEjRwgICMBisVzUuXJzc4mNjSUlJYXAwMAqSuja3LHN4J7tVpvV5trKFdtsGAZ5eXlER0djtWoJ0Iulvv7iuWO71Wa1ubZSm12jzerraz9NIZQqZ7Vaq7ziHRgY6DL/MFYXd2wzuGe71Wb3oDabLygoyOwItYb6+qrjju1Wm92D2uweXK3N6utrN5UlRURERERERETEpamAJSIiIiIiIiIiLk0FLHFpNpuNJ554ApvNZnaUauOObQb3bLfa7B7UZpGzc9f/X9yx3Wqze1Cb3YM7tlnMp0XcRURERERERETEpWkEloiIiIiIiIiIuDQVsERERERERERExKWpgCUiIiIiIiIiIi5NBSwREREREREREXFpKmCJ6WbMmEGDBg2w2+106dKF9evXn/X4zz77jObNm2O322ndujULFy6spqRV53zaPGvWLCwWyyk3u91ejWkv3urVqxk8eDDR0dFYLBbmzp37h49ZuXIl7du3x2az0aRJE2bNmnXJc1al823zypUrT3udLRYLqamp1RO4CkydOpVOnToREBBAZGQkQ4cOZdeuXX/4uJr8nr6QNtf09/Qbb7xBmzZtCAwMJDAwkK5du/L111+f9TE1+TWWquGOfT24V3/vjn09uF9/r77ePfp6UH8vrkkFLDHV7NmzSUxM5IknnmDz5s20bduWAQMGkJ6eXunx33//PSNHjuSuu+5iy5YtDB06lKFDh/Lzzz9Xc/ILd75tBggMDOTo0aMVtwMHDlRj4otXUFBA27ZtmTFjxjkdn5yczDXXXEOfPn1ISkpi0qRJ3H333SxevPgSJ60659vmk3bt2nXKax0ZGXmJEla9VatWMW7cONatW8fSpUspLS2lf//+FBQUnPExNf09fSFthpr9no6JieH5559n06ZNbNy4kb59+3Ldddexbdu2So+v6a+xXDx37OvB/fp7d+zrwf36e/X17tHXg/p7cVGGiIk6d+5sjBs3ruK+w+EwoqOjjalTp1Z6/E033WRcc801p2zr0qWLce+9917SnFXpfNv87rvvGkFBQdWU7tIDjDlz5pz1mD/96U9Gy5YtT9k2YsQIY8CAAZcw2aVzLm1esWKFARhZWVnVkqk6pKenG4CxatWqMx5TG97Tv3Uuba5t72nDMIyQkBDj7bffrnRfbXuN5fy5Y19vGO7d37tjX28Y7tnfq6+vXG16P/+W+nsxm0ZgiWlKSkrYtGkTCQkJFdusVisJCQmsXbu20sesXbv2lOMBBgwYcMbjXc2FtBkgPz+f+vXrExsbe9ZvPmqLmv46X4z4+Hjq1q3LVVddxXfffWd2nIuSk5MDQGho6BmPqW2v9bm0GWrPe9rhcPDJJ59QUFBA165dKz2mtr3Gcn7csa8H9ffnoja8zhejtvT36uvPrDa9n9X
fi6tQAUtMk5mZicPhICoq6pTtUVFRZ1wHIDU19byOdzUX0uZmzZrxzjvv8OWXX/Lhhx/idDrp1q0bhw4dqo7IpjjT65ybm0thYaFJqS6tunXrMnPmTL744gu++OILYmNj6d27N5s3bzY72gVxOp1MmjSJ7t2706pVqzMeV9Pf0791rm2uDe/prVu34u/vj81m47777mPOnDlcfvnllR5bm15jOX/u2NeD+vtz4Y59PdSu/l59fe3u60H9vbgeT7MDiMjZde3a9ZRvOrp160aLFi148803efrpp01MJlWpWbNmNGvWrOJ+t27d2Lt3Ly+//DIffPCBickuzLhx4/j555/59ttvzY5Sbc61zbXhPd2sWTOSkpLIycnh888/Z9SoUaxateqMH2pF5I/Vhn8b5I/Vpv5eff2Z1Zb3s/p7cTUagSWmCQ8Px8PDg7S0tFO2p6WlUadOnUofU6dOnfM63tVcSJt/z8vLi3bt2rFnz55LEdElnOl1DgwMxMfHx6RU1a9z58418nUeP3488+fPZ8WKFcTExJz12Jr+nj7pfNr8ezXxPe3t7U2TJk3o0KEDU6dOpW3btrzyyiuVHltbXmO5MO7Y14P6+3Ohvv5/amJ/r76+9vf1oP5eXI8KWGIab29vOnTowLJlyyq2OZ1Oli1bdsa51V27dj3leIClS5ee8XhXcyFt/j2Hw8HWrVupW7fupYppupr+OleVpKSkGvU6G4bB+PHjmTNnDsuXL6dhw4Z/+Jia/lpfSJt/rza8p51OJ8XFxZXuq+mvsVwcd+zrQf39uagNr3NVqUn9vfp69+3rQf29uABz15AXd/fJJ58YNpvNmDVrlrF9+3bjnnvuMYKDg43U1FTDMAzj9ttvNyZPnlxx/HfffWd4enoa//jHP4wdO3YYTzzxhOHl5WVs3brVrCact/Nt85NPPmksXrzY2Lt3r7Fp0ybj5ptvNux2u7Ft2zazmnDe8vLyjC1bthhbtmwxAGPatGnGli1bjAMHDhiGYRiTJ082br/99orj9+3bZ/j6+hqPPvqosWPHDmPGjBmGh4eHsWjRIrOacN7Ot80vv/yyMXfuXOOXX34xtm7dakycONGwWq3GN998Y1YTztv9999vBAUFGStXrjSOHj1acTtx4kTFMbXtPX0hba7p7+nJkycbq1atMpKTk42ffvrJmDx5smGxWIwlS5YYhlH7XmO5eO7Y1xuG+/X37tjXG4b79ffq692jrzcM9ffimlTAEtO99tprRlxcnOHt7W107tzZWLduXcW+Xr16GaNGjTrl+E8//dS47LLLDG9vb6Nly5bGggULqjnxxTufNk+aNKni2KioKGPQoEHG5s2bTUh94U5eMvr3t5PtHDVqlNGrV6/THhMfH294e3sbjRo1Mt59991qz30xzrfNL7zwgtG4cWPDbrcboaGhRu/evY3ly5ebE/4CVdZe4JTXrra9py+kzTX9PT1mzBijfv36hre3txEREWH069ev4sOsYdS+11iqhjv29YbhXv29O/b1huF+/b36evfo6w1D/b24JothGEbVj+sSERERERERERGpGloDS0REREREREREXJoKWCIiIiIiIiIi4tJUwBIREREREREREZemApaIiIiIiIiIiLg0FbBERERERERERMSlqYAlIiIiIiIiIiIuTQUsERERERERERFxaSpgiYiIiIiIiIiIS1MBS0REREREREREXJoKWCIiIiIiIiIi4tJUwBIRcTEZGRnUqVOH5557rmLb999/j7e3N8uWLTMxmYiIiFQF9fUiIufPYhiGYXYIERE51cKFCxk6dCjff/89zZo1Iz4+nuuuu45p06aZHU1ERESqgPp6EZHzowKWiIiLGjduHN988w0dO3Zk69atbNiwAZvNZnYsERERqSLq60VEzp0KWCIiLqqwsJBWrVqRkpLCpk2baN26tdmRREREpAqprxcROXdaA0tExEXt3buXI0eO4HQ62b9/v9lxREREpIqprxc
ROXcagSUi4oJKSkro3Lkz8fHxNGvWjOnTp7N161YiIyPNjiYiIiJVQH29iMj5UQFLRMQFPfroo3z++ef8+OOP+Pv706tXL4KCgpg/f77Z0URERKQKqK8XETk/mkIoIuJiVq5cyfTp0/nggw8IDAzEarXywQcfsGbNGt544w2z44mIiMhFUl8vInL+NAJLRERERERERERcmkZgiYiIiIiIiIiIS1MBS0REREREREREXJoKWCIiIiIiIiIi4tL+H6ialkb6aHALAAAAAElFTkSuQmCC", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAoAAAAHgCAYAAAA10dzkAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAP6FJREFUeJzt3X18z/Xi//HnLuyCbGTZ0Ip0gQi5WGs5UYuiaccpxGGpqFCx6rByEcp0QSu5CLkohOSi0FI7h05Z1NiJXFVT/GhD2Gaysc/794ezz/csc7HZPq/PZ+/H/Xb7/PF5e7/bc+vz/uy51+vzfr+8LMuyBAAAANvwNh0AAAAArkUBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAgjAtr788kvFxMSobt268vLy0ooVKy54zLp163TzzTfL399f1157rebOnVvhOQGgvFEAAdhWXl6emjdvrilTplzU/nv27FGXLl3UoUMHpaena8iQIXr00Uf12WefVXBSAChfXpZlWaZDAIBpXl5eWr58uWJjY8+5z7Bhw7R69Wpt27bNua1nz546duyYkpOTXZASAMoHI4AAcJFSU1MVHR1dbFunTp2UmppqKBEAlI2v6QCezOFw6MCBA6pevbq8vLxMxwFsx7Is5ebmqm7duvL2rvi/ZzMzMxUaGlpsW2hoqHJycvTHH38oMDCwxOPy8/OVn5/vfO5wOHTkyBHVqlWL9w7AAFe/d7gjCuAlOHDggMLDw03HAGxv3759uvLKK03HOKfExESNGTPGdAwAf+Lu7x0ViQJ4CapXry7pzAsoKCjIcBrAfnJychQeHu48FytaWFiYsrKyim3LyspSUFDQOUf/JCkhIUHx8fHO59nZ2brqqqt47wAMcfV7hzuiAF6CoqmboKAg3sQBg1w1jRoZGak1a9YU2/b5558rMjLyvMf5+/vL39//rO28dwBm2fkjGPac+AYAScePH1d6errS09MlnbnNS3p6uvbu3SvpzMhd3759nfs//vjjysjI0D/+8Q/t3LlTU6dO1ZIlSzR06FAT8QGgzCiAAGzru+++U8uWLdWyZUtJUnx8vFq2bKlRo0ZJkn777TdnGZSkBg0aaPXq1fr888/VvHlzTZw4UbNmzVKnTp2M5AeAsuI+gJcgJydHwcHBys7OZhoHMMBTz0FPzQ1UFpyDjAACAADYTqUtgBda49OyLI0aNUp16tRRYGCgoqOj9eOPP5oJCwAA4EKVtgBeaI3PV199VW+99ZamT5+ujRs3qlq1aurUqZNOnjzp4qQAAACuVWlvA3PP
PffonnvuKfHfLMtSUlKSRowYofvuu0+S9N577yk0NFQrVqxQz549XRkVAADApSrtCOD57NmzR5mZmcXW9AwODlZERARregIAgEqv0o4Ank9mZqYklbimZ9G/leTP63nm5ORUTEAAAIAKZMsRwLJKTExUcHCw88E6wAAAwBPZsgCGhYVJUolrehb9W0kSEhKUnZ3tfOzbt69CcwIAAFQEWxbABg0aKCwsTCkpKc5tOTk52rhx43nX9PT393eu3ckangAAwFNV2s8AHj9+XD/99JPzedEan5dffrmuuuoqDRkyRC+99JKuu+46NWjQQCNHjlTdunUVGxtrLjQASWdG23///Xc9+eSTatasmek4AFDpVNoC+N1336lDhw7O5/Hx8ZKkuLg4zZ07V//4xz+Ul5enAQMG6NixY7rtttuUnJysgIAAU5EBSCosLNTs2bN18OBBPfDAAxRAAKgArAV8CVhLECh/GzZsUFRUlIKDg3Xw4EH5+fmdc19PPQc9NTdQWXAO2vQzgADcV9GyjZ07dz5v+QMAlB0FEIBbWblypSQ5V+kBAJQ/CiAAt7Fz507t3r1bVapUOedSjgCAS0cBBOA2ikb/7rjjDtt+LgcAXIECCMBtFH3+j+lfAKhYFEAAbiEzM1MbN26UJHXt2tVwGgCo3CiAANzCJ598Isuy1KZNG9WrV890HACo1CiAANwC078A4DoUQADGHT9+3Lk2N8sxAkDFowACMO6zzz5Tfn6+GjZsqCZNmpiOAwCVHgUQgHH/O/3r5eVlNgwA2AAFEIBRp06d0urVqyXx+T8AcBUKIACjvvrqKx09elQhISG69dZbTccBAFugAAIwqmj1j3vvvVe+vr6G0wCAPVAAARhjWRa3fwEAAyiAAIz5/vvv9euvvyowMFAdO3Y0HQcAbIMCCMCYounfu+66S1WrVjWcBgDsgwIIwBimfwHADAogACN+/fVXbdmyRd7e3rr33ntNxwEAW6EAAjBi+fLlkqR27dqpdu3ahtMAgL1QAAEYsWzZMklSt27dDCcBAPuhAAJwuaysLH311VeSpL/+9a+G0wCA/VAAAbjcypUrZVmW2rRpo/DwcNNxAMB2KIAAXI7pXwAwiwIIwKWOHTumlJQUSUz/AoApFEAALrVq1SqdPn1aTZo00Q033GA6DgDYEgUQgEsx/QsA5lEAAbhMXl6ekpOTJVEAAcAkCiAAl/nss8/0xx9/qH79+mrRooXpOABgWxRAAC5TtPpHt27d5OXlZTgNANgXBRCASxQUFOiTTz6RxPQvAJhGAQTgEv/617+UnZ2tsLAwRUZGmo4DALZGAQTgEkVX/8bGxsrbm7ceADCJd2EAFa6wsFArVqyQxPQvALgDCiCACrdhwwYdPHhQNWrUUPv27U3HAQDbowACqHBF079du3ZVlSpVDKcBAFAAAVQoy7JY/QMA3AwFEECF2rx5s/bu3auqVauqY8eOpuMAAEQBBFDBikb/OnfurMDAQMNpAAASBRBABWP6FwDcDwUQQIXZvn27du7cqSpVqqhz586m4wAA/osCCKDCfPjhh5Kkjh07Kjg42HAaAEARCiCAClNUAB944AHDSQAA/4sCCKBC7NixQz/88IOqVKmirl27mo4DAPgfFEAAFWLp0qWSpOjoaNWsWdNwGgDA/6IAAqgQTP8CgPuiAAIod7t27dLWrVvl6+ur++67z3QcAMCfUAABlLui0b/o6GhdfvnlhtMAAP6MAgig3DH9CwDujQIIoFzt3r1b33//vXx9fRUbG2s6DgCgBBRAAOWq6OrfO++8k+lfAHBTFEAA5coTp3+nTJmi+vXrKyAgQBEREdq0adN5909KStINN9ygwMBAhYeHa+jQoTp58qSL0gLApaMAAig3P/30k9LT0+Xj4+Mx07+LFy9WfHy8Ro8erc2bN6t58+bq1KmTDh48WOL+Cxcu1PDhwzV69Gjt2LFD7777rhYvXqznn3/exckBoOwogADKTdHo3x13
3KFatWoZTnNxJk2apP79+6tfv35q0qSJpk+frqpVq2r27Nkl7r9hwwZFRUWpV69eql+/vjp27KgHH3zwgqOGAOBOKIAAyo2nTf8WFBQoLS1N0dHRzm3e3t6Kjo5WampqicfceuutSktLcxa+jIwMrVmzRp07d3ZJZgAoD76mAwCoHH7++Wdt2bJFPj4++utf/2o6zkU5fPiwCgsLFRoaWmx7aGiodu7cWeIxvXr10uHDh3XbbbfJsiydPn1ajz/++HmngPPz85Wfn+98npOTUz7fAACUESOAAMpF0dW/HTp0UEhIiOE0FWfdunUaP368pk6dqs2bN2vZsmVavXq1xo0bd85jEhMTFRwc7HyEh4e7MDEAnI0RQADlwtOmfyUpJCREPj4+ysrKKrY9KytLYWFhJR4zcuRI9enTR48++qgkqVmzZsrLy9OAAQP0wgsvyNv77L+rExISFB8f73yek5NDCQRglG1HAAsLCzVy5Eg1aNBAgYGBatiwocaNGyfLskxHAzzOnj17lJaW5lHTv5Lk5+enVq1aKSUlxbnN4XAoJSVFkZGRJR5z4sSJs0qej4+PJJ3z/cPf319BQUHFHgBgkm1HAF955RVNmzZN8+bN04033qjvvvtO/fr1U3BwsJ566inT8QCPsmTJEklS+/btdcUVVxhOUzrx8fGKi4tT69at1bZtWyUlJSkvL0/9+vWTJPXt21f16tVTYmKiJCkmJkaTJk1Sy5YtFRERoZ9++kkjR45UTEyMswgCgLuzbQHcsGGD7rvvPnXp0kWSVL9+fX3wwQfcygEog0WLFkmSevbsaThJ6fXo0UOHDh3SqFGjlJmZqRYtWig5Odl5YcjevXuLjfiNGDFCXl5eGjFihPbv368rrrhCMTExevnll019CwBQal6WTec8x48frxkzZmjt2rW6/vrr9Z///EcdO3bUpEmT1Lt37xKPKelKvvDwcGVnZzOlA9vauXOnGjduLF9fX2VlZbl0+becnBwFBwd73DnoqbmByoJz0MYjgMOHD1dOTo4aNWokHx8fFRYW6uWXXz5n+ZPOXMk3ZswYF6YE3N/ixYslSR07dmTtXwDwELa9CGTJkiVasGCBFi5cqM2bN2vevHl6/fXXNW/evHMek5CQoOzsbOdj3759LkwMuB/Lsjx6+hcA7Mq2I4DPPfechg8f7vyl1axZM/36669KTExUXFxcicf4+/vL39/flTEBt7Z161bt3LlT/v7+uu+++0zHAQBcJNuOAJ7rVg4Oh8NQIsDzFI3+de7c2bafowEAT2TbEcCiq/auuuoq3XjjjdqyZYsmTZqkhx9+2HQ0wCMw/QsAnsu2BXDy5MkaOXKkBg4cqIMHD6pu3bp67LHHNGrUKNPRAI/w7bffas+ePapWrZrzdkoAAM9g2wJYvXp1JSUlKSkpyXQUwCMVXf0bExOjatWqGU4DACgN234GEEDZORwOZwFk+hcAPA8FEECpff3119q/f7+Cg4N19913m44DACglCiCAUiu6+OOvf/0rt0YCAA9EAQRQKqdPn9bSpUslnVlHFwDgeSiAAEpl3bp1OnjwoGrVqqU777zTdBwAQBlQAAGUStH07/33368qVaoYTgMAKAsKIICLVlBQoI8++kgSV/8CgCejAAK4aGvXrtWxY8cUFhamdu3amY4DACgjCiCAi7Zw4UJJUvfu3eXj42M4DQCgrCiAAC7K8ePHtXLlSklS7969DacBAFwKCiCAi7Jy5UqdOHFCDRs2VJs2bUzHAQBcAgoggIuyYMECSWdG/7y8vAynAQBcCgoggAs6ePCg1q5dK4npXwCoDCiAAC5oyZIlKiwsVOvWrXX99debjgMAuEQUQAAXVHT1L6N/AFA5UAABnFdGRoZSU1Pl7e3N2r8AUElQAAGcV9Ho3x133KE6deoYTgMAKA8UQADnZFlWsat/AQCVAwUQwDlt2bJFO3fuVEBAgLp162Y6DgCgnFAAAZxT0fRvTEyMgoKCDKcB
AJQXCiCAEhUWFuqDDz6QJPXq1ctwGgBAeaIAAijR+vXrdeDAAdWoUUP33HOP6TgAgHJEAQRQoqKLPx544AH5+/sbTgMAKE8UQABnOXnypJYuXSqJq38BoDKiAAI4y5o1a5STk6Mrr7xS7dq1Mx0HAFDOKIAAzjJ//nxJ0oMPPihvb94mAKCy4Z0dQDG///67Vq1aJUnq27ev4TQAgIpAAQRQzOLFi3Xq1Cm1bNlSTZs2NR0HAFABKIAAinnvvfckMfoHAJUZBRCA065du7Rx40b5+PjowQcfNB0HAFBBKIAAnN5//31J0t13363Q0FDDaQAAFYUCCECS5HA4nAWQ6V8AqNwogAAknVn6be/evQoODlZMTIzpOACACkQBBCDp/y7+6N69uwIDAw2nAQBUJAogAOXl5TmXfmP6FwAqPwogAK1YsULHjx9XgwYNFBUVZToOAKCCUQABFLv3n5eXl+E0AICKRgEEbG7//v364osvJEl9+vQxnAYA4AoUQMDmFi5cKIfDoaioKDVs2NB0HACAC1AAARuzLEvz5s2TxMUfAGAnFEDAxtLT0/XDDz/I399fDzzwgOk4AAAXoQACNlZ08UfXrl1Vs2ZNw2kAAK5CAQRsqqCgQPPnz5fE9C8A2A0FELCp1atX6/DhwwoLC9Pdd99tOg4AwIUogIBNzZ49W9KZ0T9fX1/DaQAArkQBBGzot99+06effipJ6tevn+E0AABXowACNvT++++rsLBQkZGRatSokek4AAAXowACNmNZlnP69+GHHzacBgBgAgUQsJlvvvlGu3btUmBgoLp37246DgDAAAogYDNFo38PPPCAgoKCDKcBAJhAAQRsJC8vT4sXL5bE9C8A2BkFELCRjz76SLm5ubrmmmv0l7/8xXQcAIAhFEDARoqmf/v16ycvLy/DaQAAplAAAZv4+eeftX79enl5eSkuLs50HACAQRRAwCbmzp0rSbrrrrsUHh5uNgwAwCgKIGADhYWFmjdvniRW/gAAUAABW0hJSdG+fftUo0YNxcbGmo7jdqZMmaL69esrICBAERER2rRp03n3P3bsmAYNGqQ6derI399f119/vdasWeOitABw6WxdAPfv36+///3vqlWrlgIDA9WsWTN99913pmMB5a7o4o/evXsrICDAcBr3snjxYsXHx2v06NHavHmzmjdvrk6dOungwYMl7l9QUKC77rpLv/zyi5YuXapdu3Zp5syZqlevnouTA0DZ+ZoOYMrRo0cVFRWlDh066NNPP9UVV1yhH3/8UTVr1jQdDShXv//+u5YvXy6J6d+STJo0Sf3793f+bKZPn67Vq1dr9uzZGj58+Fn7z549W0eOHNGGDRtUpUoVSVL9+vVdGRkALpltC+Arr7yi8PBwzZkzx7mtQYMGBhMBFeO9995TQUGBWrZsqZtvvtl0HLdSUFCgtLQ0JSQkOLd5e3srOjpaqampJR7z8ccfKzIyUoMGDdLKlSt1xRVXqFevXho2bJh8fHxKPCY/P1/5+fnO5zk5OeX7jQBAKdl2Cvjjjz9W69at9cADD6h27dpq2bKlZs6ced5j8vPzlZOTU+wBuDPLspyv6/79+3Pvvz85fPiwCgsLFRoaWmx7aGioMjMzSzwmIyNDS5cuVWFhodasWaORI0dq4sSJeumll875dRITExUcHOx8cBU2ANNsWwAzMjI0bdo0XXfddfrss8/0xBNP6KmnnnJeKVkS3sThaTZs2KAdO3aoatWq6tWrl+k4lYLD4VDt2rU1Y8YMtWrVSj169NALL7yg6dOnn/OYhIQEZWdnOx/79u1zYWIAOJttp4AdDodat26t8ePHS5Jatmypbdu2afr06ee8SW5CQoLi4+Odz3NyciiBcGszZsyQJPXo0UPBwcGG07ifkJAQ+fj4KCsrq9j2rKwshYWFlXhMnTp1VKVKlWLTvY0bN1ZmZqYKCgrk5+d31jH+/v7y9/cv3/AAcAlsOwJYp04dNWnSpNi2xo0b
a+/evec8xt/fX0FBQcUegLs6duyYPvzwQ0lnpn9xNj8/P7Vq1UopKSnObQ6HQykpKYqMjCzxmKioKP30009yOBzObbt371adOnVKLH8A4I5sWwCjoqK0a9euYtt2796tq6++2lAioHwtWLBAf/zxh5o2bapbbrnFdBy3FR8fr5kzZ2revHnasWOHnnjiCeXl5TmvCu7bt2+xi0SeeOIJHTlyRE8//bR2796t1atXa/z48Ro0aJCpbwEASs22U8BDhw7VrbfeqvHjx6t79+7atGmTZsyY4ZwyAzyZZVnO1zIXf5xfjx49dOjQIY0aNUqZmZlq0aKFkpOTnReG7N27V97e//e3cnh4uD777DMNHTpUN910k+rVq6enn35aw4YNM/UtAECpeVmWZZkOYcqqVauUkJCgH3/8UQ0aNFB8fHyppspycnIUHBys7OxspoPhVjZt2qSIiAgFBARo//79uvzyy01HqhCeeg56am6gsuActPEIoCTde++9uvfee03HAMpd0ejf/fffX2nLHwCg7Gz7GUCgssrNzdWiRYskSQMGDDCcBgDgjiiAQCXzwQcfKC8vT40aNdJtt91mOg4AwA1RAIFKhos/AAAXQgEEKpEtW7YoLS1Nfn5+6tu3r+k4AAA3RQEEKpGidX+7deumkJAQw2kAAO6KAghUEnl5eVqwYIEkVv4AAJwfBRCoJBYuXKicnBxde+21at++vek4AAA3RgEEKgHLsjR16lRJZ5Yq+9+VKwAA+DN+SwCVwMaNG5Wenq6AgAA99NBDpuMAANwcBRCoBIpG/3r27MnKHwCAC6IAAh7u8OHDWrJkiaQz078AAFwIBRDwcHPmzFF+fr5atWqlNm3amI4DAPAAFEDAgzkcDk2fPl3SmdE/Vv4AAFwMCiDgwdauXauMjAwFBwfrwQcfNB0HAOAhKICAByu6+OOhhx5S1apVDacBAHgKCiDgoX799VetXr1aEhd/AABKhwIIeKgZM2bI4XDojjvu0A033GA6DgDAg1AAAQ9UUFCgWbNmSZIGDhxoOA0AwNNQAAEPtGzZMh08eFB16tRR165dTccBAHgYCiDggaZNmyZJ6t+/v6pUqWI4DQDA01AAAQ+zbds2ffnll/Lx8VH//v1NxwEAeCAKIOBhJk+eLEm67777dOWVVxpOAwDwRBRAwIMcOXJE77//viTp6aefNpwGAOCpKICAB3n33Xf1xx9/qHnz5mrXrp3pOAAAD0UBBDzE6dOn9fbbb0uSnnrqKdb9BQCUGQUQ8BCffPKJ9u7dq1q1arHuLwDgklAAAQ/x1ltvSZIGDBigwMBAw2kAAJ6MAgh4gO+//17r1q2Tj48P6/4CAC4ZBRDwAEW3funWrZvCw8MNpwEAeDoKIODmfv/9d82fP1/SmYs/AAC4VBRAwM3NmjVLJ0+eVMuWLRUVFWU6DgCgEqAAAm7s9OnTmjp1qiRu/QIAKD8UQMCNffzxx9q7d69CQkLUs2dP03EAAJUEBRBwY0W3fnnssccUEBBgOA0AoLKgAAJu6j//+Y/Wr1/PrV8AAOWOAgi4qUmTJkmS7r//ftWrV89wGgBAZUIBBNzQgQMH9MEHH0iS4uPjDacBAFQ2FEDADb399ts6deqUbrvtNrVt29Z0HABAJUMBBNxMXl6epk+fLkl65plnDKcBAFRGFEDAzcyZM0dHjx7Vtddeq5iYGNNxAACVEAUQcCOFhYV64403JElDhw6Vj4+P4UQAgMqIAgi4kZUrVyojI0M1a9ZUXFyc6TgAgEqKAgi4kYkTJ0qSnnjiCVWrVs1wGgBAZUUBBNzEN998ow0bNsjPz0+DBw82HQcAUIlRAAE3UXTj5169eqlOnTqG0wAAKjMKIOAG9uzZo48++kgSN34GAFQ8CiDgBt588005HA517NhRzZo1Mx0HAFDJUQABw44dO6Z3331XEjd+BgC4BgUQMOydd97R8ePH1bRpU911112m4wAAbIACCBh08uRJJSUlSToz
+ufl5WU2EADAFiiAgEHvvfeeMjMzFR4erl69epmOAwCwCQogYEhhYaFeffVVSWdG//z8/AwnAgDYBQUQMGTp0qX6+eefdfnll+vRRx81HQcAYCMUQMAAy7I0YcIESdJTTz3Fsm8AAJeiAAIGrF27Vunp6apWrRrLvgEAXI4CCBhQNPo3YMAA1apVy3AaAIDdUAABF/vmm2+0bt06ValShWXfAABGUAD/a8KECfLy8tKQIUNMR0El98orr0iS/v73v+vKK680nAYAYEcUQEnffvut3nnnHd10002mo6CS27Fjh1asWCEvLy8999xzpuMAAGzK9gXw+PHj6t27t2bOnKmaNWuajoNKrmj0LzY2Vo0bNzacBgBgV7YvgIMGDVKXLl0UHR19wX3z8/OVk5NT7AFcrL1792rBggWSpGHDhhlOAwCwM1/TAUxatGiRNm/erG+//fai9k9MTNSYMWMqOBUqq4kTJ+r06dPq0KGDIiIiTMcBANiYbUcA9+3bp6effloLFixQQEDARR2TkJCg7Oxs52Pfvn0VnBKVRWZmpmbMmCHpzOsI7mXKlCmqX7++AgICFBERoU2bNl3UcYsWLZKXl5diY2MrNiAAlDPbFsC0tDQdPHhQN998s3x9feXr66v169frrbfekq+vrwoLC886xt/fX0FBQcUewMWYOHGiTp48qYiIiIv6uAFcZ/HixYqPj9fo0aO1efNmNW/eXJ06ddLBgwfPe9wvv/yiZ599Vu3atXNRUgAoP7YtgHfeeae2bt2q9PR056N169bq3bu30tPT5ePjYzoiKolDhw5p6tSpkqRRo0bJy8vLcCL8r0mTJql///7q16+fmjRpounTp6tq1aqaPXv2OY8pLCxU7969NWbMGF1zzTUuTAsA5cO2nwGsXr26mjZtWmxbtWrVVKtWrbO2A5fijTfe0IkTJ9SqVSvdc889puPgfxQUFCgtLa3YtLy3t7eio6OVmpp6zuPGjh2r2rVr65FHHtG///3vC36d/Px85efnO59zARkA02w7Agi4wpEjR/T2229LkkaOHMnon5s5fPiwCgsLFRoaWmx7aGioMjMzSzzmq6++0rvvvquZM2de9NdJTExUcHCw8xEeHn5JuQHgUtl2BLAk69atMx0Blcybb76p3Nxc3XTTTeratavpOLhEubm56tOnj2bOnKmQkJCLPi4hIaHYsn85OTmUQABGUQCBCpKdna0333xTEqN/7iokJEQ+Pj7Kysoqtj0rK0thYWFn7f/zzz/rl19+UUxMjHObw+GQJPn6+mrXrl1q2LDhWcf5+/vL39+/nNMDQNkxBQxUkMmTJys7O1tNmjRRt27dTMdBCfz8/NSqVSulpKQ4tzkcDqWkpCgyMvKs/Rs1anTWxWNdu3ZVhw4dlJ6ezqgeAI/BCCBQAXJzc/XGG29IkkaMGCFvb/7Wclfx8fGKi4tT69at1bZtWyUlJSkvL0/9+vWTJPXt21f16tVTYmKiAgICzrpIrEaNGpLExWMAPAoFEKgA06ZN05EjR3T99dere/fupuPgPHr06KFDhw5p1KhRyszMVIsWLZScnOy8MGTv3r0UeACVjpdlWZbpEJ4qJydHwcHBys7O5qbQcMrNzdU111yjw4cPa+7cuYqLizMdqdLy1HPQU3MDlQXnIJ8BBMrd5MmTdfjwYV133XXq3bu36TgAAJyFAgiUo+zsbL3++uuSpNGjR8vXl09ZAADcDwUQKEdJSUk6evSoGjdurJ49e5qOAwBAiSiAQDk5evSoJk2aJEl68cUXWU8aAOC2KIBAOZk4caJycnLUrFkz3X///abjAABwThRAoBwcPnzYuerHmDFjuG0IAMCt8VsKKAevvfaajh8/rptvvlmxsbGm4wAAcF4UQOASZWVl6e2335YkjR07ljV/AQBujwIIXKIJEyboxIkTioiIUOfOnU3HAQDggiiAwCXYu3evpk2bJunMZ/8Y/QMAeAIKIHAJxowZo/z8fN1+++3q
2LGj6TgAAFwUCiBQRjt27NDcuXMlnZkGZvQPAOApKIBAGb3wwgtyOByKjY3VLbfcYjoOAAAXjQIIlMHGjRu1fPlyeXt76+WXXzYdBwCAUqEAAqVkWZaGDx8uSYqLi1OTJk0MJwIAoHQogEAprV27VuvWrZOfn59efPFF03EAACg1CiBQCg6Hwzn6N2jQIF111VWGEwEAUHoUQKAUlixZovT0dFWvXl3PP/+86TgAAJQJBRC4SAUFBRoxYoQk6bnnnlNISIjhRAAAlA0FELhI06ZN088//6zQ0FANHTrUdBwAAMqMAghchKNHj2rs2LGSpLFjx+qyyy4znAgAgLKjAAIX4eWXX9aRI0d044036uGHHzYdBwCAS0IBBC4gIyNDkydPliS99tpr8vX1NZwIAIBLQwEELiAhIUEFBQW66667dPfdd5uOAwDAJaMAAueRmpqqJUuWyMvLS6+99pq8vLxMRwIA4JJRAIFzsCxL8fHxkqR+/fqpefPmhhMBAFA+KIDAOSxdulTffPONqlatqnHjxpmOAwBAuaEAAiXIz893Lvn23HPPqW7duoYTAQBQfiiAQAneeOMNZWRkqE6dOnruuedMxwEAoFxRAIE/OXDggF566SVJ0iuvvKJq1aoZTgQAQPmiAAJ/Mnz4cOXl5emWW25R7969TccBAKDcUQCB/5Gamqr3339fXl5eeuutt+TtzSkCAKh8+O0G/JfD4dCTTz4p6cxtX9q0aWM4EQAAFYMCCPzXnDlzlJaWpqCgII0fP950HAAAKgwFEJCUnZ2t559/XpI0evRohYaGGk4EAEDFoQACksaOHauDBw/qhhtu0ODBg03HAQCgQlEAYXs7duzQW2+9JUlKSkqSn5+f4UQAAFQsCiBszbIsDRw4UKdPn1ZMTIzuvvtu05EAAKhwFEDY2vz587Vu3ToFBgY6RwEBAKjsKICwraNHj+qZZ56RJI0cOVL169c3GwgAABehAMK2nn/+eR06dEiNGzd2FkEAAOyAAghb2rRpk9555x1J0tSpU7nwAwBgKxRA2M7p06f1+OOPy7Is9enTR+3btzcdCQAAl6IAwnamTp2qLVu2qEaNGnr99ddNxwEAwOUogLCVAwcOaMSIEZKkxMRE1a5d23AiAABcjwIIWxk8eLByc3PVtm1bDRgwwHQcAACMoADCNj766CMtX75cvr6+mjlzpry9efkDAOyJ34CwhSNHjmjQoEGSpGHDhummm24ynAgAAHMogLCFZ599VllZWWrUqJHzM4AAANgVBRCV3hdffKE5c+bIy8tLs2bNUkBAgOlIAAAYRQFEpZaXl+e82GPgwIGKiooynAgAAPMogKjURo0apT179ig8PFyJiYmm4wAA4BZsXQATExPVpk0bVa9eXbVr11ZsbKx27dplOhbKyaZNm5SUlCRJmj59uqpXr242EAAAbsLWBXD9+vUaNGiQvvnmG33++ec6deqUOnbsqLy8PNPRcIn++OMPxcXFyeFwqFevXurcubPpSAAAuA1f0wFMSk5OLvZ87ty5ql27ttLS0vSXv/zFUCqUhxEjRmjnzp0KCwvTW2+9ZToOAABuxdYF8M+ys7MlSZdffnmJ/56fn6/8/Hzn85ycHJfkQumsX79eb7zxhiRp1qxZqlWrluFEAAC4F1tPAf8vh8OhIUOGKCoqSk2bNi1xn8TERAUHBzsf4eHhLk6JC8nNzVW/fv1kWZYeeeQRdenSxXQkAADcDgXwvwYNGqRt27Zp0aJF59wnISFB2dnZzse+fftcmBAX49lnn9WePXt09dVXa9KkSabjAADglpgCljR48GCtWrVKX375pa688spz7ufv7y9/f38XJkNpJCcna8aMGZKkOXPmKCgoyHAiAADck60LoGVZevLJJ7V8+XKtW7dODRo0MB0JZXT06FE98sgjkqSnnnpKHTp0MJwIAAD3ZesCOGjQIC1cuFArV65U9erVlZmZKUkKDg5WYGCg4XS4WJZlaeDAgTpw
4ICuv/56bvgMAMAF2PozgNOmTVN2drbat2+vOnXqOB+LFy82HQ2lMG/ePC1atEg+Pj567733VLVqVdORAABwa7YeAbQsy3QEXKLdu3dr8ODBkqSxY8cqIiLCcCIAANyfrUcA4dkKCgrUq1cv5eXlqX379ho2bJjpSPBQU6ZMUf369RUQEKCIiAht2rTpnPvOnDlT7dq1U82aNVWzZk1FR0efd38AcEcUQHisESNGKC0tTZdffrnef/99+fj4mI4ED7R48WLFx8dr9OjR2rx5s5o3b65OnTrp4MGDJe6/bt06Pfjgg/rXv/6l1NRUhYeHq2PHjtq/f7+LkwNA2XlZzIOWWU5OjoKDg5Wdnc0tR1zs888/V8eOHSVJy5cvV2xsrNlAMKI8zsGIiAi1adNGb7/9tqQzN4UPDw/Xk08+qeHDh1/w+MLCQtWsWVNvv/22+vbt67LcAMqOc5ARQHigQ4cOOX/RPv7445Q/lFlBQYHS0tIUHR3t3Obt7a3o6GilpqZe1H/jxIkTOnXq1DmXkJTOLCOZk5NT7AEAJlEA4VEcDofi4uKUmZmpJk2aaOLEiaYjwYMdPnxYhYWFCg0NLbY9NDTUeVuoCxk2bJjq1q1brET+GctIAnA3FEB4lMTERH366acKCAjQBx98wC1fYNSECRO0aNEiLV++XAEBAefcj2UkAbgbW98GBp4lJSVFo0aNkiRNnTpVN910k+FE8HQhISHy8fFRVlZWse1ZWVkKCws777Gvv/66JkyYoC+++OKCr0WWkQTgbhgBhEfYv3+/HnzwQTkcDj3yyCPq16+f6UioBPz8/NSqVSulpKQ4tzkcDqWkpCgyMvKcx7366qsaN26ckpOT1bp1a1dEBYByxQgg3N6pU6fUo0cPHTp0SM2bN9fkyZNNR0IlEh8fr7i4OLVu3Vpt27ZVUlKS8vLynH9k9O3bV/Xq1XMuMfjKK69o1KhRWrhwoerXr+/8rOBll12myy67zNj3AQClQQGE2xs+fLi+/vprBQUFaenSpazTjHJV9MfFqFGjlJmZqRYtWig5Odl5YcjevXvl7f1/kyXTpk1TQUGB7r///mL/ndGjR+vFF190ZXQAKDPuA3gJuI9QxVu2bJn+9re/SeJ+fzibp56DnpobqCw4B/kMINzYDz/8oLi4OEnSs88+S/kDAKCcUADhln7//Xd17dpVx48fV/v27TV+/HjTkQAAqDQogHA7p06dUvfu3ZWRkaEGDRroww8/VJUqVUzHAgCg0qAAwu0888wz+uc//6lq1app5cqVCgkJMR0JAIBKhQIItzJr1iznbV7mz5+vZs2aGU4EAEDlQwGE2/jqq680cOBASdLYsWO56AMAgApCAYRbyMjIULdu3XTq1Ck98MADGjFihOlIAABUWhRAGHfkyBF17txZhw4dUsuWLTVnzhx5eXmZjgUAQKVFAYRRJ0+eVGxsrHbt2qXw8HCtWrVK1apVMx0LAIBKjQIIYxwOh/r166d///vfCgoK0po1a1S3bl3TsQAAqPQogDDmhRde0KJFi+Tr66tly5apadOmpiMBAGALFEAY8eabb2rChAmSztz65c477zScCAAA+6AAwuXef/99DRkyRJI0btw453q/AADANSiAcKlPPvlE/fr1kyQNGTJEL7zwguFEAADYDwUQLvPll1+qe/fuKiwsVFxcnCZOnMjtXgAAMIACCJf49ttvFRMTo5MnT6pr166aNWuWvL15+QEAYAK/gVHhNm/erI4dOyonJ0e3336788pfAABgBgUQFSo9PV3R0dE6duyYoqKi9MknnygwMNB0LAAAbI0CiAqzdetWRUdH6+jRo7rlllu0Zs0aVa9e3XQsAABsjwKICvH999/rzjvv1O+//642bdooOTlZQUFBpmMBAABRAFEBNm3apPbt2+vQoUNq1aqV1q5dq+DgYNOxAADAf1EAUa7Wr1+vO++8U0ePHlVkZKS++OIL1ahRw3QsAADwPyiAKDfJycm6
++67dfz4cd1xxx1au3Yt5Q8AADdEAUS5WLJkibp27aqTJ0/q3nvv1erVq3XZZZeZjgUAAEpAAcQlmzRpknr06KFTp06pe/fuWrZsmQICAkzHAgAA50ABRJk5HA4NHTpUzzzzjCTpqaee0sKFC1WlShXDyQAAwPmwHAPK5OTJk+rTp4+WLl0qSXr99dcVHx/P2r4AAHgACiBK7cCBA+rWrZs2btwoPz8/zZs3Tz179jQdCwAAXCQKIErl22+/VWxsrA4cOKCaNWtq2bJlat++velYAACgFPgMIC7a/Pnz1a5dOx04cEBNmjTRt99+S/kDAMADUQBxQQUFBRo6dKj69Omj/Px8xcTEKDU1VQ0bNjQdDQAAlAEFEOf166+/6i9/+YuSkpIkSQkJCVqxYgXr+gIA4MH4DCDOadWqVerbt6+OHj2qGjVqaO7cubrvvvtMxwIAAJeIEUCc5eTJk4qPj1dMTIyOHj2qNm3aaPPmzZQ/AAAqCQogitm8ebNatWqlN954Q5L05JNP6t///rcaNGhgOBkAACgvFEBIkk6fPq2XXnpJERER2r59u0JDQ/XJJ5/orbfekr+/v+l4AACgHPEZQCgtLU2PPfaY0tLSJEl/+9vfNH36dIWEhBhOBgAAKgIjgDaWk5Ojp59+Wm3btlVaWppq1Kih+fPn68MPP6T8AQBQiTECaEOWZWnp0qUaOnSo9u/fL0nq1auXJk2apNDQUMPpAABARaMA2szGjRv1zDPP6Ouvv5YkNWzYUFOnTlXHjh0NJwMAAK7CFLBN7NmzRz179tQtt9yir7/+WoGBgRo9erS2bt1K+QMAwGYYAazkMjIyNH78eM2bN0+nT5+Wl5eXHnroIY0bN0716tUzHQ8AABhAAaykdu/erfHjx2v+/PkqLCyUJN1111169dVX1aJFC7PhAACAURTASsThcGjt2rWaPHmyPv30U1mWJUm6++67NXLkSN16662GEwIAAHdAAawEDh48qIULF2rq1Kn68ccfndtjYmI0YsQItW3b1mA6AADgbiiAHurEiRP6+OOP9f777+uzzz5zTvMGBQXp4Ycf1qBBg3TttdcaTgkAANyR7QvglClT9NprrykzM1PNmzfX5MmT3XbE7LffftOaNWu0evVqrV27Vnl5ec5/a9OmjR566CH17dtXl112mcGUAADA3dm6AC5evFjx8fGaPn26IiIilJSUpE6dOmnXrl2qXbu26Xg6cOCAvv76a23YsEFffvmlNm/eXOzf69evr7///e/q3bu3GjVqZCglAADwNLYugJMmTVL//v3Vr18/SdL06dO1evVqzZ49W8OHD3dJhsLCQh06dEj79u3Tjh07tH37dm3fvl3ff/+9fv3117P2b9u2rbp06aIuXbqoZcuW8vbmVo4AAKB0bFsACwoKlJaWpoSEBOc2b29vRUdHKzU1tcRj8vPzlZ+f73yek5NzUV9r1KhR+vHHH3Xy5En98ccfOnnypHJzc/Xbb78pKytLDoejxOO8vb3VrFkzRUVFKSoqSnfccYfCwsJK8V0CAACczbYF8PDhwyosLDxr7dvQ0FDt3LmzxGMSExM1ZsyYUn+tTz/9VN999905/93b21uhoaFq1KiRmjRposaNG6tJkyZq1aqVgoKCSv31AAAAzse2BbAsEhISFB8f73yek5Oj8PDwCx43ZMgQHT58WAEBAQoMDFRAQICqVaumsLAw1a1bV1dccYV8fflfAQAAXMO2rSMkJEQ+Pj7Kysoqtj0rK+uc06z+/v7y9/cv9dfq3bt3mTICAABUBNteQeDn56dWrVopJSXFuc3hcCglJUWRkZEGkwEAAFQs244ASlJ8fLzi4uLUunVrtW3bVklJScrLy3NeFQwAAFAZ2boA9ujRQ4cOHdKoUaOUmZmpFi1aKDk5+awLQwAAACoTWxdASRo8eLAGDx5sOgYAAIDL2PYzgAAAAHZFAQQAALAZCiAAAIDNUAABAABshgIIAABgMxRAAAAA
m6EAArC9KVOmqH79+goICFBERIQ2bdp03v0//PBDNWrUSAEBAWrWrJnWrFnjoqQAUD4ogABsbfHixYqPj9fo0aO1efNmNW/eXJ06ddLBgwdL3H/Dhg168MEH9cgjj2jLli2KjY1VbGystm3b5uLkAFB2XpZlWaZDeKqcnBwFBwcrOztbQUFBpuMAtlMe52BERITatGmjt99+W9KZNcHDw8P15JNPavjw4Wft36NHD+Xl5WnVqlXObbfccotatGih6dOnuyw3gLLjHGQlkEtS1J1zcnIMJwHsqejcK+vfsQUFBUpLS1NCQoJzm7e3t6Kjo5WamlriMampqYqPjy+2rVOnTlqxYsU5v05+fr7y8/Odz7Ozs4vlB+Bal/reURlQAC9Bbm6uJCk8PNxwEsDecnNzFRwcXOrjDh8+rMLCwrPW/w4NDdXOnTtLPCYzM7PE/TMzM8/5dRITEzVmzJiztvPeAZj1+++/l+m9ozKgAF6CunXrat++fapevbq8vLzOuV9OTo7Cw8O1b98+2w418zM4g59D+f4MLMtSbm6u6tatW07pKkZCQkKxUcNjx47p6quv1t69ez3ml48nvnbJ7BqemDk7O1tXXXWVLr/8ctNRjKEAXgJvb29deeWVF71/UFCQx5wcFYWfwRn8HMrvZ3ApBSokJEQ+Pj7Kysoqtj0rK0thYWElHhMWFlaq/SXJ399f/v7+Z20PDg72uNeBJ752yewanpjZ29u+18La9zsHYHt+fn5q1aqVUlJSnNscDodSUlIUGRlZ4jGRkZHF9pekzz///Jz7A4A7YgQQgK3Fx8crLi5OrVu3Vtu2bZWUlKS8vDz169dPktS3b1/Vq1dPiYmJkqSnn35at99+uyZOnKguXbpo0aJF+u677zRjxgyT3wYAlAoF0AX8/f01evToEqeA7IKfwRn8HNzvZ9CjRw8dOnRIo0aNUmZmplq0aKHk5GTnhR579+4tNk106623auHChRoxYoSef/55XXfddVqxYoWaNm160V/T3X4GF4PMrkFm1/DEzOWN+wACAADYDJ8BBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAXmDJliurXr6+AgABFRERo06ZNpiO51JdffqmYmBjVrVtXXl5e510ztTJKTExUmzZtVL16ddWuXVuxsbHatWuX6VguN23aNN10003Om8VGRkbq008/NR2rQpT2nP/www/VqFEjBQQEqFmzZlqzZo2LkhZXmtwzZ85Uu3btVLNmTdWsWVPR0dFG3tvK+v66aNEieXl5KTY2tmIDlqC0mY8dO6ZBgwapTp068vf31/XXX+/y10hpMyclJemGG25QYGCgwsPDNXToUJ08edJFacv2e2fdunW6+eab5e/vr2uvvVZz586t8JxGWahQixYtsvz8/KzZs2dbP/zwg9W/f3+rRo0aVlZWluloLrNmzRrrhRdesJYtW2ZJspYvX246kkt16tTJmjNnjrVt2zYrPT3d6ty5s3XVVVdZx48fNx3NpT7++GNr9erV1u7du61du3ZZzz//vFWlShVr27ZtpqOVq9Ke819//bXl4+Njvfrqq9b27dutESNGWFWqVLG2bt3q1rl79eplTZkyxdqyZYu1Y8cO66GHHrKCg4Ot//f//p/bZi6yZ88eq169ela7du2s++67zzVh/6u0mfPz863WrVtbnTt3tr766itrz5491rp166z09HS3zbxgwQLL39/fWrBggbVnzx7rs88+s+rUqWMNHTrUZZlL+3snIyPDqlq1qhUfH29t377dmjx5suXj42MlJye7JrABFMAK1rZtW2vQoEHO54WFhVbdunWtxMREg6nMsWMB/LODBw9akqz169ebjmJczZo1rVmzZpmOUa5Ke853797d6tKlS7FtERER1mOPPVahOf/sUt+rTp8+bVWvXt2aN29eRUU8S1kynz592rr11lutWbNmWXFxcS4vgKXNPG3aNOuaa66xCgoKXBXxLKXNPGjQIOuO
O+4oti0+Pt6Kioqq0JzncjG/d/7xj39YN954Y7FtPXr0sDp16lSBycxiCrgCFRQUKC0tTdHR0c5t3t7eio6OVmpqqsFkMCk7O1uSbL0IeWFhoRYtWqS8vLxKtYRaWc751NTUYvtLUqdOnVz6HlEe71UnTpzQqVOnXPa6LmvmsWPHqnbt2nrkkUdcEbOYsmT++OOPFRkZqUGDBik0NFRNmzbV+PHjVVhY6LaZb731VqWlpTmniTMyMrRmzRp17tzZJZnLwh3OQ1djJZAKdPjwYRUWFjpXFCgSGhqqnTt3GkoFkxwOh4YMGaKoqKhSrRxRWWzdulWRkZE6efKkLrvsMi1fvlxNmjQxHavclOWcz8zMLHH/zMzMCsv5Z+XxXjVs2DDVrVv3rF+iFaUsmb/66iu9++67Sk9Pd0HCs5Ulc0ZGhv75z3+qd+/eWrNmjX766ScNHDhQp06d0ujRo90yc69evXT48GHddtttsixLp0+f1uOPP67nn3++wvOW1bnOw5ycHP3xxx8KDAw0lKziMAIIuNCgQYO0bds2LVq0yHQUI2644Qalp6dr48aNeuKJJxQXF6ft27ebjoVLNGHCBC1atEjLly9XQECA6Tglys3NVZ8+fTRz5kyFhISYjnPRHA6HateurRkzZqhVq1bq0aOHXnjhBU2fPt10tHNat26dxo8fr6lTp2rz5s1atmyZVq9erXHjxpmOhv/BCGAFCgkJkY+Pj7Kysoptz8rKUlhYmKFUMGXw4MFatWqVvvzyS1155ZWm4xjh5+ena6+9VpLUqlUrffvtt3rzzTf1zjvvGE5WPspyzoeFhRl/j7iU96rXX39dEyZM0BdffKGbbrqpImMWU9rMP//8s3755RfFxMQ4tzkcDkmSr6+vdu3apYYNG7pVZkmqU6eOqlSpIh8fH+e2xo0bKzMzUwUFBfLz83O7zCNHjlSfPn306KOPSpKaNWumvLw8DRgwQC+88EKxtbXdxbnOw6CgoEo5+icxAlih/Pz81KpVK6WkpDi3ORwOpaSkVKrPPeH8LMvS4MGDtXz5cv3zn/9UgwYNTEdyGw6HQ/n5+aZjlJuynPORkZHF9pekzz//3KXvEWV9r3r11Vc1btw4JScnq3Xr1q6I6lTazI0aNdLWrVuVnp7ufHTt2lUdOnRQenq6wsPD3S6zJEVFRemnn35yllVJ2r17t+rUqVPh5a+smU+cOHFWySsqsJZlVVzYS+AO56HLGb4IpdJbtGiR5e/vb82dO9favn27NWDAAKtGjRpWZmam6Wguk5uba23ZssXasmWLJcmaNGmStWXLFuvXX381Hc0lnnjiCSs4ONhat26d9dtvvzkfJ06cMB3NpYYPH26tX7/e2rNnj/X9999bw4cPt7y8vKy1a9eajlauLnTO9+nTxxo+fLhz/6+//try9fW1Xn/9dWvHjh3W6NGjjd0GpjS5J0yYYPn5+VlLly4t9rrOzc1128x/ZuIq4NJm3rt3r1W9enVr8ODB1q5du6xVq1ZZtWvXtl566SW3zTx69GirevXq1gcffGBlZGRYa9eutRo2bGh1797dZZkv9Htn+PDhVp8+fZz7F90G5rnnnrN27NhhTZkyhdvA4NJNnjzZuuqqqyw/Pz+rbdu21jfffGM6kkv961//siSd9YiLizMdzSVK+t4lWXPmzDEdzaUefvhh6+qrr7b8/PysK664wrrzzjsrXfkrcr5z/vbbbz/rtb9kyRLr+uuvt/z8/Kwbb7zRWr16tYsTn1Ga3FdffXWJr+vRo0e7beY/M1EALav0mTds2GBFRERY/v7+1jXXXGO9/PLL1unTp90286lTp6wXX3zRatiwoRUQEGCFh4dbAwcOtI4ePeqyvBf6vRMXF2fdfvvtZx3TokULy8/Pz7rmmmsq/Xu0l2W56XgsAAAAKgSfAQQAALAZCiAAAIDNUAABAABshgIIAABgMxRAAAAAm6EAAgAA2AwFEAAAwGYogAAAADZDAQQAALAZ
CiAAAIDNUAABAABshgIIAABgMxRAAAAAm6EAAgAA2AwFEAAAwGYogAAAADZDAQQAALAZCiAAAIDNUAABAABshgIIAABgMxRAAAAAm6EAAgAA2AwFEAAAwGYogAAAADZDAQQAALAZCiAAAIDNUAABAABshgIIAABgMxRAAAAAm6EAAgAA2AwFEAAAwGb+P+L8AOPWQKQlAAAAAElFTkSuQmCC", "text/html": [ "\n", "
\n", "
\n", " Figure\n", "
\n", - " \n", + " \n", "
\n", " " ], @@ -1731,7 +1738,7 @@ } ], "source": [ - "y_drp2 = - drp_stencil2[0] - 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(drp_stencil2[1:], m)], axis=0)\n", + "y_drp2 = - drp_stencil2[0] - 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(drp_stencil2[1:], m, strict=False)], axis=0)\n", "\n", "fig, ax = plt.subplots(1, 2)\n", "ax[0].plot(x, x**2, 'k')\n", diff --git a/examples/seismic/tutorials/07_DRP_schemes.ipynb b/examples/seismic/tutorials/07_DRP_schemes.ipynb index 80f7c79b78..74f310ad6e 100644 --- a/examples/seismic/tutorials/07_DRP_schemes.ipynb +++ b/examples/seismic/tutorials/07_DRP_schemes.ipynb @@ -199,7 +199,8 @@ } ], "source": [ - "from examples.seismic import TimeAxis\n", + "# NBVAL_IGNORE_OUTPUT\n", + "from examples.seismic import RickerSource, TimeAxis\n", "\n", "t0 = 0. # Simulation starts a t=0\n", "tn = 500. # Simulation lasts 0.5 seconds (500 ms)\n", @@ -207,9 +208,6 @@ "\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", "\n", - "# NBVAL_IGNORE_OUTPUT\n", - "from examples.seismic import RickerSource\n", - "\n", "f0 = 0.025 # Source peak frequency is 25Hz (0.025 kHz)\n", "src = RickerSource(name='src', grid=model.grid, f0=f0,\n", " npoint=1, time_range=time_range)\n", @@ -235,6 +233,8 @@ "metadata": {}, "outputs": [], "source": [ + "from devito import solve\n", + "\n", "# Define the wavefield with the size of the model and the time dimension\n", "u = TimeFunction(name=\"u\", grid=model.grid, time_order=2, space_order=order)\n", "\n", @@ -244,7 +244,6 @@ "# This discrete PDE can be solved in a time-marching way updating u(t+dt) from the previous time step\n", "# Devito as a shortcut for u(t+dt) which is u.forward. 
We can then rewrite the PDE as\n", "# a time marching updating equation known as a stencil using customized SymPy functions\n", - "from devito import solve\n", "\n", "stencil = Eq(u.forward, solve(pde, u.forward).subs({H: u.laplace}))" ] @@ -620,7 +619,13 @@ "# NBVAL_IGNORE_OUTPUT\n", "fig = plt.figure(figsize=(14, 7))\n", "ax1 = fig.add_subplot(111)\n", - "cont = ax1.imshow(u_DRP.data[0, :, :].T-u.data[0, :, :].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", + "cont = ax1.imshow(\n", + " u_DRP.data[0, :, :].T - u.data[0, :, :].T,\n", + " vmin=-clip,\n", + " vmax=clip,\n", + " cmap=cm.seismic,\n", + " extent=[0, Lx, 0, Lz]\n", + ")\n", "fig.colorbar(cont)\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$z$')\n", diff --git a/examples/seismic/tutorials/08_snapshotting.ipynb b/examples/seismic/tutorials/08_snapshotting.ipynb index 3cf255b8df..080e054b1c 100644 --- a/examples/seismic/tutorials/08_snapshotting.ipynb +++ b/examples/seismic/tutorials/08_snapshotting.ipynb @@ -181,10 +181,9 @@ "factor = round(u.shape[0] / nsnaps) # Get approx nsnaps, for any nt\n", "ucopy = u.data.copy(order='C')\n", "filename = \"naivsnaps.bin\"\n", - "file_u = open(filename, 'wb')\n", - "for it in range(0, nsnaps):\n", - " file_u.write(ucopy[it*factor, :, :])\n", - "file_u.close()" + "with open(filename, 'wb') as fh:\n", + " for it in range(0, nsnaps):\n", + " fh.write(ucopy[it*factor, :, :])" ] }, { @@ -250,10 +249,9 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "fobj = open(\"naivsnaps.bin\", \"rb\")\n", - "snaps = np.fromfile(fobj, dtype=np.float32)\n", - "snaps = np.reshape(snaps, (nsnaps, vnx, vnz)) # reshape vec2mtx, devito format. nx first\n", - "fobj.close()\n", + "with open(\"naivsnaps.bin\", \"rb\") as fh:\n", + " snaps = np.fromfile(fh, dtype=np.float32)\n", + " snaps = np.reshape(snaps, (nsnaps, vnx, vnz)) # reshape vec2mtx, devito format. 
nx first\n", "\n", "plt.rcParams['figure.figsize'] = (20, 20) # Increases figure size\n", "\n", @@ -261,9 +259,9 @@ "plot_num = 5 # Number of images to plot\n", "\n", "for i in range(0, nsnaps, int(nsnaps/plot_num)):\n", - " plt.subplot(1, plot_num+1, imcnt+1)\n", - " imcnt = imcnt + 1\n", - " plt.imshow(np.transpose(snaps[i, :, :]), vmin=-1, vmax=1, cmap=\"seismic\")\n", + " plt.subplot(1, plot_num+1, imcnt+1)\n", + " imcnt = imcnt + 1\n", + " plt.imshow(np.transpose(snaps[i, :, :]), vmin=-1, vmax=1, cmap=\"seismic\")\n", "\n", "plt.show()" ] @@ -393,20 +391,19 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "fobj = open(\"snaps2.bin\", \"rb\")\n", - "snaps = np.fromfile(fobj, dtype=np.float32)\n", - "snaps = np.reshape(snaps, (nsnaps, vnx, vnz))\n", - "fobj.close()\n", + "with open(\"snaps2.bin\", \"rb\") as fh:\n", + " snaps = np.fromfile(fh, dtype=np.float32)\n", + " snaps = np.reshape(snaps, (nsnaps, vnx, vnz))\n", "\n", "plt.rcParams['figure.figsize'] = (20, 20) # Increases figure size\n", "\n", "imcnt = 1 # Image counter for plotting\n", "plot_num = 5 # Number of images to plot\n", "for i in range(0, plot_num):\n", - " plt.subplot(1, plot_num, i+1)\n", - " imcnt = imcnt + 1\n", - " ind = i * int(nsnaps/plot_num)\n", - " plt.imshow(np.transpose(snaps[ind, :, :]), vmin=-1, vmax=1, cmap=\"seismic\")\n", + " plt.subplot(1, plot_num, i+1)\n", + " imcnt = imcnt + 1\n", + " ind = i * int(nsnaps/plot_num)\n", + " plt.imshow(np.transpose(snaps[ind, :, :]), vmin=-1, vmax=1, cmap=\"seismic\")\n", "\n", "plt.show()" ] @@ -620,10 +617,10 @@ "\n", " orig_stdout = sys.stdout\n", "\n", - " f = open(filename, 'w')\n", - " sys.stdout = f\n", + " with open(filename, 'w') as fh:\n", + " sys.stdout = fh\n", + "\n", " print(thingToPrint)\n", - " f.close()\n", "\n", " sys.stdout = orig_stdout\n", "\n", @@ -3348,10 +3345,9 @@ "\n", "filename = \"naivsnaps.bin\"\n", "nsnaps = 100\n", - "fobj = open(filename, \"rb\")\n", - "snapsObj = np.fromfile(fobj, dtype=np.float32)\n", - 
"snapsObj = np.reshape(snapsObj, (nsnaps, vnx, vnz))\n", - "fobj.close()\n", + "with open(filename, \"rb\") as fh:\n", + " snapsObj = np.fromfile(fh, dtype=np.float32)\n", + " snapsObj = np.reshape(snapsObj, (nsnaps, vnx, vnz))\n", "\n", "fig, ax = plt.subplots()\n", "fig.set_size_inches(10, 8)\n", @@ -3372,7 +3368,7 @@ "ani = animation.FuncAnimation(fig, update, frames=nsnaps, interval=50, blit=True)\n", "\n", "plt.close(ani._fig)\n", - "HTML(ani.to_html5_video())\n" + "HTML(ani.to_html5_video())" ] }, { diff --git a/examples/seismic/tutorials/12_time_blocking.ipynb b/examples/seismic/tutorials/12_time_blocking.ipynb index d8cb5f93de..4cc455e5fa 100644 --- a/examples/seismic/tutorials/12_time_blocking.ipynb +++ b/examples/seismic/tutorials/12_time_blocking.ipynb @@ -298,7 +298,7 @@ "import sys\n", "_ = sys.executable\n", "!{sys.executable} -m pip install blosc\n", - "import blosc" + "import blosc # noqa: E402" ] }, { @@ -409,7 +409,7 @@ "shape = (nx, nz) # Number of grid points\n", "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", "origin = (0., 0.) 
# Origin of coordinate system, specified in m.\n", - "extent = tuple([s*(n-1) for s, n in zip(spacing, shape)])\n", + "extent = tuple([s*(n-1) for s, n in zip(spacing, shape, strict=True)])\n", "\n", "# Define the dimensions\n", "x = SpaceDimension(name='x', spacing=Constant(name='h_x', value=extent[0]/(shape[0]-1)))\n", @@ -482,8 +482,14 @@ "print(\"\")\n", "print(f\"src_coordinate X; {src.coordinates.data[0, 0]:+12.4f}\")\n", "print(f\"src_coordinate Z; {src.coordinates.data[0, 1]:+12.4f}\")\n", - "print(f\"rec_coordinates X min/max; {np.min(nl_rec1.coordinates.data[:, 0]):+12.4f} {np.max(nl_rec1.coordinates.data[:, 0]):+12.4f}\")\n", - "print(f\"rec_coordinates Z min/max; {np.min(nl_rec1.coordinates.data[:, 1]):+12.4f} {np.max(nl_rec1.coordinates.data[:, 1]):+12.4f}\")" + "print(\n", + " f'rec_coordinates X min/max; {np.min(nl_rec1.coordinates.data[:, 0]):+12.4f} '\n", + " f'{np.max(nl_rec1.coordinates.data[:, 0]):+12.4f}'\n", + ")\n", + "print(\n", + " f'rec_coordinates Z min/max; {np.min(nl_rec1.coordinates.data[:, 1]):+12.4f} '\n", + " f'{np.max(nl_rec1.coordinates.data[:, 1]):+12.4f}'\n", + ")" ] }, { @@ -916,56 +922,52 @@ "\n", "if os.path.exists(filename):\n", " os.remove(filename)\n", - "f = open(filename, \"ab\")\n", - "\n", - "# Arrays to save offset and length of compressed data\n", - "file_offset = np.zeros(nt, dtype=np.int64)\n", - "file_length = np.zeros(nt, dtype=np.int64)\n", - "\n", - "# The length of the data type, 4 bytes for float32\n", - "itemsize = v2.data[0, :, :].dtype.itemsize\n", - "\n", - "# The length of a an uncompressed wavefield, used to compute compression ratio below\n", - "len0 = 4.0 * np.prod(v2._data[0, :, :].shape)\n", - "\n", - "# Loop over time blocks\n", - "v2_all[:] = 0\n", - "u2.data[:] = 0\n", - "v2.data[:] = 0\n", - "nl_rec2.data[:] = 0\n", - "for kN in range(0, N, 1):\n", - " kt1 = max((kN + 0) * M, 1)\n", - " kt2 = min((kN + 1) * M - 1, nt-2)\n", - " nl_op2(time_m=kt1, time_M=kt2)\n", - "\n", - " # Copy computed Born 
term for correctness testing\n", - " for kt in range(kt1, kt2+1):\n", - "\n", - " # assign\n", - " v2_all[kt, :, :] = v2.data[(kt % M), :, :]\n", - "\n", - " # compression\n", - " c = blosc.compress_ptr(v2._data[(kt % M), :, :].__array_interface__['data'][0],\n", - " np.prod(v2._data[(kt % M), :, :].shape),\n", - " v2._data[(kt % M), :, :].dtype.itemsize, 9, True, 'zstd')\n", - "\n", - " # compression ratio\n", - " cratio = len0 / (1.0 * len(c))\n", - "\n", - " # serialization\n", - " file_offset[kt] = f.tell()\n", - " f.write(c)\n", - " file_length[kt] = len(c)\n", + "with open(filename, \"ab\") as f:\n", + " # Arrays to save offset and length of compressed data\n", + " file_offset = np.zeros(nt, dtype=np.int64)\n", + " file_length = np.zeros(nt, dtype=np.int64)\n", + "\n", + " # The length of the data type, 4 bytes for float32\n", + " itemsize = v2.data[0, :, :].dtype.itemsize\n", + "\n", + " # The length of a an uncompressed wavefield, used to compute compression ratio below\n", + " len0 = 4.0 * np.prod(v2._data[0, :, :].shape)\n", + "\n", + " # Loop over time blocks\n", + " v2_all[:] = 0\n", + " u2.data[:] = 0\n", + " v2.data[:] = 0\n", + " nl_rec2.data[:] = 0\n", + " for kN in range(0, N, 1):\n", + " kt1 = max((kN + 0) * M, 1)\n", + " kt2 = min((kN + 1) * M - 1, nt-2)\n", + " nl_op2(time_m=kt1, time_M=kt2)\n", + "\n", + " # Copy computed Born term for correctness testing\n", + " for kt in range(kt1, kt2+1):\n", + "\n", + " # assign\n", + " v2_all[kt, :, :] = v2.data[(kt % M), :, :]\n", + "\n", + " # compression\n", + " c = blosc.compress_ptr(v2._data[(kt % M), :, :].__array_interface__['data'][0],\n", + " np.prod(v2._data[(kt % M), :, :].shape),\n", + " v2._data[(kt % M), :, :].dtype.itemsize, 9, True, 'zstd')\n", + "\n", + " # compression ratio\n", + " cratio = len0 / (1.0 * len(c))\n", + "\n", + " # serialization\n", + " file_offset[kt] = f.tell()\n", + " f.write(c)\n", + " file_length[kt] = len(c)\n", "\n", " # Uncomment these lines to see per time step 
output\n", "# rms_v1 = np.linalg.norm(v1.data[kt,:,:].reshape(-1))\n", "# rms_v2 = np.linalg.norm(v2_all[kt,:,:].reshape(-1))\n", "# rms_12 = np.linalg.norm(v1.data[kt,:,:].reshape(-1) - v2_all[kt,:,:].reshape(-1))\n", "# print(\"kt1,kt2,len,cratio,|u1|,|u2|,|v1-v2|; %3d %3d %3d %10.4f %12.6e %12.6e %12.6e\" %\n", - "# (kt1, kt2, kt2 - kt1 + 1, cratio, rms_v1, rms_v2, rms_12), flush=True)\n", - "\n", - "# Close the binary file\n", - "f.close()" + "# (kt1, kt2, kt2 - kt1 + 1, cratio, rms_v1, rms_v2, rms_12), flush=True)" ] }, { @@ -1285,32 +1287,31 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Open the binary file in read only mode\n", - "f = open(filename, \"rb\")\n", - "\n", - "# Temporary nd array for decompression\n", - "d = copy.copy(v2._data[0, :, :])\n", - "\n", - "# Array to hold compression ratio\n", - "cratio = np.zeros(nt, dtype=dtype)\n", - "\n", - "# Loop over time blocks\n", - "duFwd2.data[:] = 0\n", - "ln_rec2.data[:] = 0\n", - "for kN in range(0, N, 1):\n", - " kt1 = max((kN + 0) * M, 1)\n", - " kt2 = min((kN + 1) * M - 1, nt-2)\n", - "\n", - " # 1. Seek to file_offset[kt]\n", - " # 2. Read file_length[kt1] bytes from file\n", - " # 3. Decompress wavefield and assign to v2 Buffer\n", - " for kt in range(kt1, kt2+1):\n", - " f.seek(file_offset[kt], 0)\n", - " c = f.read(file_length[kt])\n", - " blosc.decompress_ptr(c, v2._data[(kt % M), :, :].__array_interface__['data'][0])\n", - " cratio[kt] = len0 / (1.0 * len(c))\n", - "\n", - " # Run the operator for this time block\n", - " lf_op2(time_m=kt1, time_M=kt2)\n", + "with open(filename, \"rb\") as f:\n", + " # Temporary nd array for decompression\n", + " d = copy.copy(v2._data[0, :, :])\n", + "\n", + " # Array to hold compression ratio\n", + " cratio = np.zeros(nt, dtype=dtype)\n", + "\n", + " # Loop over time blocks\n", + " duFwd2.data[:] = 0\n", + " ln_rec2.data[:] = 0\n", + " for kN in range(0, N, 1):\n", + " kt1 = max((kN + 0) * M, 1)\n", + " kt2 = min((kN + 1) * M - 1, nt-2)\n", + "\n", + " # 1. 
Seek to file_offset[kt]\n", + " # 2. Read file_length[kt1] bytes from file\n", + " # 3. Decompress wavefield and assign to v2 Buffer\n", + " for kt in range(kt1, kt2+1):\n", + " f.seek(file_offset[kt], 0)\n", + " c = f.read(file_length[kt])\n", + " blosc.decompress_ptr(c, v2._data[(kt % M), :, :].__array_interface__['data'][0])\n", + " cratio[kt] = len0 / (1.0 * len(c))\n", + "\n", + " # Run the operator for this time block\n", + " lf_op2(time_m=kt1, time_M=kt2)\n", "\n", " # Uncomment these lines to see per time step outputs\n", "# for kt in range(kt1,kt2+1):\n", @@ -1644,32 +1645,31 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Open the binary file in read only mode\n", - "f = open(filename, \"rb\")\n", - "\n", - "# Temporary nd array for decompression\n", - "d = copy.copy(v2._data[0, :, :])\n", - "\n", - "# Array to hold compression ratio\n", - "cratio = np.zeros(nt, dtype=dtype)\n", - "\n", - "# Loop over time blocks\n", - "duAdj2.data[:] = 0\n", - "dm2.data[:] = 0\n", - "for kN in range(N-1, -1, -1):\n", - " kt1 = max((kN + 0) * M, 1)\n", - " kt2 = min((kN + 1) * M - 1, nt-2)\n", - "\n", - " # 1. Seek to file_offset[kt]\n", - " # 2. Read file_length[kt1] bytes from file\n", - " # 3. 
Decompress wavefield and assign to v2 Buffer\n", - " for kt in range(kt1, kt2+1, +1):\n", - " f.seek(file_offset[kt], 0)\n", - " c = f.read(file_length[kt])\n", - " blosc.decompress_ptr(c, v2._data[(kt % M), :, :].__array_interface__['data'][0])\n", - " cratio[kt] = len0 / (1.0 * len(c))\n", - "\n", - " # Run the operator for this time block\n", - " la_op2(time_m=kt1, time_M=kt2)\n", + "with open(filename, \"rb\") as f:\n", + " # Temporary nd array for decompression\n", + " d = copy.copy(v2._data[0, :, :])\n", + "\n", + " # Array to hold compression ratio\n", + " cratio = np.zeros(nt, dtype=dtype)\n", + "\n", + " # Loop over time blocks\n", + " duAdj2.data[:] = 0\n", + " dm2.data[:] = 0\n", + " for kN in range(N-1, -1, -1):\n", + " kt1 = max((kN + 0) * M, 1)\n", + " kt2 = min((kN + 1) * M - 1, nt-2)\n", + "\n", + " # 1. Seek to file_offset[kt]\n", + " # 2. Read file_length[kt1] bytes from file\n", + " # 3. Decompress wavefield and assign to v2 Buffer\n", + " for kt in range(kt1, kt2+1, +1):\n", + " f.seek(file_offset[kt], 0)\n", + " c = f.read(file_length[kt])\n", + " blosc.decompress_ptr(c, v2._data[(kt % M), :, :].__array_interface__['data'][0])\n", + " cratio[kt] = len0 / (1.0 * len(c))\n", + "\n", + " # Run the operator for this time block\n", + " la_op2(time_m=kt1, time_M=kt2)\n", "\n", " # Uncomment these lines to see per time step outputs\n", "# for kt in range(kt2,kt1-1,-1):\n", diff --git a/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb b/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb index ab0342e271..d2964f150f 100644 --- a/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb +++ b/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb @@ -135,7 +135,7 @@ "from examples.seismic import Model\n", "from examples.seismic import plot_velocity\n", "from examples.seismic import Receiver\n", - "from examples.seismic import plot_image, AcquisitionGeometry\n", + "from examples.seismic import AcquisitionGeometry\n", "from examples.seismic import TimeAxis\n", 
"\n", "from examples.seismic.self_adjoint import (setup_w_over_q)\n", diff --git a/examples/seismic/tutorials/14_creating_synthetics.ipynb b/examples/seismic/tutorials/14_creating_synthetics.ipynb index a6dbb244bc..287a0caefa 100644 --- a/examples/seismic/tutorials/14_creating_synthetics.ipynb +++ b/examples/seismic/tutorials/14_creating_synthetics.ipynb @@ -69,7 +69,7 @@ "\n", "try:\n", " # Import jinja2 (used for colour coding geology)\n", - " import jinja2\n", + " import jinja2 # noqa: F401\n", "except ModuleNotFoundError:\n", " # Install jinja2\n", " ! pip install jinja2\n", @@ -77,7 +77,7 @@ "\n", "try:\n", " # Check vtk notebook backend is installed\n", - " import ipyvtklink\n", + " import ipyvtklink # noqa: F401\n", "except ModuleNotFoundError:\n", " ! pip install ipyvtklink" ] @@ -333,7 +333,7 @@ " \"\"\"Add a list of points to a surface in a model\"\"\"\n", " xyz = ('X', 'Y', 'Z')\n", " for point in points:\n", - " kwargs = {**dict(zip(xyz, point)), 'surface': surface}\n", + " kwargs = {**dict(zip(xyz, point, strict=True)), 'surface': surface}\n", " model.add_surface_points(**kwargs)\n", "\n", "\n", @@ -1140,7 +1140,15 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "seis_model = Model(vp=reshaped, origin=(0., 0., -1000.), spacing=(10., 10., 10.), shape=shape, nbl=30, space_order=4, bcs=\"damp\")" + "seis_model = Model(\n", + " vp=reshaped,\n", + " origin=(0., 0., -1000.),\n", + " spacing=(10., 10., 10.),\n", + " shape=shape,\n", + " nbl=30,\n", + " space_order=4,\n", + " bcs=\"damp\"\n", + ")" ] }, { diff --git a/examples/seismic/tutorials/15_tti_qp_pure.ipynb b/examples/seismic/tutorials/15_tti_qp_pure.ipynb index 1ce1c7c253..2601fc3be9 100644 --- a/examples/seismic/tutorials/15_tti_qp_pure.ipynb +++ b/examples/seismic/tutorials/15_tti_qp_pure.ipynb @@ -214,8 +214,14 @@ "x, z = model.grid.dimensions\n", "t = model.grid.stepping_dim\n", "\n", - "update_q = Eq(pp[t+1, x, z], ((pp[t, x+1, z] + pp[t, x-1, z])*z.spacing**2 + (pp[t, x, z+1] + pp[t, x, 
z-1])*x.spacing**2 -\n", - " b[x, z]*x.spacing**2*z.spacing**2) / (2*(x.spacing**2 + z.spacing**2)))\n", + "update_q = Eq(\n", + " pp[t+1, x, z],\n", + " (\n", + " (pp[t, x+1, z] + pp[t, x-1, z])*z.spacing**2\n", + " + (pp[t, x, z+1] + pp[t, x, z-1])*x.spacing**2\n", + " - b[x, z]*x.spacing**2*z.spacing**2\n", + " ) / (2*(x.spacing**2 + z.spacing**2))\n", + ")\n", "\n", "bc = [Eq(pp[t+1, x, 0], 0.)]\n", "bc += [Eq(pp[t+1, x, shape[1]+2*nbl-1], 0.)]\n", @@ -2648,8 +2654,8 @@ "# Some useful definitions for plotting if nbl is set to any other value than zero\n", "nxpad, nzpad = shape[0] + 2 * nbl, shape[1] + 2 * nbl\n", "shape_pad = np.array(shape) + 2 * nbl\n", - "origin_pad = tuple([o - s*nbl for o, s in zip(origin, spacing)])\n", - "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])" + "origin_pad = tuple([o - s*nbl for o, s in zip(origin, spacing, strict=True)])\n", + "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad, strict=True)])" ] }, { diff --git a/examples/seismic/utils.py b/examples/seismic/utils.py index e90fc1b028..468e44ddd6 100644 --- a/examples/seismic/utils.py +++ b/examples/seismic/utils.py @@ -228,14 +228,21 @@ def __call__(self, parser, args, values, option_string=None): # E.g., `('advanced', {'par-tile': True})` values = eval(values) if not isinstance(values, tuple) and len(values) >= 1: - raise ArgumentError(self, (f"Invalid choice `{str(values)}` (`opt` must be " - "either str or tuple)")) + raise ArgumentError( + self, + f'Invalid choice `{str(values)}` ' + '(`opt` must be either str or tuple)' + ) opt = values[0] except NameError: # E.g. 
`'advanced'` opt = values if opt not in configuration._accepted['opt']: - raise ArgumentError(self, ("Invalid choice `{}` (choose from {})".format(opt, str(configuration._accepted['opt'])))) + raise ArgumentError( + self, + f'Invalid choice `{opt}`' + f'(choose from {configuration._accepted["opt"]!s})' + ) setattr(args, self.dest, values) parser = ArgumentParser(description=description) diff --git a/examples/seismic/viscoacoustic/operators.py b/examples/seismic/viscoacoustic/operators.py index d4a0d0adb4..e74a62a4a5 100755 --- a/examples/seismic/viscoacoustic/operators.py +++ b/examples/seismic/viscoacoustic/operators.py @@ -612,7 +612,7 @@ def GradientOperator(model, geometry, space_order=4, kernel='sls', time_order=2, eq_kernel = kernels[kernel] eqn = eq_kernel(model, geometry, pa, forward=False, save=False, **kwargs) - if time_order == 1: + if time_order == 1: # noqa: SIM108 gradient_update = Eq(grad, grad - p.dt * pa) else: gradient_update = Eq(grad, grad + p.dt * pa.dt) diff --git a/examples/seismic/viscoacoustic/viscoacoustic_example.py b/examples/seismic/viscoacoustic/viscoacoustic_example.py index fdf6992f89..142d3c433e 100755 --- a/examples/seismic/viscoacoustic/viscoacoustic_example.py +++ b/examples/seismic/viscoacoustic/viscoacoustic_example.py @@ -1,9 +1,9 @@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except ImportError: - pass from devito import norm from devito.logger import info diff --git a/examples/seismic/viscoacoustic/wavesolver.py b/examples/seismic/viscoacoustic/wavesolver.py index 9b7a6c5b8a..ca971ef23e 100755 --- a/examples/seismic/viscoacoustic/wavesolver.py +++ b/examples/seismic/viscoacoustic/wavesolver.py @@ -285,7 +285,7 @@ def jacobian_adjoint(self, rec, p, pa=None, grad=None, r=None, va=None, model=No space_order=self.space_order, staggered=NODE) if self.time_order == 1: - for i in {k.name: k for k in v}.keys(): + for i in {k.name: k for k in v}: kwargs.pop(i) va = 
VectorTimeFunction(name="va", grid=self.model.grid, time_order=self.time_order, diff --git a/examples/seismic/viscoelastic/viscoelastic_example.py b/examples/seismic/viscoelastic/viscoelastic_example.py index 4813131325..f8d9037045 100644 --- a/examples/seismic/viscoelastic/viscoelastic_example.py +++ b/examples/seismic/viscoelastic/viscoelastic_example.py @@ -1,9 +1,9 @@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except ImportError: - pass from devito import norm from devito.logger import info diff --git a/examples/timestepping/ic_superstep.py b/examples/timestepping/ic_superstep.py index d102991d3f..4a8bb5f680 100644 --- a/examples/timestepping/ic_superstep.py +++ b/examples/timestepping/ic_superstep.py @@ -63,7 +63,7 @@ def simulate_ic(parameters, step=1, snapshots=-1): # Initial condition msh = np.meshgrid(*[ np.linspace(o, e, s) for o, e, s - in zip(p.origin, p.extent, p.shape) + in zip(p.origin, p.extent, p.shape, strict=True) ]) ic = gaussian(msh, mu=p.mu, sigma_sq=p.sigma_sq) diff --git a/examples/timestepping/superstep.ipynb b/examples/timestepping/superstep.ipynb index b1e8f34553..838fda9853 100644 --- a/examples/timestepping/superstep.ipynb +++ b/examples/timestepping/superstep.ipynb @@ -234,7 +234,15 @@ } ], "source": [ - "source = SparseTimeFunction(name=\"ricker\", npoint=1, coordinates=[source_loc], nt=nt1, grid=grid, time_order=2, space_order=4)\n", + "source = SparseTimeFunction(\n", + " name=\"ricker\",\n", + " npoint=1,\n", + " coordinates=[source_loc],\n", + " nt=nt1,\n", + " grid=grid,\n", + " time_order=2,\n", + " space_order=4\n", + ")\n", "source.data[:, 0] = rick\n", "src_term = source.inject(field=u.forward, expr=source*velocity**2*dt**2)\n", "\n", diff --git a/examples/userapi/00_sympy.ipynb b/examples/userapi/00_sympy.ipynb index 431cf31534..d0435907c8 100644 --- a/examples/userapi/00_sympy.ipynb +++ b/examples/userapi/00_sympy.ipynb @@ -438,7 +438,7 @@ "# The following piece 
of code is supposed to fail as it is\n", "# The exercise is to fix the code\n", "\n", - "expr2 = x + 2*y +3*z" + "expr2 = x + 2*y +3*z # noqa: F821" ] }, { @@ -542,7 +542,7 @@ "# NBVAL_SKIP\n", "# The following code will error until the code in cell 16 above is\n", "# fixed\n", - "z" + "z # noqa: F821" ] }, { diff --git a/examples/userapi/02_apply.ipynb b/examples/userapi/02_apply.ipynb index 9a391ad3c8..d8776e25eb 100644 --- a/examples/userapi/02_apply.ipynb +++ b/examples/userapi/02_apply.ipynb @@ -637,7 +637,10 @@ "\n", "function_size = (f.size_allocated + g.size_allocated + a.size_allocated)*np.dtype(f.dtype).itemsize\n", "\n", - "print(f\"Functions have a total size of {function_size} bytes, but {memreport['host']} bytes are consumed by the `Operator`\")" + "print(\n", + " f'Functions have a total size of {function_size} bytes, '\n", + " f'but {memreport[\"host\"]} bytes are consumed by the `Operator`'\n", + ")" ] }, { diff --git a/examples/userapi/03_subdomains.ipynb b/examples/userapi/03_subdomains.ipynb index 341f5edee8..3980638815 100644 --- a/examples/userapi/03_subdomains.ipynb +++ b/examples/userapi/03_subdomains.ipynb @@ -304,7 +304,6 @@ " name = 'inner'\n", "\n", " def define(self, dimensions):\n", - " d = dimensions\n", " return {d: ('middle', 1, 1) for d in dimensions}" ] }, @@ -796,9 +795,15 @@ " subdomain=ur)\n", "\n", "u_v_l = Eq(v.forward, model.damp*(v + s*ro*div(tau)), subdomain=lr)\n", - "u_t_l = Eq(tau.forward,\n", - " model.damp*(tau + s*l*diag(div(v.forward)) + s*mu*(grad(v.forward) + grad(v.forward).transpose(inner=False))),\n", - " subdomain=lr)" + "u_t_l = Eq(\n", + " tau.forward,\n", + " model.damp*(\n", + " tau\n", + " + s*l*diag(div(v.forward))\n", + " + s*mu*(grad(v.forward) + grad(v.forward).transpose(inner=False))\n", + " ),\n", + " subdomain=lr\n", + ")" ] }, { diff --git a/examples/userapi/04_boundary_conditions.ipynb b/examples/userapi/04_boundary_conditions.ipynb index 3eb917bfbd..3f662de5ea 100644 --- 
a/examples/userapi/04_boundary_conditions.ipynb +++ b/examples/userapi/04_boundary_conditions.ipynb @@ -429,7 +429,7 @@ " velocity free-surface. This is the MPI-safe method\n", " of implementing a free-surface boundary condition\n", " in Devito.\n", - " \n", + "\n", " Parameters\n", " ----------\n", " eq : Eq\n", diff --git a/examples/userapi/05_conditional_dimension.ipynb b/examples/userapi/05_conditional_dimension.ipynb index 5ae8451cd5..7f5e75fed5 100644 --- a/examples/userapi/05_conditional_dimension.ipynb +++ b/examples/userapi/05_conditional_dimension.ipynb @@ -789,7 +789,7 @@ "eqii = Inc(k, 1, implicit_dims=(f.dimensions + (ci,)))\n", "eqs.append(eqii)\n", "\n", - "for n, i in enumerate(f.dimensions):\n", + "for n, _ in enumerate(f.dimensions):\n", " eqs.append(Eq(g[k, n], f.dimensions[n], implicit_dims=(f.dimensions + (ci,))))\n", "\n", "# TODO: Must be language='C' for now due to issue #2061\n", diff --git a/examples/userapi/07_functions_on_subdomains.ipynb b/examples/userapi/07_functions_on_subdomains.ipynb index a0df920fbc..f18747348b 100644 --- a/examples/userapi/07_functions_on_subdomains.ipynb +++ b/examples/userapi/07_functions_on_subdomains.ipynb @@ -895,7 +895,7 @@ "origin = (0., 0.)\n", "shape = (201, 201)\n", "spacing = (5., 5.)\n", - "extent = tuple((sh-1)*sp for sh, sp in zip(shape, spacing))\n", + "extent = tuple((sh-1)*sp for sh, sp in zip(shape, spacing, strict=True))\n", "\n", "# Layered model\n", "vp = np.full(shape, 1.5)\n", @@ -2839,7 +2839,10 @@ "\n", "# Elastic update\n", "pde_v = v.dt - b*div(tau) + damp*v.forward\n", - "pde_tau = tau.dt - lam*diag(div(v.forward)) - mu*(grad(v.forward) + grad(v.forward).transpose(inner=False)) + damp*tau.forward\n", + "pde_tau = tau.dt \\\n", + " - lam*diag(div(v.forward)) \\\n", + " - mu*(grad(v.forward) + grad(v.forward).transpose(inner=False)) \\\n", + " + damp*tau.forward\n", "\n", "eq_v = Eq(v.forward, solve(pde_v, v.forward), subdomain=lowerfield)\n", "eq_t = Eq(tau.forward, solve(pde_tau, 
tau.forward), subdomain=lower)\n", diff --git a/requirements-testing.txt b/requirements-testing.txt index b3d7f3db49..464dbc15d3 100644 --- a/requirements-testing.txt +++ b/requirements-testing.txt @@ -9,4 +9,3 @@ click<9.0 cloudpickle<3.1.3 ipympl<0.9.9 ipykernel<7.0.0 -pytest-timeout diff --git a/tests/test_adjoint.py b/tests/test_adjoint.py index 0dec46f6f1..a0dcc69169 100644 --- a/tests/test_adjoint.py +++ b/tests/test_adjoint.py @@ -114,7 +114,10 @@ def test_adjoint_F(self, mkey, shape, kernel, space_order, time_order, setup_fun # Adjoint test: Verify matches closely term1 = inner(srca, solver.geometry.src) term2 = norm(rec) ** 2 - info(f': {term1:f}, : {term2:f}, difference: {(term1 - term2)/term1:4.4e}, ratio: {term1 / term2:f}') + info( + f': {term1:f}, : {term2:f}, ' + f'difference: {(term1 - term2)/term1:4.4e}, ratio: {term1 / term2:f}' + ) assert np.isclose((term1 - term2)/term1, 0., atol=1.e-11) @pytest.mark.parametrize('mkey, shape, kernel, space_order, time_order, setup_func', [ @@ -191,7 +194,10 @@ def test_adjoint_J(self, mkey, shape, kernel, space_order, time_order, setup_fun # Adjoint test: Verify matches closely term1 = np.dot(im.data.reshape(-1), dm.reshape(-1)) term2 = norm(du)**2 - info(f': {term1:f}, : {term2:f}, difference: {(term1 - term2)/term1:4.4e}, ratio: {term1 / term2:f}') + info( + f': {term1:f}, : {term2:f}, ' + f'difference: {(term1 - term2)/term1:4.4e}, ratio: {term1 / term2:f}' + ) assert np.isclose((term1 - term2)/term1, 0., atol=1.e-12) @pytest.mark.parametrize('shape, coords', [ diff --git a/tests/test_benchmark.py b/tests/test_benchmark.py index 83915f57a8..dfb71a3135 100644 --- a/tests/test_benchmark.py +++ b/tests/test_benchmark.py @@ -36,9 +36,10 @@ def test_bench(mode, problem, op): baseline = os.path.realpath(__file__).split("tests/test_benchmark.py")[0] benchpath = f'{baseline}benchmarks/user/benchmark.py' - command_bench = [pyversion, benchpath, mode, - '-P', problem, '-d', '%d' % nx, '%d' % ny, '%d' % nz, '--tn', - 
'%d' % tn, '-op', op] + command_bench = [ + pyversion, benchpath, mode, '-P', problem, + '-d', str(nx), str(ny), str(nz), '--tn', str(tn), '-op', op + ] if mode == "bench": command_bench.extend(['-x', '1']) check_call(command_bench) @@ -48,14 +49,14 @@ def test_bench(mode, problem, op): base_filename = problem filename_suffix = '.json' arch = 'arch[unknown]' - shape = 'shape[%d,%d,%d]' % (nx, ny, nz) + shape = f'shape[{nx}{ny}{nz}]' nbl = 'nbl[10]' - t = 'tn[%d]' % tn + t = f'tn[{tn}]' so = 'so[2]' to = 'to[2]' opt = 'opt[advanced]' at = 'at[aggressive]' - nt = 'nt[%d]' % nthreads + nt = f'nt[{nthreads}]' mpi = 'mpi[False]' np = 'np[1]' rank = 'rank[0]' diff --git a/tests/test_builtins.py b/tests/test_builtins.py index 1a5c98dcb0..275ee4bbe2 100644 --- a/tests/test_builtins.py +++ b/tests/test_builtins.py @@ -112,7 +112,7 @@ def test_assign_parallel(self, mode): stop = loc_shape*(loc_coords+1) slices = [] - for i, j in zip(start, stop): + for i, j in zip(start, stop, strict=True): slices.append(slice(i, j, 1)) slices = as_tuple(slices) assert np.all(a[slices] - np.array(g.data[:]) == 0) @@ -194,7 +194,7 @@ def test_gs_parallel(self, mode): stop = loc_shape*(loc_coords+1) slices = [] - for i, j in zip(start, stop): + for i, j in zip(start, stop, strict=True): slices.append(slice(i, j, 1)) slices = as_tuple(slices) assert np.all(sp_smoothed[slices] - np.array(dv_smoothed.data[:]) == 0) diff --git a/tests/test_caching.py b/tests/test_caching.py index 71ace0680a..0cc49cc833 100644 --- a/tests/test_caching.py +++ b/tests/test_caching.py @@ -581,7 +581,7 @@ def test_clear_cache(self, operate_on_empty_cache, nx=1000, ny=1000): grid = Grid(shape=(nx, ny), dtype=np.float64) cache_size = len(_SymbolCache) - for i in range(10): + for _ in range(10): assert(len(_SymbolCache) == cache_size) Function(name='u', grid=grid, space_order=2) @@ -604,7 +604,7 @@ def test_clear_cache_with_Csymbol(self, operate_on_empty_cache, nx=1000, ny=1000 ncreated = 0 assert(len(_SymbolCache) == 
cache_size + ncreated) - u._C_symbol + _ = u._C_symbol # Cache size won't change since _C_symbol isn't cached by devito to # avoid circular references in the cache assert(len(_SymbolCache) == cache_size + ncreated) @@ -678,7 +678,7 @@ def test_sparse_function(self, operate_on_empty_cache): ncreated = 2+1+2+2+2+1+4 # Note that injection is now lazy so no new symbols should be created assert len(_SymbolCache) == cur_cache_size - i.evaluate + _ = i.evaluate assert len(_SymbolCache) == cur_cache_size + ncreated diff --git a/tests/test_cse.py b/tests/test_cse.py index fe41ca0564..cb459ace26 100644 --- a/tests/test_cse.py +++ b/tests/test_cse.py @@ -109,11 +109,11 @@ def test_default_algo(exprs, expected, min_cost): exprs[i] = DummyEq(indexify(diffify(eval(e).evaluate))) counter = generator() - make = lambda _: CTemp(name='r%d' % counter()).indexify() + make = lambda _: CTemp(name=f'r{counter()}').indexify() processed = _cse(exprs, make, min_cost) assert len(processed) == len(expected) - assert all(str(i.rhs) == j for i, j in zip(processed, expected)) + assert all(str(i.rhs) == j for i, j in zip(processed, expected, strict=True)) def test_temp_order(): @@ -241,11 +241,11 @@ def test_advanced_algo(exprs, expected): exprs[i] = DummyEq(indexify(diffify(eval(e).evaluate))) counter = generator() - make = lambda _: CTemp(name='r%d' % counter(), dtype=np.float32).indexify() + make = lambda _: CTemp(name=f'r{counter()}', dtype=np.float32).indexify() processed = _cse(exprs, make, mode='advanced') assert len(processed) == len(expected) - assert all(str(i.rhs) == j for i, j in zip(processed, expected)) + assert all(str(i.rhs) == j for i, j in zip(processed, expected, strict=True)) def test_advanced_algo_order(): @@ -261,7 +261,7 @@ def test_advanced_algo_order(): eq_b = DummyEq(indexify(diffify(Eq(v.forward, v + u.forward).evaluate))) counter = generator() - make = lambda _: CTemp(name='r%d' % counter(), dtype=np.float32).indexify() + make = lambda _: CTemp(name=f'r{counter()}', 
dtype=np.float32).indexify() processed = _cse([eq0, eq1, eq_b], make, mode='advanced') # Three input equation and 2 CTemps diff --git a/tests/test_data.py b/tests/test_data.py index 62efb93739..73a76fcc79 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -131,8 +131,8 @@ def test_broadcasting(self): u.data[:] = v except ValueError: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Assign from array having shape with some 1-valued entries v = np.zeros(shape=(4, 1, 4), dtype=u.dtype) @@ -175,12 +175,12 @@ def test_illegal_indexing(self): try: u.data[5] - assert False + raise AssertionError('Assert False') except IndexError: pass try: v.data[nt] - assert False + raise AssertionError('Assert False') except IndexError: pass @@ -269,8 +269,9 @@ def test_w_halo_wo_padding(self): assert u._offset_domain == (2, 2, 2) assert u._offset_halo == ((0, 6), (0, 6), (0, 6)) assert u._offset_owned == ((2, 4), (2, 4), (2, 4)) - assert tuple(i + j*2 for i, j in zip(u.shape, u._size_halo.left)) ==\ - u.shape_with_halo + assert tuple( + i + j*2 for i, j in zip(u.shape, u._size_halo.left, strict=True) + ) == u.shape_with_halo # Try with different grid shape and space_order grid2 = Grid(shape=(3, 3, 3)) @@ -278,16 +279,18 @@ def test_w_halo_wo_padding(self): assert u2.shape == (3, 3, 3) assert u2._offset_domain == (4, 4, 4) assert u2._offset_halo == ((0, 7), (0, 7), (0, 7)) - assert tuple(i + j*2 for i, j in zip(u2.shape, u2._size_halo.left)) ==\ - u2.shape_with_halo + assert tuple( + i + j*2 for i, j in zip(u2.shape, u2._size_halo.left, strict=True) + ) == u2.shape_with_halo assert u2.shape_with_halo == (11, 11, 11) def test_wo_halo_w_padding(self): grid = Grid(shape=(4, 4, 4)) u = Function(name='u', grid=grid, space_order=2, padding=((1, 1), (3, 3), (4, 4))) - assert tuple(i + j + k for i, (j, k) in zip(u.shape_with_halo, u._padding)) ==\ - u.shape_allocated + assert tuple( + i + j + k for i, (j, k) in 
zip(u.shape_with_halo, u._padding, strict=True) + ) == u.shape_allocated assert u._halo == ((2, 2), (2, 2), (2, 2)) assert u._size_padding == ((1, 1), (3, 3), (4, 4)) assert u._size_padding.left == u._size_padding.right == (1, 3, 4) @@ -409,7 +412,7 @@ def test_convert_index(self): idx0 = (5, slice(8, 11, 1)) result0 = [] - for i, j in zip(idx0, decomposition): + for i, j in zip(idx0, decomposition, strict=True): result0.append(convert_index(i, j)) expected0 = (0, slice(0, 3, 1)) assert as_tuple(result0) == expected0 @@ -419,58 +422,92 @@ def test_reshape_identity(self): # Identity decomposition assert len(d.reshape(0, 0)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(0, 0), [[0, 1], [2, 3]])) + assert all( + list(i) == j for i, j in zip(d.reshape(0, 0), [[0, 1], [2, 3]], strict=True) + ) def test_reshape_right_only(self): d = Decomposition([[0, 1], [2, 3]], 2) # Extension at right only assert len(d.reshape(0, 2)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(0, 2), [[0, 1], [2, 3, 4, 5]])) + assert all( + list(i) == j for i, j in zip( + d.reshape(0, 2), [[0, 1], [2, 3, 4, 5]], strict=True + ) + ) # Reduction at right affecting one sub-domain only, but not the whole subdomain assert len(d.reshape(0, -1)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(0, -1), [[0, 1], [2]])) + assert all( + list(i) == j for i, j in zip(d.reshape(0, -1), [[0, 1], [2]], strict=True) + ) # Reduction at right over one whole sub-domain assert len(d.reshape(0, -2)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(0, -2), [[0, 1], []])) + assert all( + list(i) == j for i, j in zip(d.reshape(0, -2), [[0, 1], []], strict=True) + ) # Reduction at right over multiple sub-domains assert len(d.reshape(0, -3)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(0, -3), [[0], []])) + assert all( + list(i) == j for i, j in zip(d.reshape(0, -3), [[0], []], strict=True) + ) def test_reshape_left_only(self): d = Decomposition([[0, 1], [2, 3]], 2) # 
Extension at left only assert len(d.reshape(2, 0)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(2, 0), [[0, 1, 2, 3], [4, 5]])) + assert all( + list(i) == j for i, j in zip( + d.reshape(2, 0), [[0, 1, 2, 3], [4, 5]], strict=True + ) + ) # Reduction at left affecting one sub-domain only, but not the whole subdomain assert len(d.reshape(-1, 0)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-1, 0), [[0], [1, 2]])) + assert all( + list(i) == j for i, j in zip(d.reshape(-1, 0), [[0], [1, 2]], strict=True) + ) # Reduction at left over one whole sub-domain assert len(d.reshape(-2, 0)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-2, 0), [[], [0, 1]])) + assert all( + list(i) == j for i, j in zip(d.reshape(-2, 0), [[], [0, 1]], strict=True) + ) # Reduction at right over multiple sub-domains assert len(d.reshape(-3, 0)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-3, 0), [[], [0]])) + assert all( + list(i) == j for i, j in zip(d.reshape(-3, 0), [[], [0]], strict=True) + ) def test_reshape_left_right(self): d = Decomposition([[0, 1], [2, 3]], 2) # Extension at both left and right assert len(d.reshape(1, 1)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(1, 1), [[0, 1, 2], [3, 4, 5]])) + assert all( + list(i) == j for i, j in zip( + d.reshape(1, 1), [[0, 1, 2], [3, 4, 5]], strict=True + ) + ) # Reduction at both left and right assert len(d.reshape(-1, -1)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-1, -1), [[0], [1]])) + assert all( + list(i) == j for i, j in zip(d.reshape(-1, -1), [[0], [1]], strict=True) + ) # Reduction at both left and right, with the right one obliterating one subdomain assert len(d.reshape(-1, -2)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-1, -2), [[0], []])) + assert all( + list(i) == j for i, j in zip(d.reshape(-1, -2), [[0], []], strict=True) + ) # Reduction at both left and right obliterating all subdomains # triggering an exception assert len(d.reshape(-1, -3)) 
== 2 - assert all(list(i) == j for i, j in zip(d.reshape(-1, -3), [[], []])) + assert all( + list(i) == j for i, j in zip(d.reshape(-1, -3), [[], []], strict=True) + ) assert len(d.reshape(-2, -2)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-1, -3), [[], []])) + assert all( + list(i) == j for i, j in zip(d.reshape(-1, -3), [[], []], strict=True) + ) def test_reshape_slice(self): d = Decomposition([[0, 1, 2], [3, 4], [5, 6, 7], [8, 9, 10, 11]], 2) @@ -1209,8 +1246,8 @@ def test_from_replicated_to_distributed(self, mode): u.data[1:3, 1:3] = a[1:2, 1:2] except ValueError: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e @pytest.mark.parallel(mode=4) def test_misc_setup(self, mode): @@ -1232,7 +1269,7 @@ def test_misc_setup(self, mode): # The following should all raise an exception as illegal try: Function(name='c3', grid=grid, dimensions=(y, dy)) - assert False + raise AssertionError('Assert False') except TypeError: # Missing `shape` assert True @@ -1240,7 +1277,7 @@ def test_misc_setup(self, mode): # The following should all raise an exception as illegal try: Function(name='c4', grid=grid, dimensions=(y, dy), shape=(3, 5)) - assert False + raise AssertionError('Assert False') except ValueError: # The provided y-size, 3, doesn't match the y-size in grid (4) assert True @@ -1248,7 +1285,7 @@ def test_misc_setup(self, mode): # The following should all raise an exception as illegal try: Function(name='c4', grid=grid, dimensions=(y, dy), shape=(4,)) - assert False + raise AssertionError('Assert False') except ValueError: # Too few entries for `shape` (two expected, for `y` and `dy`) assert True @@ -1321,7 +1358,7 @@ def test_inversions(self, gslice, mode): lslice = loc_data_idx(f.data._index_glb_to_loc(gslice)) sl = [] Null = slice(-1, -2, None) - for s, gs, d in zip(lslice, gslice, f._decomposition): + for s, gs, d in zip(lslice, gslice, f._decomposition, strict=True): if type(s) is slice and s == 
Null: sl.append(s) elif type(gs) is not slice: @@ -1427,7 +1464,7 @@ def test_sliced_gather_2D(self, start, stop, step, mode): if isinstance(stop, int) or stop is None: stop = [stop for _ in grid.shape] idx = [] - for i, j, k in zip(start, stop, step): + for i, j, k in zip(start, stop, step, strict=True): idx.append(slice(i, j, k)) idx = tuple(idx) @@ -1461,7 +1498,7 @@ def test_sliced_gather_3D(self, start, stop, step, mode): if isinstance(stop, int) or stop is None: stop = [stop for _ in grid.shape] idx = [] - for i, j, k in zip(start, stop, step): + for i, j, k in zip(start, stop, step, strict=True): idx.append(slice(i, j, k)) idx = tuple(idx) diff --git a/tests/test_derivatives.py b/tests/test_derivatives.py index abbd3dd74b..a18771d245 100644 --- a/tests/test_derivatives.py +++ b/tests/test_derivatives.py @@ -112,7 +112,7 @@ def test_unevaluation(self, SymbolType, derivative, dim, expected): expr = getattr(expr, d) assert(expr.__str__() == expected) # Make sure the FD evaluation executes - expr.evaluate + _ = expr.evaluate @pytest.mark.parametrize('expr,expected', [ ('u.dx + u.dy', 'Derivative(u, x) + Derivative(u, y)'), @@ -621,7 +621,7 @@ def test_shifted_grad(self, shift, ndim): f = Function(name="f", grid=grid, space_order=4) for order in [None, 2]: g = grad(f, shift=shift, order=order).evaluate - for i, (d, gi) in enumerate(zip(grid.dimensions, g)): + for i, (d, gi) in enumerate(zip(grid.dimensions, g, strict=True)): x0 = (None if shift is None else d + shift[i] * d.spacing if type(shift) is tuple else d + shift * d.spacing) gk = getattr(f, f'd{d.name}')(x0=x0, fd_order=order).evaluate @@ -1170,7 +1170,7 @@ def test_tensor_algebra(self): v = grad(f)._evaluate(expand=False) assert all(isinstance(i, IndexDerivative) for i in v) - assert all(zip([Add(*i.args) for i in grad(f).evaluate], v.evaluate)) + assert all(zip([Add(*i.args) for i in grad(f).evaluate], v.evaluate, strict=True)) def test_laplacian_opt(self): grid = Grid(shape=(4, 4)) @@ -1178,7 +1178,7 
@@ def test_laplacian_opt(self): assert f.laplacian() == f.laplace df = f.laplacian(order=2, shift=.5) - for (v, d) in zip(df.args, grid.dimensions): + for (v, d) in zip(df.args, grid.dimensions, strict=True): assert v.dims[0] == d assert v.fd_order == (2,) assert v.deriv_order == (2,) diff --git a/tests/test_differentiable.py b/tests/test_differentiable.py index 0f97a2ab2a..65554d4487 100644 --- a/tests/test_differentiable.py +++ b/tests/test_differentiable.py @@ -135,15 +135,20 @@ def test_avg_mode(ndim, io): vars = ['i', 'j', 'k'][:ndim] rule = ','.join(vars) + '->' + ''.join(vars) ndcoeffs = np.einsum(rule, *([coeffs]*ndim)) - args = [{d: d + i * d.spacing for d, i in zip(grid.dimensions, s)} for s in all_shift] + args = [ + {d: d + i * d.spacing for d, i in zip(grid.dimensions, s, strict=True)} + for s in all_shift + ] # Default is arithmetic average - expected = sum(c * a.subs(arg) for c, arg in zip(ndcoeffs.flatten(), args)) + expected = sum( + c * a.subs(arg) for c, arg in zip(ndcoeffs.flatten(), args, strict=True) + ) assert sympy.simplify(a_avg - expected) == 0 # Harmonic average, h(a[.5]) = 1/(.5/a[0] + .5/a[1]) expected = (sum(c * SafeInv(b.subs(arg), b.subs(arg)) - for c, arg in zip(ndcoeffs.flatten(), args))) + for c, arg in zip(ndcoeffs.flatten(), args, strict=True))) assert sympy.simplify(b_avg.args[0] - expected) == 0 assert isinstance(b_avg, SafeInv) assert b_avg.base == b diff --git a/tests/test_dimension.py b/tests/test_dimension.py index b27b727eff..6c6a4c3d76 100644 --- a/tests/test_dimension.py +++ b/tests/test_dimension.py @@ -858,7 +858,7 @@ def test_overrides(self): op.apply(u=u, usave1=u1, usave2=u2, time_M=nt-2) assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1)) - for (uk, fk) in zip((u1, u2), (f1, f2)): + for (uk, fk) in zip((u1, u2), (f1, f2), strict=True): assert np.all([np.allclose(uk.data[i], i*fk) for i in range((nt+fk-1)//fk)]) @@ -886,7 +886,7 @@ def test_overrides_newfact(self): op.apply(u=u, usave1=u2, time_M=nt-2) assert 
np.all(np.allclose(u.data[(nt-1) % 3], nt-1)) - for (uk, fk) in zip((u1, u2), (f1, f2)): + for (uk, fk) in zip((u1, u2), (f1, f2), strict=True): assert np.all([np.allclose(uk.data[i], i*fk) for i in range((nt+fk-1)//fk)]) @@ -1253,15 +1253,15 @@ def test_no_index_sparse(self): radius = 1 indices = [(INT(floor(i)), INT(floor(i))+radius) - for i in sf._position_map.keys()] + for i in sf._position_map] bounds = [i.symbolic_size - radius for i in grid.dimensions] eqs = [Eq(p, v) for (v, p) in sf._position_map.items()] for e, i in enumerate(product(*indices)): args = [j > 0 for j in i] - args.extend([j < k for j, k in zip(i, bounds)]) + args.extend([j < k for j, k in zip(i, bounds, strict=True)]) condition = And(*args, evaluate=False) - cd = ConditionalDimension('sfc%d' % e, parent=sd, condition=condition) + cd = ConditionalDimension(f'sfc{e}', parent=sd, condition=condition) index = [time] + list(i) eqs.append(Eq(f[index], f[index] + sf[cd])) @@ -1290,7 +1290,7 @@ def test_no_index_symbolic(self): # Ensure both code generation and jitting work op = Operator(eq) - op.cfunction + _ = op.cfunction @pytest.mark.parametrize('value', [0, 1]) def test_constant_as_condition(self, value): @@ -1492,8 +1492,8 @@ def test_stepping_dim_in_condition_lowering(self): op.apply(time_M=threshold+3) assert np.all(g.data[0, :, :] == threshold) assert np.all(g.data[1, :, :] == threshold + 1) - assert 'if (g[t0][x + 1][y + 1] <= 10)\n' - '{\n g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1' in str(op.ccode) + assert 'if (g[t0][x + 1][y + 1] <= 10)\n' + \ + '{\n g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1' in str(op.ccode) def test_expr_like_lowering(self): """ @@ -1689,7 +1689,7 @@ def test_sparse_time_function(self): shape = (21, 21, 21) origin = (0., 0., 0.) spacing = (1., 1., 1.) 
- extent = tuple([d * (s - 1) for s, d in zip(shape, spacing)]) + extent = tuple([d * (s - 1) for s, d in zip(shape, spacing, strict=True)]) grid = Grid(shape=shape, extent=extent, origin=origin) time = grid.time_dim x, y, z = grid.dimensions @@ -1698,7 +1698,10 @@ def test_sparse_time_function(self): # Place source in the middle of the grid src_coords = np.empty((1, len(shape)), dtype=np.float32) - src_coords[0, :] = [o + d * (s-1)//2 for o, d, s in zip(origin, spacing, shape)] + src_coords[0, :] = [ + o + d * (s-1)//2 + for o, d, s in zip(origin, spacing, shape, strict=True) + ] src = SparseTimeFunction(name='src', grid=grid, npoint=1, nt=nt) src.data[:] = 1. src.coordinates.data[:] = src_coords[:] @@ -1746,7 +1749,7 @@ def test_issue_1435(self): Eq(f2[t5, t6, t7, t8], 2 * t9 + t10, implicit_dims=cd)]) # Check it compiles correctly! See issue report - op.cfunction + _ = op.cfunction @pytest.mark.parametrize('factor', [ 4, @@ -1859,7 +1862,7 @@ def test_diff_guards_halts_topofuse(self): op = Operator(eqns) - op.cfunction + _ = op.cfunction assert_structure(op, ['t', 't,x', 't,x'], 't,x,x') @@ -2029,7 +2032,7 @@ def test_shifted_minmax(self): for d in grid.dimensions] eqn = Eq(v, u) - eqn = eqn.xreplace(dict(zip(grid.dimensions, subdims))) + eqn = eqn.xreplace(dict(zip(grid.dimensions, subdims, strict=True))) op = Operator(eqn) diff --git a/tests/test_dle.py b/tests/test_dle.py index 88c63a1c4b..cb01322f22 100644 --- a/tests/test_dle.py +++ b/tests/test_dle.py @@ -21,8 +21,8 @@ def get_blocksizes(op, opt, grid, blockshape, level=0): - blocksizes = {'%s0_blk%d_size' % (d, level): v - for d, v in zip(grid.dimensions, blockshape)} + blocksizes = {f'{d}0_blk{level}_size': v + for d, v in zip(grid.dimensions, blockshape, strict=True)} blocksizes = {k: v for k, v in blocksizes.items() if k in op._known_arguments} # Sanity check if grid.dim == 1 or len(blockshape) == 0: @@ -171,7 +171,7 @@ def test_cache_blocking_structure_distributed(mode): eqns += [Eq(U.forward, U.dx + 
u.forward)] op = Operator(eqns) - op.cfunction + _ = op.cfunction bns0, _ = assert_blocking(op._func_table['compute0'].root, {'x0_blk0'}) bns1, _ = assert_blocking(op._func_table['compute2'].root, {'x1_blk0'}) @@ -340,11 +340,11 @@ def test_structure(self, par_tile, expected): bns, _ = assert_blocking(op, {'x0_blk0', 'x1_blk0'}) assert len(bns) == len(expected) - for root, v in zip(bns.values(), expected): + for root, v in zip(bns.values(), expected, strict=True): iters = FindNodes(Iteration).visit(root) iters = [i for i in iters if i.dim.is_Block and i.dim._depth == 1] assert len(iters) == len(v) - assert all(i.step == j for i, j in zip(iters, v)) + assert all(i.step == j for i, j in zip(iters, v, strict=True)) def test_structure_2p5D(self): grid = Grid(shape=(80, 80, 80)) @@ -396,7 +396,7 @@ def test_custom_rule0(self): iters = FindNodes(Iteration).visit(root) iters = [i for i in iters if i.dim.is_Block and i.dim._depth == 1] assert len(iters) == 3 - assert all(i.step == j for i, j in zip(iters, par_tile)) + assert all(i.step == j for i, j in zip(iters, par_tile, strict=True)) def test_custom_rule1(self): grid = Grid(shape=(8, 8, 8)) @@ -426,7 +426,7 @@ def test_custom_rule1(self): iters = FindNodes(Iteration).visit(root) iters = [i for i in iters if i.dim.is_Block and i.dim._depth == 1] assert len(iters) == 3 - assert all(i.step == j for i, j in zip(iters, par_tile)) + assert all(i.step == j for i, j in zip(iters, par_tile, strict=True)) @pytest.mark.parametrize("shape", [(10,), (10, 45), (20, 33), (10, 31, 45), (45, 31, 45)]) @@ -508,8 +508,8 @@ def test_cache_blocking_hierarchical(blockshape0, blockshape1, exception): assert np.allclose(wo_blocking, w_blocking, rtol=1e-12) except InvalidArgument: assert exception - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e @pytest.mark.parametrize("blockinner", [False, True]) @@ -533,7 +533,7 @@ def test_cache_blocking_imperfect_nest(blockinner): trees = 
retrieve_iteration_tree(bns['x0_blk0']) assert len(trees) == 2 assert len(trees[0]) == len(trees[1]) - assert all(i is j for i, j in zip(trees[0][:4], trees[1][:4])) + assert all(i is j for i, j in zip(trees[0][:4], trees[1][:4], strict=True)) assert trees[0][4] is not trees[1][4] assert trees[0].root.dim.is_Block assert trees[1].root.dim.is_Block @@ -581,7 +581,7 @@ def test_cache_blocking_imperfect_nest_v2(blockinner): trees = retrieve_iteration_tree(bns['x0_blk0']) assert len(trees) == 2 assert len(trees[0]) == len(trees[1]) - assert all(i is j for i, j in zip(trees[0][:2], trees[1][:2])) + assert all(i is j for i, j in zip(trees[0][:2], trees[1][:2], strict=True)) assert trees[0][2] is not trees[1][2] assert trees[0].root.dim.is_Block assert trees[1].root.dim.is_Block @@ -710,7 +710,7 @@ def test_iterations_ompized(self, exprs, expected): assert len(iterations) == len(expected) # Check for presence of pragma omp - for i, j in zip(iterations, expected): + for i, j in zip(iterations, expected, strict=True): pragmas = i.pragmas if j is True: assert len(pragmas) == 1 @@ -776,11 +776,11 @@ def test_collapsing(self, eqns, expected, blocking): assert len(iterations) == len(expected) # Check for presence of pragma omp + collapse clause - for i, j in zip(iterations, expected): + for i, j in zip(iterations, expected, strict=True): if j > 0: assert len(i.pragmas) == 1 pragma = i.pragmas[0] - assert 'omp for collapse(%d)' % j in pragma.ccode.value + assert f'omp for collapse({j})' in pragma.ccode.value else: for k in i.pragmas: assert 'omp for collapse' not in k.ccode.value diff --git a/tests/test_dse.py b/tests/test_dse.py index ca0bb46d47..24eb0cda2b 100644 --- a/tests/test_dse.py +++ b/tests/test_dse.py @@ -50,7 +50,9 @@ def test_scheduling_after_rewrite(): trees = retrieve_iteration_tree(op) # Check loop nest structure - assert all(i.dim is j for i, j in zip(trees[0], grid.dimensions)) # time invariant + assert all( + i.dim is j for i, j in zip(trees[0], 
grid.dimensions, strict=True) + ) # time invariant assert trees[1].root.dim is grid.time_dim assert all(trees[1].root.dim is tree.root.dim for tree in trees[1:]) @@ -703,8 +705,8 @@ def test_min_storage_in_isolation(self): Operator(eqn, opt=('advanced-fsg', {'openmp': True, 'min-storage': True})) except InvalidOperator: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Check that `cire-rotate=True` has no effect in this code has there's # no blocking @@ -2132,7 +2134,7 @@ def test_sum_of_nested_derivatives(self, expr, exp_arrays, exp_ops): # Also check against expected operation count to make sure # all redundancies have been detected correctly for i, expected in enumerate(as_tuple(exp_ops[n])): - assert summary[('section%d' % i, None)].ops == expected + assert summary[(f'section{i}', None)].ops == expected def test_derivatives_from_different_levels(self): """ @@ -2551,7 +2553,7 @@ def test_invariants_with_conditional(self): op = Operator(eqn, opt='advanced') assert_structure(op, ['t', 't,fd', 't,fd,x,y'], 't,fd,x,y') # Make sure it compiles - op.cfunction + _ = op.cfunction # Check hoisting for time invariant eqn = Eq(u, u - (cos(time_sub * factor * f) * sin(g) * uf)) @@ -2559,7 +2561,7 @@ def test_invariants_with_conditional(self): op = Operator(eqn, opt='advanced') assert_structure(op, ['x,y', 't', 't,fd', 't,fd,x,y'], 'x,y,t,fd,x,y') # Make sure it compiles - op.cfunction + _ = op.cfunction def test_hoisting_pow_one(self): """ @@ -2675,7 +2677,7 @@ def test_space_and_time_invariant_together(self): op = Operator(eqn, opt=('advanced', {'openmp': False})) - op.cfunction + _ = op.cfunction assert_structure( op, diff --git a/tests/test_dtypes.py b/tests/test_dtypes.py index f30fbd7b2a..9e4872f058 100644 --- a/tests/test_dtypes.py +++ b/tests/test_dtypes.py @@ -94,7 +94,7 @@ def test_dtype_mapping(dtype: np.dtype[np.inexact], kwargs: dict[str, str], # Check ctypes of the mapped parameters params: dict[str, 
Basic] = {p.name: p for p in op.parameters} _u, _c = params['u'], params['c'] - assert type(_u.indexed._C_ctype._type_()) == ctypes_vector_mapper[dtype] + assert isinstance(_u.indexed._C_ctype._type_(), ctypes_vector_mapper[dtype]) assert _c._C_ctype == expected or ctypes_vector_mapper[dtype] @@ -125,7 +125,7 @@ def test_cse_ctypes(dtype: np.dtype[np.inexact], kwargs: dict[str, str]) -> None @pytest.mark.parametrize('dtype', [np.float32, np.complex64, np.complex128]) @pytest.mark.parametrize('kwargs', _configs, ids=kw_id) def test_complex_headers(dtype: np.dtype[np.inexact], kwargs: dict[str, str]) -> None: - np.dtype + _ = np.dtype """ Tests that the correct complex headers are included when complex dtypes are present in the operator, and omitted otherwise. diff --git a/tests/test_gpu_common.py b/tests/test_gpu_common.py index af5c71fc11..a639c5bbe6 100644 --- a/tests/test_gpu_common.py +++ b/tests/test_gpu_common.py @@ -509,7 +509,7 @@ def test_attempt_tasking_but_no_temporaries(self, opt): # a host Function piters = FindNodes(OmpIteration).visit(op) assert len(piters) == 1 - assert type(piters.pop()) == OmpIteration + assert isinstance(piters.pop(), OmpIteration) def test_tasking_multi_output(self): nt = 10 diff --git a/tests/test_gpu_openacc.py b/tests/test_gpu_openacc.py index 7086d365cd..ada536197e 100644 --- a/tests/test_gpu_openacc.py +++ b/tests/test_gpu_openacc.py @@ -61,8 +61,8 @@ def test_basic_customop(self): platform='nvidiaX', language='openacc', opt='openmp') except InvalidOperator: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e @pytest.mark.parametrize('opt', opts_device_tiling) def test_blocking(self, opt): @@ -166,11 +166,11 @@ def test_multi_tile_blocking_structure(self): 'acc parallel loop tile(32,4,4) present(u)' assert bns['x1_blk0'].pragmas[0].ccode.value ==\ 'acc parallel loop tile(16,4,4) present(u,v)' - for root, v in zip(bns.values(), expected): + for root, v in 
zip(bns.values(), expected, strict=True): iters = FindNodes(Iteration).visit(root) iters = [i for i in iters if i.dim.is_Block and i.dim._depth == 1] assert len(iters) == len(v) - assert all(i.step == j for i, j in zip(iters, v)) + assert all(i.step == j for i, j in zip(iters, v, strict=True)) def test_std_max(self): grid = Grid(shape=(3, 3, 3)) diff --git a/tests/test_gpu_openmp.py b/tests/test_gpu_openmp.py index 679fee23b6..d6505a6f71 100644 --- a/tests/test_gpu_openmp.py +++ b/tests/test_gpu_openmp.py @@ -84,8 +84,8 @@ def test_basic_customop(self): Operator(Eq(u.forward, u + 1), language='openmp', opt='openacc') except InvalidOperator: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e @pytest.mark.parametrize('opt', opts_device_tiling) def test_blocking(self, opt): @@ -162,16 +162,21 @@ def test_multiple_loops(self): # Check `u` and `v` for i, f in enumerate([u, v], 1): - assert op.body.maps[i].ccode.value ==\ - (f'omp target enter data map(to: {f.name}[0:{f.name}_vec->size[0]]' - f'[0:{f.name}_vec->size[1]][0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]])') - assert op.body.unmaps[2*i + 0].ccode.value ==\ - (f'omp target update from({f.name}[0:{f.name}_vec->size[0]]' - f'[0:{f.name}_vec->size[1]][0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]])') - assert op.body.unmaps[2*i + 1].ccode.value ==\ - (f'omp target exit data map(release: {f.name}[0:{f.name}_vec->size[0]]' - f'[0:{f.name}_vec->size[1]][0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]]) ' - 'if(devicerm)') + assert op.body.maps[i].ccode.value == ( + f'omp target enter data map(to: {f.name}' + f'[0:{f.name}_vec->size[0]][0:{f.name}_vec->size[1]]' + f'[0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]])' + ) + assert op.body.unmaps[2*i + 0].ccode.value == ( + f'omp target update from({f.name}' + f'[0:{f.name}_vec->size[0]][0:{f.name}_vec->size[1]]' + f'[0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]])' + ) + assert op.body.unmaps[2*i + 
1].ccode.value == ( + f'omp target exit data map(release: {f.name}' + f'[0:{f.name}_vec->size[0]][0:{f.name}_vec->size[1]]' + f'[0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]]) ' + 'if(devicerm)') # Check `f` assert op.body.maps[0].ccode.value ==\ diff --git a/tests/test_gradient.py b/tests/test_gradient.py index cd42b9b8c6..3ab672d73a 100644 --- a/tests/test_gradient.py +++ b/tests/test_gradient.py @@ -220,7 +220,7 @@ def test_gradientFWI(self, dtype, space_order, kernel, shape, ckp, setup_func, # Add the perturbation to the model def initializer(data): data[:] = np.sqrt(vel0.data**2 * v**2 / - ((1 - H[i]) * v**2 + H[i] * vel0.data**2)) + ((1 - H[i]) * v**2 + H[i] * vel0.data**2)) # noqa: B023 vloc = Function(name='vloc', grid=wave.model.grid, space_order=space_order, initializer=initializer) # Data for the new model @@ -280,7 +280,7 @@ def test_gradientJ(self, dtype, space_order, kernel, shape, spacing, time_order, # Add the perturbation to the model def initializer(data): data[:] = np.sqrt(v0.data**2 * v**2 / - ((1 - H[i]) * v**2 + H[i] * v0.data**2)) + ((1 - H[i]) * v**2 + H[i] * v0.data**2)) # noqa: B023 vloc = Function(name='vloc', grid=wave.model.grid, space_order=space_order, initializer=initializer) # Data for the new model diff --git a/tests/test_iet.py b/tests/test_iet.py index 7f0675eaf0..7bc4f1e709 100644 --- a/tests/test_iet.py +++ b/tests/test_iet.py @@ -77,11 +77,11 @@ def test_make_efuncs(exprs, nfuncs, ntimeiters, nests): efuncs = [] for n, tree in enumerate(retrieve_iteration_tree(op)): root = filter_iterations(tree, key=lambda i: i.dim.is_Space)[0] - efuncs.append(make_efunc('f%d' % n, root)) + efuncs.append(make_efunc(f'f{n}', root)) assert len(efuncs) == len(nfuncs) == len(ntimeiters) == len(nests) - for efunc, nf, nt, nest in zip(efuncs, nfuncs, ntimeiters, nests): + for efunc, nf, nt, nest in zip(efuncs, nfuncs, ntimeiters, nests, strict=True): # Check the `efunc` parameters assert all(i in efunc.parameters for i in (x.symbolic_min, 
x.symbolic_max)) assert all(i in efunc.parameters for i in (y.symbolic_min, y.symbolic_max)) @@ -98,7 +98,7 @@ def test_make_efuncs(exprs, nfuncs, ntimeiters, nests): trees = retrieve_iteration_tree(efunc) assert len(trees) == 1 tree = trees[0] - assert all(i.dim.name == j for i, j in zip(tree, nest)) + assert all(i.dim.name == j for i, j in zip(tree, nest, strict=True)) assert efunc.make_call() diff --git a/tests/test_interpolation.py b/tests/test_interpolation.py index cdb6f7773f..1cde2e13e2 100644 --- a/tests/test_interpolation.py +++ b/tests/test_interpolation.py @@ -466,7 +466,7 @@ def test_multi_inject(shape, coords, nexpr, result, npoints=19): indices = [slice(4, 6, 1) for _ in coords] indices[0] = slice(1, -1, 1) result = (result, result) if nexpr == 1 else (result, 2 * result) - for r, a in zip(result, (a1, a2)): + for r, a in zip(result, (a1, a2), strict=True): assert np.allclose(a.data[indices], r, rtol=1.e-5) @@ -628,7 +628,7 @@ def test_edge_sparse(): sf1.coordinates.data[0, :] = (25.0, 35.0) expr = sf1.interpolate(u) - subs = {d.spacing: v for d, v in zip(u.grid.dimensions, u.grid.spacing)} + subs = {d.spacing: v for d, v in zip(u.grid.dimensions, u.grid.spacing, strict=True)} op = Operator(expr, subs=subs) op() @@ -766,7 +766,7 @@ def test_interpolation_radius(r, interp): r=r, interpolation=interp) try: src.interpolate(u) - assert False + raise AssertionError('Assert False') except ValueError: assert True diff --git a/tests/test_ir.py b/tests/test_ir.py index 684a7713d6..16440ec54a 100644 --- a/tests/test_ir.py +++ b/tests/test_ir.py @@ -172,11 +172,11 @@ def test_iteration_instance_arithmetic(self, x, y, ii_num, ii_literal): for ii in [fax, fa4]: try: ii + fcx1y - assert False + raise AssertionError('Assert False') except TypeError: pass - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e def test_iteration_instance_distance(self, ii_num, ii_literal): """ @@ -197,11 +197,11 @@ def 
test_iteration_instance_distance(self, ii_num, ii_literal): # Should fail due mismatching indices try: fcxy.distance(fax) - assert False + raise AssertionError('Assert False') except TypeError: pass - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e def test_iteration_instance_cmp(self, ii_num, ii_literal): """ @@ -219,14 +219,14 @@ def test_iteration_instance_cmp(self, ii_num, ii_literal): assert fc23 > fc00 assert fc00 >= fc00 - # Lexicographic comparison with numbers but different rank should faxl + # Lexicographic comparison with numbers but different rank should fail try: - fa4 > fc23 - assert False + fa4 > fc23 # noqa: B015 + raise AssertionError('Assert False') except TypeError: pass - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Lexicographic comparison with literals assert fcxy <= fcxy @@ -315,30 +315,30 @@ def test_timed_access_cmp(self, ta_literal): # Non-comparable due to different direction try: - rev_tcxy_w0 > tcxy_r0 - assert False + rev_tcxy_w0 > tcxy_r0 # noqa: B015 + raise AssertionError('Assert False') except TypeError: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Non-comparable due to different aindices try: - tcxy_w0 > tcyx_irr0 - assert False + tcxy_w0 > tcyx_irr0 # noqa: B015 + raise AssertionError('Assert False') except TypeError: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Non-comparable due to mismatching Intervals try: - tcxy_w0 > tcyx_irr0 - assert False + tcxy_w0 > tcyx_irr0 # noqa: B015 + raise AssertionError('Assert False') except TypeError: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Comparable even though the TimedAccess is irregular (reflexivity) assert tcyx_irr0 >= tcyx_irr0 @@ -445,12 +445,12 @@ def test_intervals_union(self, x, y): for i, j 
in [(ix, nully), (ix, iy), (iy, ix), (ix, ixs1), (ixs1, ix)]: try: i.union(j) - assert False # Shouldn't arrive here + raise AssertionError('Assert False') # Shouldn't arrive here except ValueError: assert True - except: + except Exception as e: # No other types of exception expected - assert False + raise AssertionError('Assert False') from e # Mixed symbolic and non-symbolic c = Constant(name='c') diff --git a/tests/test_mpi.py b/tests/test_mpi.py index 25d76fe82e..9159071997 100644 --- a/tests/test_mpi.py +++ b/tests/test_mpi.py @@ -167,7 +167,9 @@ def test_ctypes_neighborhood(self, mode): (0, 1, PN, 2, 3, PN, PN, PN, PN)] } - mapper = dict(zip(attrs, expected[distributor.nprocs][distributor.myrank])) + mapper = dict(zip( + attrs, expected[distributor.nprocs][distributor.myrank], strict=True + )) obj = distributor._obj_neighborhood value = obj._arg_defaults()[obj.name] assert all(getattr(value._obj, k) == v for k, v in mapper.items()) @@ -357,8 +359,13 @@ def define(self, dimensions): md = MyDomain(grid=grid) d = md.distributor - for dec, pdec, sdi, sh in zip(d.decomposition, d.parent.decomposition, - d.subdomain_interval, grid.shape): + for dec, pdec, sdi, sh in zip( + d.decomposition, + d.parent.decomposition, + d.subdomain_interval, + grid.shape, + strict=True + ): # Get the global min and max lower_bounds = [np.amin(i) for i in dec if i.size != 0] upper_bounds = [np.amax(i) for i in dec if i.size != 0] @@ -578,8 +585,14 @@ def test_local_indices(self, shape, expected, mode): grid = Grid(shape=shape) f = Function(name='f', grid=grid) - assert all(i == slice(*j) - for i, j in zip(f.local_indices, expected[grid.distributor.myrank])) + assert all( + i == slice(*j) + for i, j in zip( + f.local_indices, + expected[grid.distributor.myrank], + strict=True + ) + ) @pytest.mark.parallel(mode=4) @pytest.mark.parametrize('shape', [(1,), (2, 3), (4, 5, 6)]) @@ -1498,7 +1511,7 @@ def test_avoid_merging_if_diff_functions(self, mode): eqns += [Eq(U.forward, U.dx + 
u.forward)] op = Operator(eqns) - op.cfunction + _ = op.cfunction check_halo_exchanges(op, 2, 2) @@ -1521,7 +1534,7 @@ def test_merge_haloupdate_if_diff_locindices(self, mode): op = Operator(eqns) assert len(FindNodes(HaloUpdateCall).visit(op)) == 1 - op.cfunction + _ = op.cfunction @pytest.mark.parallel(mode=2) def test_merge_and_hoist_haloupdate_if_diff_locindices(self, mode): @@ -1601,7 +1614,7 @@ def test_merge_haloupdate_if_diff_but_equivalent_locindices(self, mode): rec.interpolate(expr=v1)] op = Operator(eqns) - op.cfunction + _ = op.cfunction calls, _ = check_halo_exchanges(op, 2, 2) for i, v in enumerate([v2, v1]): @@ -1794,7 +1807,7 @@ def test_min_code_size(self, mode): op = Operator(eqns) - op.cfunction + _ = op.cfunction calls = FindNodes(Call).visit(op) @@ -1834,7 +1847,7 @@ def test_many_functions(self, mode): op = Operator(eqns) - op.cfunction + _ = op.cfunction calls = FindNodes(Call).visit(op) assert len(calls) == 2 @@ -1932,7 +1945,7 @@ def test_haloupdate_buffer_cases(self, sz, fwd, expr, exp0, exp1, args, mode): eval(expr)] op = Operator(eqns) - op.cfunction + _ = op.cfunction calls, _ = check_halo_exchanges(op, exp0, exp1) for i, v in enumerate(args): @@ -1954,7 +1967,7 @@ def test_avoid_hoisting_if_antidep(self, mode): Eq(v3, v2.laplace + v1)] op = Operator(eqns) - op.cfunction + _ = op.cfunction calls, _ = check_halo_exchanges(op, 3, 2) # More specifically, we ensure HaloSpot(v2) is on the last loop nest @@ -1975,7 +1988,7 @@ def test_hoist_haloupdate_if_in_the_middle(self, mode): rec.interpolate(expr=v1.forward)] op = Operator(eqns) - op.cfunction + _ = op.cfunction calls, _ = check_halo_exchanges(op, 3, 2) assert calls[0].arguments[0] is v2 @@ -2011,7 +2024,7 @@ def test_merge_smart_if_within_conditional(self, mode): eq1 = Eq(f.backward, f.laplace + .002) op1 = Operator(rec + [eq1]) - op1.cfunction + _ = op1.cfunction check_halo_exchanges(op1, 1, 1) @@ -3127,7 +3140,7 @@ def test_interpolation_at_uforward(self, mode): op = Operator(eqns) - 
op.cfunction + _ = op.cfunction calls, _ = check_halo_exchanges(op, 2, 1) args = calls[0].arguments @@ -3280,7 +3293,7 @@ def test_elastic_structure(self, mode): u_t = Eq(tau.forward, damp * solve(pde_tau, tau.forward)) op = Operator([u_v] + [u_t] + rec_term) - op.cfunction + _ = op.cfunction assert len(op._func_table) == 11 @@ -3350,7 +3363,7 @@ def test_issue_2448_v1(self, mode, setup): rec_term1 = rec.interpolate(expr=v.forward) op1 = Operator([u_v, u_tau, rec_term1]) - op1.cfunction + _ = op1.cfunction calls, _ = check_halo_exchanges(op1, 2, 2) assert calls[0].arguments[0] is tau @@ -3411,7 +3424,7 @@ def test_issue_2448_v3(self, mode, setup): rec_term3 = rec2.interpolate(expr=v2.forward) op3 = Operator([u_v, u_v2, u_tau, u_tau2, rec_term0, rec_term3]) - op3.cfunction + _ = op3.cfunction calls = [i for i in FindNodes(Call).visit(op3) if isinstance(i, HaloUpdateCall)] @@ -3493,7 +3506,7 @@ def get_time_loop(op): for i in iters: if i.dim.is_Time: return i - assert False + raise AssertionError('Assert False') if __name__ == "__main__": diff --git a/tests/test_operator.py b/tests/test_operator.py index 660192d52d..3e417d74b8 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -69,7 +69,7 @@ def test_platform_compiler_language(self): # Unrecognised platform name -> exception try: Operator(Eq(u, u + 1), platform='asga') - assert False + raise AssertionError('Assert False') except InvalidOperator: assert True @@ -93,7 +93,7 @@ def test_platform_compiler_language(self): # ... 
but it will raise an exception if an unknown one try: Operator(Eq(u, u + 1), platform='nvidiaX', compiler='asf') - assert False + raise AssertionError('Assert False') except InvalidOperator: assert True @@ -107,7 +107,7 @@ def test_platform_compiler_language(self): # Unsupported combination of `platform` and `language` should throw an error try: Operator(Eq(u, u + 1), platform='bdw', language='openacc') - assert False + raise AssertionError('Assert False') except InvalidOperator: assert True @@ -123,14 +123,14 @@ def test_opt_options(self): # Unknown pass try: Operator(Eq(u, u + 1), opt=('aaa')) - assert False + raise AssertionError('Assert False') except InvalidOperator: assert True # Unknown optimization option try: Operator(Eq(u, u + 1), opt=('advanced', {'aaa': 1})) - assert False + raise AssertionError('Assert False') except InvalidOperator: assert True @@ -302,7 +302,7 @@ def test_timedlist_wraps_time_if_parallel(self): ompreg = timedlist.body[0] assert ompreg.body[0].dim is grid.time_dim else: - timedlist.body[0].dim is grid.time_dim + timedlist.body[0].dim is grid.time_dim # noqa: B015 def test_nested_lowering(self): """ @@ -1172,7 +1172,7 @@ def test_argument_unknown(self): op = Operator(Eq(a, a + a)) try: op.apply(b=3) - assert False + raise AssertionError('Assert False') except ValueError: # `b` means nothing to `op`, so we end up here assert True @@ -1181,9 +1181,9 @@ def test_argument_unknown(self): configuration['ignore-unknowns'] = True op.apply(b=3) assert True - except ValueError: + except ValueError as e: # we should not end up here as we're now ignoring unknown arguments - assert False + raise AssertionError('Assert False') from e finally: configuration['ignore-unknowns'] = configuration._defaults['ignore-unknowns'] @@ -1225,11 +1225,11 @@ def test_illegal_override(self): try: op.apply(a=a1, b=b0) - assert False + raise AssertionError('Assert False') except ValueError as e: assert 'Override' in e.args[0] # Check it's hitting the right error msg - 
except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e def test_incomplete_override(self): """ @@ -1248,11 +1248,11 @@ def test_incomplete_override(self): try: op.apply(a=a1) - assert False + raise AssertionError('Assert False') except ValueError as e: assert 'Default' in e.args[0] # Check it's hitting the right error msg - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e @pytest.mark.parallel(mode=1) def test_new_distributor(self, mode): @@ -1632,7 +1632,10 @@ def test_consistency_anti_dependences(self, exprs, directions, expected, visit): assert "".join(mapper.get(i.dim.name, i.dim.name) for i in iters) == visit # mapper just makes it quicker to write out the test parametrization mapper = {'+': Forward, '-': Backward, '*': Any} - assert all(i.direction == mapper[j] for i, j in zip(iters, directions)) + assert all( + i.direction == mapper[j] + for i, j in zip(iters, directions, strict=True) + ) def test_expressions_imperfect_loops(self): """ @@ -1749,7 +1752,10 @@ def test_equations_mixed_functions(self, shape): Eq(b, time*b*a + b)] eqns2 = [Eq(a.forward, a.laplace + 1.), Eq(b2, time*b2*a + b2)] - subs = {d.spacing: v for d, v in zip(dims0, [2.5, 1.5, 2.0][:grid.dim])} + subs = { + d.spacing: v + for d, v in zip(dims0, [2.5, 1.5, 2.0][:grid.dim], strict=True) + } op = Operator(eqns, subs=subs, opt='noop') trees = retrieve_iteration_tree(op) diff --git a/tests/test_pickle.py b/tests/test_pickle.py index 09b18031fe..407e0433ff 100644 --- a/tests/test_pickle.py +++ b/tests/test_pickle.py @@ -388,8 +388,8 @@ def test_lock(self, pickle): pkl_lock = pickle.dumps(lock) new_lock = pickle.loads(pkl_lock) - lock.name == new_lock.name - new_lock.dimensions[0].symbolic_size == ld.symbolic_size + assert lock.name == new_lock.name + assert new_lock.dimensions[0].symbolic_size == ld.symbolic_size def test_p_thread_array(self, pickle): a = PThreadArray(name='threads', npthreads=4) @@ -683,7 +683,7 
@@ def test_foreign(self): coordinates=[(0.,), (1.,), (2.,)]) # Plain `pickle` doesn't support pickling of dynamic classes - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 pickle0.dumps(msf) # But `cloudpickle` does @@ -773,7 +773,7 @@ def test_operator_function(self, pickle): def test_operator_function_w_preallocation(self, pickle): grid = Grid(shape=(3, 3, 3)) f = Function(name='f', grid=grid) - f.data + _ = f.data op = Operator(Eq(f, f + 1)) op.apply() @@ -804,7 +804,7 @@ def test_operator_timefunction(self, pickle): def test_operator_timefunction_w_preallocation(self, pickle): grid = Grid(shape=(3, 3, 3)) f = TimeFunction(name='f', grid=grid, save=3) - f.data + _ = f.data op = Operator(Eq(f.forward, f + 1)) op.apply(time=0) @@ -849,8 +849,8 @@ def test_elemental(self, pickle): pkl_op = pickle.dumps(op) new_op = pickle.loads(pkl_op) - op.cfunction - new_op.cfunction + _ = op.cfunction + _ = new_op.cfunction assert str(op) == str(new_op) @@ -950,7 +950,11 @@ def test_mpi_fullmode_objects(self, pickle, mode): assert obj.key == new_obj.key assert obj.name == new_obj.name assert len(new_obj.arguments) == 2 - assert all(d0.name == d1.name for d0, d1 in zip(obj.arguments, new_obj.arguments)) + assert all( + d0.name == d1.name for d0, d1 in zip( + obj.arguments, new_obj.arguments, strict=True + ) + ) assert all(new_obj.arguments[i] is new_obj.owned[i][0][0][0] # `x` and `y` for i in range(2)) assert new_obj.owned[0][0][0][1] is new_obj.owned[1][0][0][1] # `OWNED` @@ -1014,7 +1018,7 @@ def test_full_model(self, pickle): pkl_origin = pickle.dumps(model.grid.origin_symbols) new_origin = pickle.loads(pkl_origin) - for a, b in zip(model.grid.origin_symbols, new_origin): + for a, b in zip(model.grid.origin_symbols, new_origin, strict=True): assert a.compare(b) == 0 # Test Class TimeDimension pickling @@ -1037,7 +1041,7 @@ def test_full_model(self, pickle): assert model.grid.extent == new_grid.extent assert model.grid.shape == new_grid.shape - 
for a, b in zip(model.grid.dimensions, new_grid.dimensions): + for a, b in zip(model.grid.dimensions, new_grid.dimensions, strict=True): assert a.compare(b) == 0 ricker = RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range) @@ -1073,7 +1077,9 @@ def test_usave_sampled(self, pickle, subs): op_fwd = Operator(eqn, subs=subs) tmp_pickle_op_fn = "tmp_operator.pickle" - pickle.dump(op_fwd, open(tmp_pickle_op_fn, "wb")) - op_new = pickle.load(open(tmp_pickle_op_fn, "rb")) + with open(tmp_pickle_op_fn, "wb") as fh: + pickle.dump(op_fwd, fh) + with open(tmp_pickle_op_fn, "rb") as fh: + op_new = pickle.load(fh) assert str(op_fwd) == str(op_new) diff --git a/tests/test_sparse.py b/tests/test_sparse.py index b8fab00c2d..578c4b16a8 100644 --- a/tests/test_sparse.py +++ b/tests/test_sparse.py @@ -193,7 +193,7 @@ def _pure_python_coeffs(self, mstf): m_coo = mstf.matrix.tocoo() - for row, col, val in zip(m_coo.row, m_coo.col, m_coo.data): + for row, col, val in zip(m_coo.row, m_coo.col, m_coo.data, strict=True): base_gridpoint = mstf.gridpoints.data[row, :] # construct the stencil and the slices to which it will be applied @@ -387,12 +387,12 @@ def test_mpi(self, mode): op = Operator(sf.interpolate(m)) sf.manual_scatter() args = op.arguments(time_m=0, time_M=9) - print("rank %d: %s" % (grid.distributor.myrank, str(args))) + print(f'rank {grid.distributor.myrank}: {args!s}') op.apply(time_m=0, time_M=0) sf.manual_gather() for i in range(grid.distributor.nprocs): - print("==== from rank %d" % i) + print(f'==== from rank {i}') if i == grid.distributor.myrank: print(repr(sf.data)) grid.distributor.comm.Barrier() diff --git a/tests/test_staggered_utils.py b/tests/test_staggered_utils.py index b522b70d3f..daec7f2108 100644 --- a/tests/test_staggered_utils.py +++ b/tests/test_staggered_utils.py @@ -50,7 +50,9 @@ def test_avg(ndim): shifted = f for dd in d: shifted = shifted.subs({dd: dd - dd.spacing/2}) - assert all(i == dd for i, dd in zip(shifted.indices, 
grid.dimensions)) + assert all( + i == dd for i, dd in zip(shifted.indices, grid.dimensions, strict=True) + ) # Average automatically i.e.: # f not defined at x so f(x, y) = 0.5*f(x - h_x/2, y) + 0.5*f(x + h_x/2, y) avg = f @@ -180,7 +182,7 @@ def test_staggered_rebuild(stagg): # Check that rebuild correctly set the staggered indices # with the new dimensions - for (d, nd) in zip(grid.dimensions, new_dims): + for (d, nd) in zip(grid.dimensions, new_dims, strict=True): if d in as_tuple(stagg) or stagg is CELL: assert f2.indices[nd] == nd + nd.spacing / 2 else: diff --git a/tests/test_subdomains.py b/tests/test_subdomains.py index 72c9a320a7..2fafd4e02b 100644 --- a/tests/test_subdomains.py +++ b/tests/test_subdomains.py @@ -536,7 +536,7 @@ class DummySubdomains(SubDomainSet): op = Operator(eqns) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['x,y', 't,n0', 't,n0,x,y'], 'x,y,t,n0,x,y') @@ -572,7 +572,7 @@ class DummySubdomains2(SubDomainSet): op = Operator(eqns) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['x,y', 't,n0', 't,n0,x,y', 't,n1', 't,n1,x,y'], @@ -607,7 +607,7 @@ class DummySubdomains2(SubDomainSet): op = Operator(eqns) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['x,y', 't,n0', 't,n0,x,y', 't,n1', 't,n1,x,y', 't,n0', 't,n0,x,y'], @@ -633,7 +633,7 @@ class Dummy(SubDomainSet): op = Operator(eqn) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['t,n0', 't,n0,x,y', 't,n0,x,y'], 't,n0,x,y,x,y') @@ -660,7 +660,7 @@ class Dummy(SubDomainSet): op = Operator(eqns) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['t', 't,n0', 't,n0,x,y', 't,n0', 't,n0,x,y'], 't,n0,x,y,n0,x,y') @@ -680,7 +680,7 @@ class Dummy(SubDomainSet): op = Operator(eqn) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['t,n0', 't,n0,x0_blk0,y0_blk0,x,y,z'], 
't,n0,x0_blk0,y0_blk0,x,y,z') @@ -1527,7 +1527,7 @@ def test_function_data_shape_mpi(self, x, y, mode): assert np.count_nonzero(g.data) == f.data.size shape = [] - for i, s in zip(f._distributor.subdomain_interval, slices): + for i, s in zip(f._distributor.subdomain_interval, slices, strict=True): if i is None: shape.append(s.stop - s.start) else: diff --git a/tests/test_symbolics.py b/tests/test_symbolics.py index 5945e7ce9a..85d2818940 100644 --- a/tests/test_symbolics.py +++ b/tests/test_symbolics.py @@ -216,7 +216,7 @@ def test_bundle(): fg = Bundle(name='fg', components=(f, g)) # Test reconstruction - fg._rebuild().components == fg.components + assert fg._rebuild().components == fg.components def test_call_from_pointer(): diff --git a/tests/test_tensors.py b/tests/test_tensors.py index 8f9d890a2c..512c48cf55 100644 --- a/tests/test_tensors.py +++ b/tests/test_tensors.py @@ -179,7 +179,7 @@ def test_transpose_vs_T(func1): # inner=True is the same as T assert f3 == f2 # inner=False doesn't transpose inner derivatives - for f4i, f2i in zip(f4, f2): + for f4i, f2i in zip(f4, f2, strict=True): assert f4i == f2i.T @@ -188,7 +188,7 @@ def test_transpose_vs_T(func1): def test_tensor_fd(func1): grid = Grid(tuple([5]*3)) f1 = func1(name="f1", grid=grid) - assert np.all([f.dx == f2 for f, f2 in zip(f1, f1.dx)]) + assert np.all([f.dx == f2 for f, f2 in zip(f1, f1.dx, strict=True)]) @pytest.mark.parametrize('func1, symm, diag, expected', @@ -229,8 +229,8 @@ def test_sympy_matrix(func1): sympy_f1 = f1.as_mutable() vec = sympy.Matrix(3, 1, np.random.rand(3)) mat = sympy.Matrix(3, 3, np.random.rand(3, 3).ravel()) - assert all(sp - dp == 0 for sp, dp in zip(mat * f1, mat * sympy_f1)) - assert all(sp - dp == 0 for sp, dp in zip(f1 * vec, sympy_f1 * vec)) + assert all(sp - dp == 0 for sp, dp in zip(mat * f1, mat * sympy_f1, strict=True)) + assert all(sp - dp == 0 for sp, dp in zip(f1 * vec, sympy_f1 * vec, strict=True)) @pytest.mark.parametrize('func1', [VectorFunction, 
VectorTimeFunction]) @@ -241,7 +241,7 @@ def test_sympy_vector(func1): sympy_f1 = f1.as_mutable() mat = sympy.Matrix(3, 3, np.random.rand(3, 3).ravel()) - assert all(sp - dp == 0 for sp, dp in zip(mat * f1, mat * sympy_f1)) + assert all(sp - dp == 0 for sp, dp in zip(mat * f1, mat * sympy_f1, strict=True)) @pytest.mark.parametrize('func1', [TensorFunction, TensorTimeFunction]) @@ -331,7 +331,7 @@ def test_shifted_div_of_tensor(shift, ndim): df = div(f, shift=shift, order=order).evaluate ref = [] - for i, a in enumerate(grid.dimensions): + for i, _ in enumerate(grid.dimensions): elems = [] for j, d in reversed(list(enumerate(grid.dimensions))): x0 = (None if shift is None else d + shift[i][j] * d.spacing if @@ -369,7 +369,7 @@ def test_shifted_lap_of_vector(shift, ndim): assert v.laplacian() == v.laplace for order in [None, 2]: df = v.laplacian(shift=shift, order=order) - for (vi, dfvi) in zip(v, df): + for (vi, dfvi) in zip(v, df, strict=True): ref = vi.laplacian(shift=shift, order=order) assert dfvi == ref @@ -398,10 +398,10 @@ def test_basic_arithmetic(): # Scalar operations t1 = tau + 1 - assert all(t1i == ti + 1 for (t1i, ti) in zip(t1, tau)) + assert all(t1i == ti + 1 for (t1i, ti) in zip(t1, tau, strict=True)) t1 = tau * 2 - assert all(t1i == ti * 2 for (t1i, ti) in zip(t1, tau)) + assert all(t1i == ti * 2 for (t1i, ti) in zip(t1, tau, strict=True)) def test_custom_coeffs_vector(): @@ -430,7 +430,7 @@ def test_custom_coeffs_tensor(): c = [10, 10, 10] dtau = div(tau, weights=c) - for i, d in enumerate(grid.dimensions): + for i, _ in enumerate(grid.dimensions): assert dtau[i] == tau[i, 0].dx(w=c) + tau[i, 1].dy(w=c) + tau[i, 2].dz(w=c) assert list(dtau[i].args[0].weights) == c @@ -451,7 +451,7 @@ def test_custom_coeffs_tensor_basic(func): c = [10, 20, 30] df = f.dx(w=c) - for (fi, dfi) in zip(f.values(), df.values()): + for (fi, dfi) in zip(f.values(), df.values(), strict=True): assert dfi == fi.dx(w=c) assert list(dfi.weights) == c @@ -465,7 +465,7 @@ def 
test_rebuild(func1): assert f1.grid == f2.grid assert f2.name == 'f2' - for (i, j) in zip(f1.flat(), f2.flat()): + for (i, j) in zip(f1.flat(), f2.flat(), strict=True): assert j.name == i.name.replace('f1', 'f2') assert j.grid == i.grid assert j.dimensions == i.dimensions @@ -477,7 +477,7 @@ def test_rebuild(func1): assert f3.grid == grid assert f3.name == f1.name - for (i, j) in zip(f1.flat(), f3.flat()): + for (i, j) in zip(f1.flat(), f3.flat(), strict=True): assert j.name == i.name assert j.grid == i.grid assert j.dimensions == tuple(new_dims) diff --git a/tests/test_threading.py b/tests/test_threading.py index 1c753a16a9..3bc0cce3f0 100644 --- a/tests/test_threading.py +++ b/tests/test_threading.py @@ -16,7 +16,7 @@ def test_concurrent_executing_operators(): op = Operator(Eq(u.forward, u + 1)) # this forces the compile - op.cfunction + _ = op.cfunction def do_run(op): # choose a new size @@ -42,7 +42,7 @@ def do_run(op): info("Running operator in threadpool") futures = [] - for i in range(1000): + for _ in range(1000): futures.append(tpe.submit(do_run, op)) # Get results - exceptions will be raised here if there are any diff --git a/tests/test_unexpansion.py b/tests/test_unexpansion.py index d5ef86c7c4..026effcf79 100644 --- a/tests/test_unexpansion.py +++ b/tests/test_unexpansion.py @@ -41,16 +41,16 @@ def test_numeric_coeffs(self): w = np.zeros(3) # Pure derivative - Operator(Eq(u, u.dx2(weights=w)), opt=opt).cfunction + _ = Operator(Eq(u, u.dx2(weights=w)), opt=opt).cfunction # Mixed derivative - Operator(Eq(u, u.dx.dx), opt=opt).cfunction + _ = Operator(Eq(u, u.dx.dx), opt=opt).cfunction # Non-perfect mixed derivative - Operator(Eq(u, (u.dx(weights=w) + v.dx).dx), opt=opt).cfunction + _ = Operator(Eq(u, (u.dx(weights=w) + v.dx).dx), opt=opt).cfunction # Compound expression - Operator(Eq(u, (v*u.dx).dy(weights=w)), opt=opt).cfunction + _ = Operator(Eq(u, (v*u.dx).dy(weights=w)), opt=opt).cfunction @pytest.mark.parametrize('coeffs,expected', [ ((7, 7, 7), 3), 
# We've had a bug triggered by identical coeffs @@ -71,7 +71,7 @@ def test_multiple_cross_derivs(self, coeffs, expected): p.dx(weights=coeffs0).dy(weights=coeffs1)) op = Operator(eq, opt=('advanced', {'expand': False})) - op.cfunction + _ = op.cfunction # w0, w1, ... functions = FindSymbols().visit(op) @@ -244,7 +244,7 @@ def test_v3(self): 'cire-mingain': 200})) # Check generated code -- redundant IndexDerivatives have been caught! - op1._profiler._sections['section0'].sops == 65 + assert op1._profiler._sections['section0'].sops == 65 op0.apply(time_M=5) op1.apply(time_M=5, u=u1, v=v1) @@ -269,7 +269,7 @@ def test_v4(self): 't,x0_blk0,y0_blk0,x,y,z,i1,i0'], 'x,y,z,t,x0_blk0,y0_blk0,x,y,z,i1,i0') - op.cfunction + _ = op.cfunction def test_v5(self): grid = Grid(shape=(16, 16)) @@ -290,7 +290,7 @@ def test_v5(self): assert op._profiler._sections['section0'].sops == 127 assert_structure(op, ['t,x,y', 't,x,y,i1', 't,x,y,i1,i0'], 't,x,y,i1,i0') - op.cfunction + _ = op.cfunction def test_v6(self): grid = Grid(shape=(16, 16)) @@ -315,7 +315,7 @@ def test_v6(self): assert op._profiler._sections['section0'].sops == 133 assert_structure(op, ['t,x,y', 't,x,y,i1', 't,x,y,i1,i0'], 't,x,y,i1,i0') - op.cfunction + _ = op.cfunction def test_transpose(self): shape = (11, 11, 11) @@ -364,7 +364,7 @@ def test_redundant_derivatives(self): temps = [i for i in FindSymbols().visit(exprs) if isinstance(i, Symbol)] assert len(temps) == 2 + nlin - op.cfunction + _ = op.cfunction def test_buffering_timestencil(self): grid = Grid((11, 11)) @@ -441,7 +441,7 @@ def test_v1(self): 't,x0_blk0,y0_blk0,x,y,z,i1'], 'x,y,z,t,x0_blk0,y0_blk0,x,y,z,i0,x,y,z,i1') - op.cfunction + _ = op.cfunction def test_diff_first_deriv(self): grid = Grid(shape=(16, 16, 16)) diff --git a/tests/test_visitors.py b/tests/test_visitors.py index 28b61761e1..06eb933351 100644 --- a/tests/test_visitors.py +++ b/tests/test_visitors.py @@ -391,7 +391,7 @@ def test_map_nodes(block1): assert len(map_nodes.keys()) == 1 - for 
iters, (expr,) in map_nodes.items(): + for iters in map_nodes: # Replace the outermost `Iteration` with a `Call` callback = Callable('solver', iters[0], 'void', ()) processed = Transformer({iters[0]: Call(callback.name)}).visit(block1) diff --git a/tests/test_warnings.py b/tests/test_warnings.py index b3f9e22741..701554bd6e 100644 --- a/tests/test_warnings.py +++ b/tests/test_warnings.py @@ -62,11 +62,15 @@ class TestWarning: """ def test_raise(self): with pytest.warns(UserWarning): - warnings.warn('Let this be a warning to you') + warnings.warn('Let this be a warning to you', stacklevel=1) def test_raise_devito(self): with pytest.warns(DevitoWarning): - warnings.warn('Let this be another warning to you', DevitoWarning) + warnings.warn( + 'Let this be another warning to you', + DevitoWarning, + stacklevel=1 + ) def test_raise_devito_kw(self): with pytest.warns(DevitoWarning): @@ -74,4 +78,4 @@ def test_raise_devito_kw(self): def test_raise_from_custom(self, custom_warning): with pytest.warns(NewWarning): - warnings.warn(custom_warning) + warnings.warn(custom_warning, stacklevel=1) From a7e29846eb69081b6982e6a73078cbf737a3b8e4 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Mon, 5 Jan 2026 15:44:46 +0000 Subject: [PATCH 30/42] misc: Update precommit to include flake8 --- .pre-commit-config.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 284d1fcaa4..9ed8ec27f7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,13 +13,18 @@ repos: hooks: # Run isort to check only (don't modify files) - id: isort - args: [ --check-only ] + args: [--check-only, --filter-files] - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. 
rev: v0.14.4 hooks: # Run the linter to check only (don't modify files) - id: ruff-check + - repo: https://github.com/PyCQA/flake8 + rev: 7.3.0 + hooks: + - id: flake8 + additional_dependencies: [flake8-pyproject] - repo: https://github.com/crate-ci/typos rev: v1.39.1 hooks: From e5e4557d11410e1d4ead17eba380e839016edbdc Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Mon, 5 Jan 2026 16:01:59 +0000 Subject: [PATCH 31/42] misc: Various fixes --- examples/seismic/tti/tti_example.py | 3 ++- .../tutorials/07.1_dispersion_relation.ipynb | 4 +-- tests/test_caching.py | 3 ++- tests/test_dimension.py | 25 +++++++++++++++++-- tests/test_dle.py | 6 +++-- 5 files changed, 33 insertions(+), 8 deletions(-) diff --git a/examples/seismic/tti/tti_example.py b/examples/seismic/tti/tti_example.py index 5e22901eb8..4fdeb96674 100644 --- a/examples/seismic/tti/tti_example.py +++ b/examples/seismic/tti/tti_example.py @@ -1,5 +1,6 @@ +from contextlib import suppress + import numpy as np -from contrextlib import suppress with suppress(ImportError): import pytest diff --git a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb index efae154d9c..2a766121fc 100644 --- a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb +++ b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb @@ -1132,10 +1132,10 @@ "x = np.linspace(0, np.pi, 201)\n", "m = np.arange(1, len(fornberg) + 1)\n", "y_fornberg = - fornberg[0] - 2*np.sum(\n", - " [a_ * np.cos(m_*x) for a_, m_ in zip(fornberg[1:], m, strict=True)], axis=0\n", + " [a_ * np.cos(m_*x) for a_, m_ in zip(fornberg[1:], m, strict=False)], axis=0\n", ")\n", "y_drp1 = - drp_stencil1[0] - 2*np.sum(\n", - " [a_ * np.cos(m_*x) for a_, m_ in zip(drp_stencil1[1:], m, strict=True)], axis=0\n", + " [a_ * np.cos(m_*x) for a_, m_ in zip(drp_stencil1[1:], m, strict=False)], axis=0\n", ")\n", "\n", "fig, ax = plt.subplots(1, 2)\n", diff --git a/tests/test_caching.py 
b/tests/test_caching.py index 0cc49cc833..8bfaaae837 100644 --- a/tests/test_caching.py +++ b/tests/test_caching.py @@ -678,7 +678,8 @@ def test_sparse_function(self, operate_on_empty_cache): ncreated = 2+1+2+2+2+1+4 # Note that injection is now lazy so no new symbols should be created assert len(_SymbolCache) == cur_cache_size - _ = i.evaluate + # The expression is not redundant, but storing it changes the symbol count + i.evaluate # noqa: B018 assert len(_SymbolCache) == cur_cache_size + ncreated diff --git a/tests/test_dimension.py b/tests/test_dimension.py index 6c6a4c3d76..8c4781d58e 100644 --- a/tests/test_dimension.py +++ b/tests/test_dimension.py @@ -1492,8 +1492,29 @@ def test_stepping_dim_in_condition_lowering(self): op.apply(time_M=threshold+3) assert np.all(g.data[0, :, :] == threshold) assert np.all(g.data[1, :, :] == threshold + 1) - assert 'if (g[t0][x + 1][y + 1] <= 10)\n' + \ - '{\n g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1' in str(op.ccode) + + # We want to assert that the following snippet: + # ``` + # if (g[t0][x + 1][y + 1] <= 10) + # { + # g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1 + # ``` + # is in the generated code, but indentation etc. seems to vary. 
+ part1 = 'if (g[t0][x + 1][y + 1] <= 10)\n' + part2 = 'g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1' + whole_code = str(op.ccode) + + try: + loc = whole_code.find(part1) + assert loc != -1 + assert whole_code.find(part2, loc + len(part1)) != -1 + except AssertionError: + # Try the alternative string + part1 = 'if (gL0(t0, x + 1, y + 1) <= 10)\n' + part2 = 'gL0(t1, x + 1, y + 1) = gL0(t0, x + 1, y + 1) + 1' + loc = whole_code.find(part1) + assert loc != -1 + assert whole_code.find(part2, loc + len(part1)) != -1 def test_expr_like_lowering(self): """ diff --git a/tests/test_dle.py b/tests/test_dle.py index cb01322f22..ced0ad9937 100644 --- a/tests/test_dle.py +++ b/tests/test_dle.py @@ -21,8 +21,10 @@ def get_blocksizes(op, opt, grid, blockshape, level=0): - blocksizes = {f'{d}0_blk{level}_size': v - for d, v in zip(grid.dimensions, blockshape, strict=True)} + blocksizes = { + f'{d}0_blk{level}_size': v + for d, v in zip(grid.dimensions, blockshape, strict=False) + } blocksizes = {k: v for k, v in blocksizes.items() if k in op._known_arguments} # Sanity check if grid.dim == 1 or len(blockshape) == 0: From 50719ac6a8035882b2c6016362618e5ea9a07d37 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Tue, 6 Jan 2026 14:09:53 +0000 Subject: [PATCH 32/42] ci: Change linting order to run ruff before flake8 --- .github/workflows/lint.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index ac81a989d4..d2495741ac 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -31,10 +31,6 @@ jobs: python -m pip install --upgrade pip pip install flake8-pyproject isort ruff - - name: Lint codebase with flake8 - run: | - flake8 --builtins=ArgumentError . - - name: Lint the Python imports with isort run: | isort --check-only . @@ -43,6 +39,10 @@ jobs: run: | ruff check --preview --output-format github + - name: Lint codebase with flake8 + run: | + flake8 --builtins=ArgumentError . 
+ spellcheck: name: "Spellcheck everything" runs-on: ubuntu-latest From 757e597503554a52bfff82e6028187c44e139741 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Tue, 6 Jan 2026 14:10:22 +0000 Subject: [PATCH 33/42] misc: Update contributing guidelines to include pre-commit --- CONTRIBUTING.md | 94 ++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 77 insertions(+), 17 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 89a656450e..90fd23a031 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,10 +1,12 @@ # Contributing to Devito - We welcome third-party contributions, and we would love you to become an active contributor! -Software contributions are made via GitHub pull requests to https://github.com/devitocodes/devito. If you are planning a large contribution, we encourage you to engage with us frequently to ensure that your effort is well-directed. See below for more details. +Software contributions are made via GitHub pull requests to https://github.com/devitocodes/devito. +If you are planning a large contribution, we encourage you to engage with us frequently to ensure that your effort is well-directed. +See below for more details. -Devito is distributed under the MIT License, https://github.com/devitocodes/devito/blob/main/LICENSE.md. The act of submitting a pull request or patch (with or without an explicit Signed-off-by tag) will be understood as an affirmation of the following: +Devito is distributed under the MIT License, https://github.com/devitocodes/devito/blob/main/LICENSE.md. +The act of submitting a pull request or patch (with or without an explicit Signed-off-by tag) will be understood as an affirmation of the following: Developer's Certificate of Origin 1.1 @@ -33,13 +35,11 @@ Devito is distributed under the MIT License, https://github.com/devitocodes/devi this project or the open source license(s) involved. ### Reporting issues - There are several options: * Talk to us. 
You can join our Slack team via this [link](https://join.slack.com/t/devitocodes/shared_invite/zt-2hgp6891e-jQDcepOWPQwxL5JJegYKSA). Should you have installation issues, or should you bump into something that appears to be a Devito-related bug, do not hesitate to get in touch. We are always keen to help out. * File an issue on [our GitHub page](https://github.com/devitocodes/devito/issues). ### Making changes - First of all, read of [code of conduct](https://github.com/devitocodes/devito/blob/main/CODE_OF_CONDUCT.md) and make sure you agree with it. The protocol to propose a patch is: @@ -47,8 +47,8 @@ The protocol to propose a patch is: * As soon as you know what you need to do, [fork](https://help.github.com/articles/fork-a-repo/) Devito. * Create a branch with a suitable name. * Write code following the guidelines below. Commit your changes as small logical units. -* Commit messages must adhere to the format specified [here](https://github.com/devitocodes/devito/wiki/Tags-for-commit-messages-and-PR-titles). We may ask you to rebase the commit history if it looks too messy. -* Write tests to convince us and yourself that what you've done works as expected. Commit them. +* Commit messages must adhere to the format specified below. We may ask you to rebase the commit history if it looks too messy. +* Write tests to convince us and yourself that what you have done works as expected and commit them. * Run **the entire test suite**, including the new tests, to make sure that you haven't accidentally broken anything else. * Push everything to your Devito fork. * Submit a Pull Request on our repository. @@ -56,24 +56,84 @@ The protocol to propose a patch is: Tip, especially for newcomers: prefer short, self-contained Pull Requests over lengthy, impenetrable, and thus difficult to review, ones. 
-### Coding guidelines +#### Commit messages and pull request titles + +Your commit message should follow the following format: `tag: Message` + +Where `tag` should be one of the following: +* `arch`: JIT and architecture (basically anything in `devito/arch`) +* `bench`: Anything related to benchmarking and profiling +* `ci`: Continuous Integration (CI) +* `ckp`: Checkpointing related +* `compiler`: Compilation (`operator`, `ir`, `passes`, `symbolics`, ...) +* `docs`: Updates or changes to docstrings or the documentation +* `dsl`: A change related to Devito's Domain Specific Language _Note: `fd`, `differentiable`, etc -- all belong to dsl_ +* `examples`: Updates or changes to the examples or tutorials +* `install`: Related to installation (`docker`, `conda`, `pip`, ...) +* `reqs`: Package dependence updates +* `sympy`: Changes related to `sympy` +* `tests`: Updates or changes to the test suite +* `misc`: tools, docstring/comment updates, linting fixes, etc + +`Message` should: +* Start with an upper case letter +* Start with a verb in first person +* Be as short as possible + +Examples: +* `compiler: Fix MPI optimization pass` +* `install: Update Dockerfiles to new NVidia SDK` + +Your Pull Request (PR) should follow a similar format: `tag: Title` +Additionally, you should add labels to the PR so that it can be categorised and the new changes can be correctly auto-summarised in the changelog. +Optionally, you may wish to select a reviewer, especially if you have discussed the PR with a member of the Devito team already. -Some coding rules are "enforced" (and automatically checked by our Continuous Integration systems), some are "strongly recommended", others are "optional" but welcome. 
+### Coding guidelines -* We _enforce_ [PEP8](https://www.python.org/dev/peps/pep-0008/), with a few exceptions, listed [here](https://github.com/devitocodes/devito/blob/main/setup.cfg#L3) +To ease the process of contributing we use [pre-commit](https://pre-commit.com/), which runs a small set of formatting and linting tests before you create a new commit. +To use `pre-commit` with Devito simply run: +```bash +pip install pre-commit +pre-commit install +``` +Now when you make a commit, a set of pre-defined steps will run to check your contributed code. + +These checks will: +* Trim any trailing whitespace +* Fix ends of files +* Check YAML formatting +* Check for accidentally added large files +* Sort imports using `isort` * +* Lint the codebase using `ruff` * +* Lint the codebase again using `flake8` * +* Check the code and documentation for typos using `typos` * +* Lint GitHub Actions workflow files using actionlint * +* Lint Dockerfiles using hadolint * + +(* these checks will not change the edited files, you must manually fix the files or run an automated tool eg: `ruff check --fix`) + +If you absolutely must push "dirty" code, `pre-commit` can be circumvented using: +```bash +git commit --no-verify -m "misc: WIP very dirty code" +``` +However, this will cause CI to fail almost immediately! + +Some coding rules are "enforced" (and automatically checked by CI), some are "strongly recommended", others are "optional" but welcome. + +* We _enforce_ [PEP8](https://www.python.org/dev/peps/pep-0008/) using `ruff` and `flake8` with [a few exceptions](https://github.com/devitocodes/devito/blob/main/pyproject.toml). * We _enforce_ a maximum line length of 90 characters. * We _enforce_ indentation via 4 spaces. -* We _suggest_ to use ``flake8`` to check the above points locally, before filing a Pull Request. -* We _strongly recommend_ to document any new module, class, routine, ... 
with [NumPy-like docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html#example-numpy) ("numpydoc"). -* We _strongly recommend_ imports to be at the top of a module, logically grouped and, within each group, to be alphabetically ordered. As an example, condider our [__init__.py](https://github.com/devitocodes/devito/blob/main/devito/__init__.py): the first group is imports from the standard library; then imports from third-party dependencies; finally, imports from devito modules. +* We _enforce_ imports to be at the top of a module and logically grouped using `isort`. +* We _strongly recommend_ to document any new module, class, routine, with [numpy docstrings](https://numpydoc.readthedocs.io/en/latest/format.html). * We _strongly recommend_ to follow standard Python coding guidelines: - Use camel caps for class names, e.g. ``class FooBar``. - - Method names must start with a small letter; use underscores to separate words, e.g. ``def _my_meth_...``. - - Private class attributes and methods must start with an underscore. - - Variable names should be explicative (Devito prefers "long and clear" over "short but unclear"). + - Method names must start with a small letter; use underscores to separate words, eg: `def my_method(...)`. + - Private class attributes and methods must start with an underscore, eg: `def _my_private_method(...)`. + - Variable names should be explicative (Devito prefers "long and clear" over "short and FORTRAN like"). - Comment your code, and do not be afraid of being verbose. The first letter must be capitalized. Do not use punctuation (unless the comment consists of multiple sentences). * We _like_ that blank lines are used to logically split blocks of code implementing different (possibly sequential) tasks. ### Adding tutorials or examples -We always look forward to extending our [suite of tutorials and examples](https://www.devitoproject.org/devito/tutorials.html) with new Jupyter Notebooks. 
Even something completely new, such as a new series of tutorials showing your work with Devito, would be a great addition. +We always look forward to extending our [suite of tutorials and examples](https://www.devitoproject.org/devito/tutorials.html) with new Jupyter Notebooks. +Even something completely new, such as a new series of tutorials showing your work with Devito, would be a great addition. From 891c9ec5977e306816091bdc714ad44acdf506e4 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Tue, 6 Jan 2026 14:11:33 +0000 Subject: [PATCH 34/42] Revert "misc: Deliberately add a file with a typo" This reverts commit 1318b528343899d0971939d66ca1ff499e563eb2. --- experiment.txt | 1 - 1 file changed, 1 deletion(-) delete mode 100644 experiment.txt diff --git a/experiment.txt b/experiment.txt deleted file mode 100644 index 3b4d1bd3df..0000000000 --- a/experiment.txt +++ /dev/null @@ -1 +0,0 @@ -Add a fille with a typo in it. From a7a2ad029698b0cbb294c6f93535c311e0c90a83 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Tue, 6 Jan 2026 14:20:32 +0000 Subject: [PATCH 35/42] misc: Fix helper script --- scripts/typos_json_to_gha.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/typos_json_to_gha.py b/scripts/typos_json_to_gha.py index c7be78ecc6..cba99fcca4 100644 --- a/scripts/typos_json_to_gha.py +++ b/scripts/typos_json_to_gha.py @@ -13,13 +13,11 @@ def main(): ) for line in sys.stdin: - error_code = 1 # Grab the JSON data coming from typos from stdin data = json.loads(line.rstrip()) if data['type'] == 'binary_file': continue - try: # Calculate the end column and format the correction suggestions = ', '.join(data['corrections']) @@ -38,6 +36,8 @@ def main(): print('Caught unhandled exception') print(f'{data}') print(f'{e}') + finally: + error_code = 1 return error_code From 528a9a62eb690b040b848743c7333fdbc5bb7913 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Tue, 6 Jan 2026 17:05:38 +0000 Subject: [PATCH 36/42] misc: Remove 
double import of ctypes in allocators --- devito/data/allocators.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/devito/data/allocators.py b/devito/data/allocators.py index dc0decab17..474e426516 100644 --- a/devito/data/allocators.py +++ b/devito/data/allocators.py @@ -1,9 +1,9 @@ import abc import ctypes +import ctypes.util import mmap import os import sys -from ctypes.util import find_library import numpy as np @@ -153,7 +153,7 @@ class PosixAllocator(MemoryAllocator): @classmethod def initialize(cls): - handle = find_library('c') + handle = ctypes.util.find_library('c') # Special case: on MacOS Big Sur any code that attempts to check # for dynamic library presence by looking for a file at a path @@ -274,7 +274,7 @@ class NumaAllocator(MemoryAllocator): @classmethod def initialize(cls): - handle = find_library('numa') + handle = ctypes.util.find_library('numa') if handle is None: return lib = ctypes.CDLL(handle) From 4b30ce2300fea95aad6f0d5ec2e3a12d980945a6 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Tue, 6 Jan 2026 17:12:26 +0000 Subject: [PATCH 37/42] misc: Isolated fixes that only break things in PRO --- devito/ir/support/basic.py | 2 +- devito/ir/support/utils.py | 2 +- devito/passes/clusters/buffering.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/devito/ir/support/basic.py b/devito/ir/support/basic.py index 4405cb3b63..abc817ff73 100644 --- a/devito/ir/support/basic.py +++ b/devito/ir/support/basic.py @@ -438,7 +438,7 @@ def distance(self, other): # It still could be an imaginary dependence, e.g. 
`a[3] -> a[4]` or, more # nasty, `a[i+1, 3] -> a[i, 4]` - for i, j in zip(self[n:], other[n:], strict=True): + for i, j in zip(self[n:], other[n:], strict=False): if i == j: ret.append(S.Zero) else: diff --git a/devito/ir/support/utils.py b/devito/ir/support/utils.py index 53f6a55bb0..0d8639a5be 100644 --- a/devito/ir/support/utils.py +++ b/devito/ir/support/utils.py @@ -140,7 +140,7 @@ def detect_accesses(exprs): for e in retrieve_indexed(exprs, deep=True): f = e.function - for a, d0 in zip(e.indices, f.dimensions, strict=True): + for a, d0 in zip(e.indices, f.dimensions, strict=False): if isinstance(a, Indirection): a = a.mapped diff --git a/devito/passes/clusters/buffering.py b/devito/passes/clusters/buffering.py index 22f9004ee7..61676d2e7f 100644 --- a/devito/passes/clusters/buffering.py +++ b/devito/passes/clusters/buffering.py @@ -562,7 +562,7 @@ def write_to(self): # Analogous to the above, we need to include the halo region as well ihalo = IntervalGroup([ Interval(i.dim, -h.left, h.right, i.stamp) - for i, h in zip(ispace, self.b._size_halo, strict=True) + for i, h in zip(ispace, self.b._size_halo, strict=False) ]) ispace = IterationSpace.union(ispace, IterationSpace(ihalo)) From 4f50954c1560acd59b355e05929da9fd33d0f2c5 Mon Sep 17 00:00:00 2001 From: Jack Betteridge <43041811+JDBetteridge@users.noreply.github.com> Date: Fri, 9 Jan 2026 12:37:35 +0000 Subject: [PATCH 38/42] misc: Apply review suggestions Co-authored-by: Ed Caunt --- benchmarks/user/benchmark.py | 2 +- devito/core/operator.py | 2 +- devito/finite_differences/derivative.py | 2 +- devito/ir/clusters/visitors.py | 4 +++- devito/ir/iet/nodes.py | 4 ++-- devito/ir/iet/visitors.py | 4 ++-- devito/ir/stree/algorithms.py | 7 ++----- 7 files changed, 12 insertions(+), 13 deletions(-) diff --git a/benchmarks/user/benchmark.py b/benchmarks/user/benchmark.py index f2f29ffe6a..35496bb51f 100644 --- a/benchmarks/user/benchmark.py +++ b/benchmarks/user/benchmark.py @@ -71,7 +71,7 @@ def run_op(solver, 
operator, **options): """ Initialize any necessary input and run the operator associated with the solver. """ - # Get the operator if exist + # Get the operator if it exists try: op = getattr(solver, operator) except AttributeError as e: diff --git a/devito/core/operator.py b/devito/core/operator.py index 35a34e5898..974753a9fb 100644 --- a/devito/core/operator.py +++ b/devito/core/operator.py @@ -196,7 +196,7 @@ def _check_kwargs(cls, **kwargs): oo = kwargs['options'] if oo['mpi'] and oo['mpi'] not in cls.MPI_MODES: - raise InvalidOperator("Unsupported MPI mode `{}`".format(oo['mpi'])) + raise InvalidOperator(f"Unsupported MPI mode `{oo['mpi']}`") if oo['cse-algo'] not in ('basic', 'smartsort', 'advanced'): raise InvalidOperator("Illegal `cse-algo` value") diff --git a/devito/finite_differences/derivative.py b/devito/finite_differences/derivative.py index ecb1060631..68a92b2b0b 100644 --- a/devito/finite_differences/derivative.py +++ b/devito/finite_differences/derivative.py @@ -491,7 +491,7 @@ def _eval_at(self, func): x0 = func.indices_ref.getters psubs = {} nx0 = x0.copy() - for d, _ in x0.items(): + for d in x0: if d in self.dims: # d is a valid Derivative dimension continue diff --git a/devito/ir/clusters/visitors.py b/devito/ir/clusters/visitors.py index 98ffbad36d..11bcad5365 100644 --- a/devito/ir/clusters/visitors.py +++ b/devito/ir/clusters/visitors.py @@ -141,7 +141,9 @@ def __new__(cls, *args, mode='dense'): elif len(args) == 2: func, mode = args else: - raise AssertionError('Too many args') + raise ValueError( + f"Either 1 or 2 arguments permitted, {len(args)} provided" + ) obj = object.__new__(cls) obj.__init__(func, mode) return obj diff --git a/devito/ir/iet/nodes.py b/devito/ir/iet/nodes.py index a8a3a35b00..6214515275 100644 --- a/devito/ir/iet/nodes.py +++ b/devito/ir/iet/nodes.py @@ -1513,7 +1513,7 @@ def __init__(self, sync_ops, body=None): self.sync_ops = sync_ops def __repr__(self): - return "".format(",".join(str(i) for i in self.sync_ops)) 
+ return f"" @property def is_async_op(self): @@ -1590,7 +1590,7 @@ def __init__(self, body, halo_scheme): self._halo_scheme = halo_scheme def __repr__(self): - functions = "({})".format(",".join(i.name for i in self.functions)) + functions = f"({','.join(i.name for i in self.functions)})" return f"<{self.__class__.__name__}{functions}>" @property diff --git a/devito/ir/iet/visitors.py b/devito/ir/iet/visitors.py index aafbeec2fc..b450960354 100644 --- a/devito/ir/iet/visitors.py +++ b/devito/ir/iet/visitors.py @@ -479,7 +479,7 @@ def visit_PointerCast(self, o): elif isinstance(o.obj, IndexedData): v = f._C_name else: - raise AssertionError('rvalue is not a recognised type') + raise TypeError('rvalue is not a recognised type') rvalue = f'({cstr}**) {v}' else: @@ -508,7 +508,7 @@ def visit_PointerCast(self, o): elif isinstance(o.obj, DeviceMap): v = f._C_field_dmap else: - raise AssertionError('rvalue is not a recognised type') + raise TypeError('rvalue is not a recognised type') rvalue = f'({cstr} {rshape}) {f._C_name}->{v}' else: diff --git a/devito/ir/stree/algorithms.py b/devito/ir/stree/algorithms.py index f5e76ca209..3b7eaa29f1 100644 --- a/devito/ir/stree/algorithms.py +++ b/devito/ir/stree/algorithms.py @@ -228,11 +228,8 @@ def preprocess(clusters, options=None, **kwargs): processed.append(c) # Sanity check! 
- try: - assert not queue - except AssertionError as e: - if options['mpi']: - raise RuntimeError("Unsupported MPI for the given equations") from e + if not queue and options['mpi']: + raise RuntimeError("Unsupported MPI for the given equations") return processed From 1eced0444c5e60758625dd6015355171d072f858 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 9 Jan 2026 12:33:24 +0000 Subject: [PATCH 39/42] misc: Add steps for automatically fixing errors to contributing and pre-commit-config --- .pre-commit-config.yaml | 39 +++++++++++++++++++++++++++++++++++++-- CONTRIBUTING.md | 19 ++++++++++++++++++- 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9ed8ec27f7..8fbcc61d95 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,5 +1,5 @@ # See https://pre-commit.com for more information -# See https://pre-commit.com/hooks.html for more hooks +default_stages: [pre-commit] repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v3.2.0 @@ -13,23 +13,28 @@ repos: hooks: # Run isort to check only (don't modify files) - id: isort + name: "Check imports are sorted" args: [--check-only, --filter-files] + stages: [pre-commit] - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. 
rev: v0.14.4 hooks: # Run the linter to check only (don't modify files) - id: ruff-check + name: "Check code is linted with ruff" - repo: https://github.com/PyCQA/flake8 rev: 7.3.0 hooks: - id: flake8 + name: "Check code is linted with flake8" additional_dependencies: [flake8-pyproject] - repo: https://github.com/crate-ci/typos rev: v1.39.1 hooks: - id: typos - args: [] + name: "Check files for typos" + stages: [pre-commit] - repo: https://github.com/rhysd/actionlint rev: v1.7.8 hooks: @@ -39,3 +44,33 @@ repos: hooks: - id: hadolint-docker entry: -e HADOLINT_IGNORE=DL3003,DL3004,DL3005,DL3007,DL3008,DL3009,DL3013,DL3015,DL3042,DL3059,SC2103,SC2046,SC2086 ghcr.io/hadolint/hadolint hadolint + # + # These stages modify the files applying fixes where possible + # Since this may be undesirable they will not run automatically + # These stages can be run with + # pre-commit run --hook-stage manual + # + - repo: https://github.com/pycqa/isort + rev: 5.13.2 + hooks: + # Run isort to check only (don't modify files) + - id: isort + name: "Fix imports with isort" + args: [--filter-files] + stages: [manual] + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. 
+ rev: v0.14.4 + hooks: + # Run the linter to check only (don't modify files) + - id: ruff-check + name: "Fix linting errors with ruff check --fix" + args: [--fix] + stages: [manual] + - repo: https://github.com/crate-ci/typos + rev: v1.39.1 + hooks: + - id: typos + name: "Fix typos" + args: [-w] + stages: [manual] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 90fd23a031..e8e0a47864 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -110,7 +110,7 @@ These checks will: * Lint GitHub Actions workflow files using actionlint * * Lint Dockerfiles using hadolint * -(* these checks will not change the edited files, you must manually fix the files or run an automated tool eg: `ruff check --fix`) +(* these checks will not change the edited files, you must manually fix the files or run an automated tool eg: `ruff check --fix` see below for details) If you absolutely must push "dirty" code, `pre-commit` can be circumvented using: ```bash @@ -133,6 +133,23 @@ Some coding rules are "enforced" (and automatically checked by CI), some are "st - Comment your code, and do not be afraid of being verbose. The first letter must be capitalized. Do not use punctuation (unless the comment consists of multiple sentences). * We _like_ that blank lines are used to logically split blocks of code implementing different (possibly sequential) tasks. +#### Pre-commit + +You can use `pre-commit` to apply automated fixes for line endings, ends of files, import sorting and ruff linting. +All of these steps can be run together on your changes by running: +```bash +pre-commit run --hook-stage manual +``` +Adding the `-a` flag runs this on all files in the repository, not just the files that you have changed. +Adding the name of the stage will run just one check. +See the [pre-commit-config](https://github.com/devitocodes/devito/blob/main/.pre-commit.yaml) file for the names of stages. + +Some fixes can be automatically applied by the ruff linter, but may change the code in undesirable ways. 
+This step can only be run manually: +```bash +ruff check --fix --unsafe-fixes +``` + ### Adding tutorials or examples We always look forward to extending our [suite of tutorials and examples](https://www.devitoproject.org/devito/tutorials.html) with new Jupyter Notebooks. From 538e2d31c54a1db67e3deee2ed3f3c5886f4bb2f Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 9 Jan 2026 12:57:12 +0000 Subject: [PATCH 40/42] misc: Change .yml -> .yaml for consistency --- .github/{dependabot.yml => dependabot.yaml} | 0 .github/{release-drafter.yml => release-drafter.yaml} | 0 .github/workflows/{asv.yml => asv.yaml} | 0 .github/workflows/{docker-bases.yml => docker-bases.yaml} | 0 .github/workflows/{docker-devito.yml => docker-devito.yaml} | 0 .github/workflows/{examples-mpi.yml => examples-mpi.yaml} | 0 .github/workflows/{examples.yml => examples.yaml} | 0 .github/workflows/{lint.yml => lint.yaml} | 0 .../workflows/{pytest-core-mpi.yml => pytest-core-mpi.yaml} | 0 .../{pytest-core-nompi.yml => pytest-core-nompi.yaml} | 0 .github/workflows/{pytest-gpu.yml => pytest-gpu.yaml} | 0 .github/workflows/{pythonpublish.yml => pythonpublish.yaml} | 0 .github/workflows/{release-notes.yml => release-notes.yaml} | 0 .github/workflows/{triggers.yml => triggers.yaml} | 0 .github/workflows/{tutorials.yml => tutorials.yaml} | 0 README.md | 6 +++--- codecov.yml => codecov.yaml | 0 docker-compose.yml => docker-compose.yaml | 0 environment-dev.yml => environment-dev.yaml | 0 19 files changed, 3 insertions(+), 3 deletions(-) rename .github/{dependabot.yml => dependabot.yaml} (100%) rename .github/{release-drafter.yml => release-drafter.yaml} (100%) rename .github/workflows/{asv.yml => asv.yaml} (100%) rename .github/workflows/{docker-bases.yml => docker-bases.yaml} (100%) rename .github/workflows/{docker-devito.yml => docker-devito.yaml} (100%) rename .github/workflows/{examples-mpi.yml => examples-mpi.yaml} (100%) rename .github/workflows/{examples.yml => examples.yaml} (100%) rename 
.github/workflows/{lint.yml => lint.yaml} (100%) rename .github/workflows/{pytest-core-mpi.yml => pytest-core-mpi.yaml} (100%) rename .github/workflows/{pytest-core-nompi.yml => pytest-core-nompi.yaml} (100%) rename .github/workflows/{pytest-gpu.yml => pytest-gpu.yaml} (100%) rename .github/workflows/{pythonpublish.yml => pythonpublish.yaml} (100%) rename .github/workflows/{release-notes.yml => release-notes.yaml} (100%) rename .github/workflows/{triggers.yml => triggers.yaml} (100%) rename .github/workflows/{tutorials.yml => tutorials.yaml} (100%) rename codecov.yml => codecov.yaml (100%) rename docker-compose.yml => docker-compose.yaml (100%) rename environment-dev.yml => environment-dev.yaml (100%) diff --git a/.github/dependabot.yml b/.github/dependabot.yaml similarity index 100% rename from .github/dependabot.yml rename to .github/dependabot.yaml diff --git a/.github/release-drafter.yml b/.github/release-drafter.yaml similarity index 100% rename from .github/release-drafter.yml rename to .github/release-drafter.yaml diff --git a/.github/workflows/asv.yml b/.github/workflows/asv.yaml similarity index 100% rename from .github/workflows/asv.yml rename to .github/workflows/asv.yaml diff --git a/.github/workflows/docker-bases.yml b/.github/workflows/docker-bases.yaml similarity index 100% rename from .github/workflows/docker-bases.yml rename to .github/workflows/docker-bases.yaml diff --git a/.github/workflows/docker-devito.yml b/.github/workflows/docker-devito.yaml similarity index 100% rename from .github/workflows/docker-devito.yml rename to .github/workflows/docker-devito.yaml diff --git a/.github/workflows/examples-mpi.yml b/.github/workflows/examples-mpi.yaml similarity index 100% rename from .github/workflows/examples-mpi.yml rename to .github/workflows/examples-mpi.yaml diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yaml similarity index 100% rename from .github/workflows/examples.yml rename to .github/workflows/examples.yaml diff 
--git a/.github/workflows/lint.yml b/.github/workflows/lint.yaml similarity index 100% rename from .github/workflows/lint.yml rename to .github/workflows/lint.yaml diff --git a/.github/workflows/pytest-core-mpi.yml b/.github/workflows/pytest-core-mpi.yaml similarity index 100% rename from .github/workflows/pytest-core-mpi.yml rename to .github/workflows/pytest-core-mpi.yaml diff --git a/.github/workflows/pytest-core-nompi.yml b/.github/workflows/pytest-core-nompi.yaml similarity index 100% rename from .github/workflows/pytest-core-nompi.yml rename to .github/workflows/pytest-core-nompi.yaml diff --git a/.github/workflows/pytest-gpu.yml b/.github/workflows/pytest-gpu.yaml similarity index 100% rename from .github/workflows/pytest-gpu.yml rename to .github/workflows/pytest-gpu.yaml diff --git a/.github/workflows/pythonpublish.yml b/.github/workflows/pythonpublish.yaml similarity index 100% rename from .github/workflows/pythonpublish.yml rename to .github/workflows/pythonpublish.yaml diff --git a/.github/workflows/release-notes.yml b/.github/workflows/release-notes.yaml similarity index 100% rename from .github/workflows/release-notes.yml rename to .github/workflows/release-notes.yaml diff --git a/.github/workflows/triggers.yml b/.github/workflows/triggers.yaml similarity index 100% rename from .github/workflows/triggers.yml rename to .github/workflows/triggers.yaml diff --git a/.github/workflows/tutorials.yml b/.github/workflows/tutorials.yaml similarity index 100% rename from .github/workflows/tutorials.yml rename to .github/workflows/tutorials.yaml diff --git a/README.md b/README.md index e234164857..f9edc094bc 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # Devito: Fast Stencil Computation from Symbolic Specification -[![Build Status for the Core backend](https://github.com/devitocodes/devito/actions/workflows/pytest-core-nompi.yml/badge.svg?branch=main)](https://github.com/devitocodes/devito/actions/workflows/pytest-core-nompi.yml) -[![Build Status with 
MPI](https://github.com/devitocodes/devito/actions/workflows/pytest-core-mpi.yml/badge.svg?branch=main)](https://github.com/devitocodes/devito/actions/workflows/pytest-core-mpi.yml) -[![Build Status on GPU](https://github.com/devitocodes/devito/actions/workflows/pytest-gpu.yml/badge.svg?branch=main)](https://github.com/devitocodes/devito/actions/workflows/pytest-gpu.yml) +[![Build Status for the Core backend](https://github.com/devitocodes/devito/actions/workflows/pytest-core-nompi.yaml/badge.svg?branch=main)](https://github.com/devitocodes/devito/actions/workflows/pytest-core-nompi.yaml) +[![Build Status with MPI](https://github.com/devitocodes/devito/actions/workflows/pytest-core-mpi.yaml/badge.svg?branch=main)](https://github.com/devitocodes/devito/actions/workflows/pytest-core-mpi.yaml) +[![Build Status on GPU](https://github.com/devitocodes/devito/actions/workflows/pytest-gpu.yaml/badge.svg?branch=main)](https://github.com/devitocodes/devito/actions/workflows/pytest-gpu.yaml) [![Code Coverage](https://codecov.io/gh/devitocodes/devito/branch/main/graph/badge.svg)](https://codecov.io/gh/devitocodes/devito) [![Slack Status](https://img.shields.io/badge/chat-on%20slack-%2336C5F0)](https://join.slack.com/t/devitocodes/shared_invite/zt-2hgp6891e-jQDcepOWPQwxL5JJegYKSA) [![asv](http://img.shields.io/badge/benchmarked%20by-asv-blue.svg?style=flat)](https://devitocodes.github.io/devito-performance) diff --git a/codecov.yml b/codecov.yaml similarity index 100% rename from codecov.yml rename to codecov.yaml diff --git a/docker-compose.yml b/docker-compose.yaml similarity index 100% rename from docker-compose.yml rename to docker-compose.yaml diff --git a/environment-dev.yml b/environment-dev.yaml similarity index 100% rename from environment-dev.yml rename to environment-dev.yaml From deeb1c4f7ca8307f05b01669904fe311e0bbfb74 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 9 Jan 2026 13:10:00 +0000 Subject: [PATCH 41/42] misc: Missed some ymls --- 
.github/workflows/examples.yaml | 2 +- MANIFEST.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/examples.yaml b/.github/workflows/examples.yaml index 07e4c54475..827a205b37 100644 --- a/.github/workflows/examples.yaml +++ b/.github/workflows/examples.yaml @@ -49,7 +49,7 @@ jobs: uses: conda-incubator/setup-miniconda@v3 with: activate-environment: devito - environment-file: environment-dev.yml + environment-file: environment-dev.yaml auto-activate-base: false - name: Tests in examples diff --git a/MANIFEST.in b/MANIFEST.in index 1d80e3db97..90021f26af 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -10,7 +10,7 @@ prune .github prune binder prune docker # Exclude hidden files and YAML files -exclude .* *.yml +exclude .* *.yaml # Exclude compiled and temporary files global-exclude *~ *.py[cod] *.so From 371a13c70c267b0802d7c0dc42ae2149fe394918 Mon Sep 17 00:00:00 2001 From: Jack Betteridge Date: Fri, 9 Jan 2026 13:16:40 +0000 Subject: [PATCH 42/42] misc: Fix Eds suggestion --- devito/ir/stree/algorithms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devito/ir/stree/algorithms.py b/devito/ir/stree/algorithms.py index 3b7eaa29f1..d4a761dfc8 100644 --- a/devito/ir/stree/algorithms.py +++ b/devito/ir/stree/algorithms.py @@ -228,7 +228,7 @@ def preprocess(clusters, options=None, **kwargs): processed.append(c) # Sanity check! - if not queue and options['mpi']: + if queue and options['mpi']: raise RuntimeError("Unsupported MPI for the given equations") return processed