diff --git a/.gitattributes b/.gitattributes index 5469090436..d4fb97db55 100644 --- a/.gitattributes +++ b/.gitattributes @@ -6,8 +6,8 @@ benchmarks/fontes18/* linguist-vendored benchmarks/trindade16/* linguist-vendored *.v linguist-detectable=false *.fs linguist-detectable=false -libs/* linguist-detectable=false -libs/combinations/* linguist-detectable=false -libs/graph-coloring/* linguist-detectable=false -libs/mugen/* linguist-detectable=false -libs/undirected_graph/* linguist-detectable=false +vendors/* linguist-detectable=false +vendors/combinations/* linguist-detectable=false +vendors/graph-coloring/* linguist-detectable=false +vendors/mugen/* linguist-detectable=false +vendors/undirected_graph/* linguist-detectable=false diff --git a/.github/workflows/clang-tidy-review.yml b/.github/workflows/clang-tidy-review.yml index 8d61fdc564..b6593a9415 100644 --- a/.github/workflows/clang-tidy-review.yml +++ b/.github/workflows/clang-tidy-review.yml @@ -19,7 +19,7 @@ permissions: pull-requests: write env: - Z3_VERSION: 4.13.0 + Z3_VERSION: 4.13.4 jobs: clangtidy: @@ -30,7 +30,6 @@ jobs: - name: Clone Repository uses: actions/checkout@v6 with: - submodules: recursive fetch-depth: 0 - name: Install dependencies @@ -57,6 +56,7 @@ jobs: -DFICTION_PROGRESS_BARS=ON -DFICTION_Z3=ON -DFICTION_ENABLE_MUGEN=ON + -DFICTION_PYTHON_BINDINGS=ON -DFICTION_ALGLIB=ON -DFICTION_ENABLE_MUGEN=ON -DMOCKTURTLE_EXAMPLES=OFF @@ -72,7 +72,7 @@ jobs: database: "build" step-summary: true thread-comments: ${{ ((github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork) && 'update') || 'false' }} - ignore-tidy: "build|libs/*|docs/*|benchmarks/*|bib/*|bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp|bindings/mnt/pyfiction/include/pyfiction/documentation.hpp" + ignore: "build|libs/*|vendors/*|docs/*|benchmarks/*|bib/*|bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp|bindings/mnt/pyfiction/include/pyfiction/documentation.hpp" 
version: "21" - if: steps.linter.outputs.checks-failed > 0 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index fac98c6d85..82d058300c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -9,7 +9,6 @@ on: - "**/*.cmake" - "**/CMakeLists.txt" - "**/*.py" - - "libs/**" - ".github/workflows/codeql-analysis.yml" - "!bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" pull_request: @@ -20,7 +19,6 @@ on: - "**/*.cmake" - "**/CMakeLists.txt" - "**/*.py" - - "libs/**" - ".github/workflows/codeql-analysis.yml" - "!bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" merge_group: @@ -32,7 +30,7 @@ concurrency: cancel-in-progress: true env: - Z3_VERSION: 4.13.0 + Z3_VERSION: 4.13.4 jobs: analyze: @@ -68,7 +66,6 @@ jobs: - name: Checkout repository uses: actions/checkout@v6 with: - submodules: recursive fetch-depth: 0 - name: Setup ccache @@ -91,7 +88,7 @@ jobs: - name: Install pip packages uses: BSFishy/pip-action@v1 with: - requirements: ${{github.workspace}}/libs/mugen/requirements.txt + requirements: ${{github.workspace}}/vendors/mugen/requirements.txt - name: Setup Z3 Solver id: z3 @@ -110,19 +107,12 @@ jobs: languages: ${{ matrix.language }} config-file: .github/codeql-config.yml - - if: matrix.language == 'cpp' - name: Create Build Environment - run: cmake -E make_directory ${{github.workspace}}/build - - if: matrix.language == 'cpp' name: Configure CMake - working-directory: ${{github.workspace}}/build run: > - cmake ${{github.workspace}} + cmake -S ${{github.workspace}} -B ${{github.workspace}}/build -DCMAKE_CXX_COMPILER=${{matrix.compiler}} -DCMAKE_BUILD_TYPE=${{matrix.build_type}} - -DFICTION_ENABLE_UNITY_BUILD=ON - -DFICTION_ENABLE_PCH=ON -DFICTION_CLI=OFF -DFICTION_TEST=ON -DFICTION_BENCHMARK=OFF @@ -136,8 +126,7 @@ jobs: - if: matrix.language == 'cpp' name: Build fiction - working-directory: ${{github.workspace}}/build - run: cmake 
--build . --config ${{matrix.build_type}} -j4 + run: cmake --build ${{github.workspace}}/build --config ${{matrix.build_type}} --parallel 4 - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v4 @@ -149,7 +138,7 @@ jobs: uses: advanced-security/filter-sarif@main with: patterns: | - -**/libs/** + -**/vendors/** -**/docs/** -**/experiments/** input: sarif-results/${{ matrix.language }}.sarif diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 5f3055271e..e65c962340 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -8,7 +8,6 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - ".github/workflows/coverage.yml" - "!bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" pull_request: @@ -18,7 +17,6 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - ".github/workflows/coverage.yml" - "!bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" merge_group: @@ -33,7 +31,7 @@ defaults: env: BUILD_TYPE: Debug - Z3_VERSION: 4.13.0 + Z3_VERSION: 4.13.4 jobs: build_and_test: @@ -51,8 +49,6 @@ jobs: - name: Clone Repository uses: actions/checkout@v6 - with: - submodules: recursive - name: Setup ccache uses: hendrikmuhs/ccache-action@v1.2 @@ -74,7 +70,7 @@ jobs: - name: Install pip packages uses: BSFishy/pip-action@v1 with: - requirements: ${{github.workspace}}/libs/mugen/requirements.txt + requirements: ${{github.workspace}}/vendors/mugen/requirements.txt - name: Setup Z3 Solver id: z3 @@ -91,11 +87,10 @@ jobs: cmake -S ${{github.workspace}} -B ${{github.workspace}}/build -G Ninja -DCMAKE_CXX_COMPILER=${{matrix.compiler}} -DCMAKE_BUILD_TYPE=$BUILD_TYPE - -DFICTION_ENABLE_UNITY_BUILD=ON - -DFICTION_ENABLE_PCH=ON -DFICTION_CLI=OFF -DFICTION_TEST=ON -DFICTION_BENCHMARK=OFF + -DFICTION_EXPERIMENTS=OFF -DFICTION_Z3=ON -DFICTION_ALGLIB=ON -DFICTION_ENABLE_MUGEN=ON @@ -105,11 +100,11 @@ jobs: -DMOCKTURTLE_EXAMPLES=OFF - name: Build - 
run: cmake --build ${{github.workspace}}/build --config $BUILD_TYPE --parallel + run: cmake --build ${{github.workspace}}/build --config $BUILD_TYPE --parallel 4 - name: Test working-directory: ${{github.workspace}}/build - run: ctest -C $BUILD_TYPE --output-on-failure --repeat until-pass:3 --parallel + run: ctest -C $BUILD_TYPE --output-on-failure --repeat until-pass:3 --parallel 4 - name: Generate Coverage Report run: | diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 9aee6a4ccf..fef3a050ad 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -8,7 +8,6 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - ".github/workflows/macos.yml" - "!bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" pull_request: @@ -18,7 +17,6 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - ".github/workflows/macos.yml" - "!bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" merge_group: @@ -32,7 +30,7 @@ defaults: shell: bash env: - Z3_VERSION: 4.13.0 + Z3_VERSION: 4.13.4 jobs: build_and_test: @@ -47,8 +45,6 @@ jobs: steps: - name: Clone Repository uses: actions/checkout@v6 - with: - submodules: recursive - name: Setup Python uses: actions/setup-python@v6 @@ -77,17 +73,12 @@ jobs: version: ${{env.Z3_VERSION}} # Build and test pipeline for Debug mode - - name: Create Build Environment (Debug) - run: cmake -E make_directory ${{github.workspace}}/build_debug - name: Configure CMake (Debug) - working-directory: ${{github.workspace}}/build_debug run: > - cmake ${{github.workspace}} + cmake -S ${{github.workspace}} -B ${{github.workspace}}/build_debug -DCMAKE_CXX_COMPILER=${{matrix.compiler}} -DCMAKE_BUILD_TYPE=Debug - -DFICTION_ENABLE_UNITY_BUILD=ON - -DFICTION_ENABLE_PCH=ON -DFICTION_CLI=ON -DFICTION_TEST=ON -DFICTION_BENCHMARK=OFF @@ -99,25 +90,18 @@ jobs: -DMOCKTURTLE_EXAMPLES=OFF - name: Build (Debug) - working-directory: 
${{github.workspace}}/build_debug - run: cmake --build . --config Debug -j4 # all macOS runners provide at least 3 cores + run: cmake --build ${{github.workspace}}/build_debug --config Debug --parallel 3 - name: Test (Debug) - working-directory: ${{github.workspace}}/build_debug - run: ctest -C Debug --verbose --output-on-failure --repeat until-pass:3 --parallel 4 + run: ctest --test-dir ${{github.workspace}}/build_debug -C Debug --verbose --output-on-failure --repeat until-pass:3 --parallel 3 # Build and test pipeline for Release mode - - name: Create Build Environment (Release) - run: cmake -E make_directory ${{github.workspace}}/build_release - name: Configure CMake (Release) - working-directory: ${{github.workspace}}/build_release run: > - cmake ${{github.workspace}} + cmake -S ${{github.workspace}} -B ${{github.workspace}}/build_release -DCMAKE_CXX_COMPILER=${{matrix.compiler}} -DCMAKE_BUILD_TYPE=Release - -DFICTION_ENABLE_UNITY_BUILD=ON - -DFICTION_ENABLE_PCH=ON -DFICTION_ENABLE_JEMALLOC=ON -DFICTION_CLI=ON -DFICTION_TEST=ON @@ -130,9 +114,7 @@ jobs: -DMOCKTURTLE_EXAMPLES=OFF - name: Build (Release) - working-directory: ${{github.workspace}}/build_release - run: cmake --build . 
--config Release -j4 # all macOS runners provide at least 3 cores + run: cmake --build ${{github.workspace}}/build_release --config Release --parallel 3 - name: Test (Release) - working-directory: ${{github.workspace}}/build_release - run: ctest -C Release --verbose --output-on-failure --repeat until-pass:3 --parallel 4 + run: ctest --test-dir ${{github.workspace}}/build_release -C Release --verbose --output-on-failure --repeat until-pass:3 --parallel 3 diff --git a/.github/workflows/pyfiction-docstring-generator.yml b/.github/workflows/pyfiction-docstring-generator.yml index dbb5121284..c6da9c0896 100644 --- a/.github/workflows/pyfiction-docstring-generator.yml +++ b/.github/workflows/pyfiction-docstring-generator.yml @@ -15,7 +15,6 @@ jobs: - name: Clone Repository uses: actions/checkout@v6 with: - submodules: recursive fetch-depth: 0 # Fetch all history for all branches and tags - name: Setup Python diff --git a/.github/workflows/pyfiction-pypi-deployment.yml b/.github/workflows/pyfiction-pypi-deployment.yml index f685dc6c4a..e603f8a4c9 100644 --- a/.github/workflows/pyfiction-pypi-deployment.yml +++ b/.github/workflows/pyfiction-pypi-deployment.yml @@ -15,10 +15,9 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - "noxfile.py" - "pyproject.toml" - - ".uv.lock" + - "uv.lock" - ".github/workflows/pyfiction-pypi-deployment.yml" pull_request: branches: ["main"] @@ -29,10 +28,9 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - "noxfile.py" - "pyproject.toml" - - ".uv.lock" + - "uv.lock" - ".github/workflows/pyfiction-pypi-deployment.yml" workflow_dispatch: workflow_run: @@ -50,17 +48,16 @@ concurrency: cancel-in-progress: true env: - Z3_VERSION: 4.13.0 + Z3_VERSION: 4.13.4 jobs: build_sdist: name: 📦 Build Source Distribution runs-on: ubuntu-latest steps: - - name: Clone respository + - name: Clone repository uses: actions/checkout@v6 with: - submodules: recursive fetch-depth: 0 - name: Install the latest version of uv 
diff --git a/.github/workflows/python-bindings.yml b/.github/workflows/python-bindings.yml index 9e54a4bd51..8dfccc3de6 100644 --- a/.github/workflows/python-bindings.yml +++ b/.github/workflows/python-bindings.yml @@ -10,10 +10,9 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - "noxfile.py" - "pyproject.toml" - - ".uv.lock" + - "uv.lock" - ".github/workflows/python-bindings.yml" pull_request: branches: ["main"] @@ -24,10 +23,9 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - "noxfile.py" - "pyproject.toml" - - ".uv.lock" + - "uv.lock" - ".github/workflows/python-bindings.yml" workflow_run: workflows: ["pyfiction Docstring Generator"] @@ -43,7 +41,7 @@ defaults: shell: bash env: - Z3_VERSION: 4.13.0 + Z3_VERSION: 4.13.4 jobs: python-tests: @@ -52,12 +50,11 @@ jobs: strategy: fail-fast: false matrix: - runs-on: [ubuntu-latest, macos-13, macos-15, windows-latest] + runs-on: [ubuntu-latest, macos-latest, windows-latest] steps: - name: Clone Repository uses: actions/checkout@v6 with: - submodules: recursive fetch-depth: 0 - name: Setup ccache diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index daa401eb8a..495eb24ba2 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -8,7 +8,6 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - ".github/workflows/ubuntu.yml" - "!bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" pull_request: @@ -18,7 +17,6 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - ".github/workflows/ubuntu.yml" - "!bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" merge_group: @@ -32,7 +30,7 @@ defaults: shell: bash env: - Z3_VERSION: 4.13.0 + Z3_VERSION: 4.13.4 jobs: build_and_test: @@ -78,8 +76,6 @@ jobs: - name: Clone Repository uses: actions/checkout@v6 - with: - submodules: recursive - name: Setup ccache # ccache is not supported on ARM yet @@ -100,7 +96,7 @@ jobs: - 
name: Install pip packages uses: BSFishy/pip-action@v1 with: - requirements: ${{github.workspace}}/libs/mugen/requirements.txt + requirements: ${{github.workspace}}/vendors/mugen/requirements.txt - name: Setup mold uses: rui314/setup-mold@v1 @@ -118,17 +114,11 @@ jobs: # Build and test pipeline for Debug mode - - name: Create Build Environment (Debug) - run: cmake -E make_directory ${{github.workspace}}/build_debug - - name: Configure CMake (Debug) - working-directory: ${{github.workspace}}/build_debug run: > - cmake ${{github.workspace}} ${{matrix.cppstandard}} + cmake -S ${{github.workspace}} -B ${{github.workspace}}/build_debug ${{matrix.cppstandard}} -DCMAKE_CXX_COMPILER=${{matrix.compiler}} -DCMAKE_BUILD_TYPE=Debug - -DFICTION_ENABLE_UNITY_BUILD=ON - -DFICTION_ENABLE_PCH=ON -DFICTION_CLI=ON -DFICTION_TEST=ON -DFICTION_BENCHMARK=OFF @@ -142,42 +132,32 @@ jobs: -DFICTION_LIGHTWEIGHT_DEBUG_BUILDS=ON - name: Build (Debug) - working-directory: ${{github.workspace}}/build_debug - run: cmake --build . 
--config Debug -j4 + run: cmake --build ${{github.workspace}}/build_debug --config Debug --parallel 4 - name: Test (Debug) - working-directory: ${{github.workspace}}/build_debug - run: ctest -C Debug --verbose --output-on-failure --repeat until-pass:3 --parallel 4 + run: ctest --test-dir ${{github.workspace}}/build_debug -C Debug --verbose --output-on-failure --repeat until-pass:3 --parallel 4 # Build and test pipeline for Release mode - - name: Create Build Environment (Release) - run: cmake -E make_directory ${{github.workspace}}/build_release - - name: Configure CMake (Release) - working-directory: ${{github.workspace}}/build_release run: > - cmake ${{github.workspace}} ${{matrix.cppstandard}} + cmake -S ${{github.workspace}} -B ${{github.workspace}}/build_release ${{matrix.cppstandard}} -DCMAKE_CXX_COMPILER=${{matrix.compiler}} -DCMAKE_BUILD_TYPE=Release - -DFICTION_ENABLE_UNITY_BUILD=ON - -DFICTION_ENABLE_PCH=ON - -DFICTION_ENABLE_JEMALLOC=ON -DFICTION_CLI=ON -DFICTION_TEST=ON -DFICTION_BENCHMARK=OFF -DFICTION_EXPERIMENTS=ON ${{ matrix.os != 'ubuntu-24.04-arm' && '-DFICTION_Z3=ON' || '' }} -DFICTION_ALGLIB=ON + -DFICTION_ENABLE_JEMALLOC=ON -DFICTION_ENABLE_MUGEN=ON -DFICTION_PROGRESS_BARS=OFF -DFICTION_WARNINGS_AS_ERRORS=OFF -DMOCKTURTLE_EXAMPLES=OFF - name: Build (Release) - working-directory: ${{github.workspace}}/build_release - run: cmake --build . 
--config Release -j4 + run: cmake --build ${{github.workspace}}/build_release --config Release --parallel 4 - name: Test (Release) - working-directory: ${{github.workspace}}/build_release - run: ctest -C Release --verbose --output-on-failure --repeat until-pass:3 --parallel 4 + run: ctest --test-dir ${{github.workspace}}/build_release -C Release --verbose --output-on-failure --repeat until-pass:3 --parallel 4 diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 2e3e7f4816..d8fac0e401 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -8,7 +8,6 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - ".github/workflows/windows.yml" - "!bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" pull_request: @@ -18,7 +17,6 @@ on: - "**/*.cpp" - "**/*.cmake" - "**/CMakeLists.txt" - - "libs/**" - ".github/workflows/windows.yml" - "!bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" merge_group: @@ -32,7 +30,7 @@ defaults: shell: pwsh # use pwsh as directory handling does not seem to work with bash env: - Z3_VERSION: 4.13.0 + Z3_VERSION: 4.13.4 jobs: build_and_test: @@ -54,8 +52,6 @@ jobs: steps: - name: Clone Repository uses: actions/checkout@v6 - with: - submodules: recursive - name: Setup Python uses: actions/setup-python@v6 @@ -81,18 +77,12 @@ jobs: # Build and test pipeline for Debug mode - - name: Create Build Environment (Debug) - run: cmake -E make_directory ${{github.workspace}}\build_debug - - name: Configure CMake (Debug) - working-directory: ${{github.workspace}}\build_debug run: > - cmake ${{github.workspace}} -G Ninja + cmake -S ${{github.workspace}} -B ${{github.workspace}}/build_debug -G Ninja -DCMAKE_CXX_COMPILER=${{matrix.cxx_compiler}} -DCMAKE_C_COMPILER=${{matrix.c_compiler}} -DCMAKE_BUILD_TYPE=Debug - -DFICTION_ENABLE_UNITY_BUILD=ON - -DFICTION_ENABLE_PCH=ON -DFICTION_CLI=ON -DFICTION_TEST=ON -DFICTION_BENCHMARK=OFF @@ -103,27 +93,19 @@ jobs: 
-DMOCKTURTLE_EXAMPLES=OFF - name: Build (Debug) - working-directory: ${{github.workspace}}\build_debug - run: cmake --build . --config Debug -j4 + run: cmake --build ${{github.workspace}}/build_debug --config Debug --parallel 4 - name: Test (Debug) - working-directory: ${{github.workspace}}\build_debug - run: ctest -C Debug --verbose --output-on-failure --repeat until-pass:3 --parallel 4 --exclude-regex "quality" + run: ctest --test-dir ${{github.workspace}}/build_debug -C Debug --verbose --output-on-failure --repeat until-pass:3 --parallel 4 # Build and test pipeline for Release mode - - name: Create Build Environment (Release) - run: cmake -E make_directory ${{github.workspace}}\build_release - - name: Configure CMake (Release) - working-directory: ${{github.workspace}}\build_release run: > - cmake ${{github.workspace}} -G Ninja + cmake -S ${{github.workspace}} -B ${{github.workspace}}/build_release -G Ninja -DCMAKE_CXX_COMPILER=${{matrix.cxx_compiler}} -DCMAKE_C_COMPILER=${{matrix.c_compiler}} -DCMAKE_BUILD_TYPE=Release - -DFICTION_ENABLE_UNITY_BUILD=ON - -DFICTION_ENABLE_PCH=ON -DFICTION_CLI=ON -DFICTION_TEST=ON -DFICTION_BENCHMARK=OFF @@ -134,9 +116,7 @@ jobs: -DMOCKTURTLE_EXAMPLES=OFF - name: Build (Release) - working-directory: ${{github.workspace}}\build_release - run: cmake --build . 
--config Release -j4 + run: cmake --build ${{github.workspace}}/build_release --config Release --parallel 4 - name: Test (Release) - working-directory: ${{github.workspace}}\build_release - run: ctest -C Release --verbose --output-on-failure --repeat until-pass:3 --parallel 4 + run: ctest --test-dir ${{github.workspace}}/build_release -C Release --verbose --output-on-failure --repeat until-pass:3 --parallel 4 diff --git a/.gitignore b/.gitignore index 44ab497fb7..e9c41488b3 100644 --- a/.gitignore +++ b/.gitignore @@ -42,7 +42,13 @@ # Build directories cmake-build-* -build/ +build* +docs/_build/ +docs/html/ +*/_dep/ + +# Cache files +.cache/ # IDE files *.directory @@ -51,6 +57,7 @@ build/ # Experiment files *.json +!CMakePresets.json *.csv # Coverage files diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 4eda19d6c2..0000000000 --- a/.gitmodules +++ /dev/null @@ -1,25 +0,0 @@ -[submodule "libs/alice"] - path = libs/alice - url = https://github.com/marcelwa/alice.git -[submodule "libs/Catch2"] - path = libs/Catch2 - url = https://github.com/catchorg/Catch2.git -[submodule "libs/parallel-hashmap"] - path = libs/parallel-hashmap - url = https://github.com/greg7mdp/parallel-hashmap.git -[submodule "libs/tinyxml2"] - path = libs/tinyxml2 - url = https://github.com/leethomason/tinyxml2.git -[submodule "libs/pybind11"] - path = libs/pybind11 - url = https://github.com/pybind/pybind11.git -[submodule "libs/json"] - path = libs/json - url = https://github.com/nlohmann/json.git -[submodule "libs/mockturtle"] - path = libs/mockturtle - url = https://github.com/marcelwa/mockturtle.git - branch = mnt -[submodule "libs/alglib-cmake"] - path = libs/alglib-cmake - url = https://github.com/wlambooy/alglib-cmake.git diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 80f38eaa31..6f865b6218 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,7 +12,7 @@ ci: autofix_commit_msg: "🎨 Incorporated pre-commit fixes" 
autoupdate_schedule: quarterly -exclude: "^libs/|^benchmarks/|bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" +exclude: "^vendors/|^benchmarks/|bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp" repos: # Standard hooks @@ -21,8 +21,10 @@ repos: hooks: - id: check-added-large-files - id: check-case-conflict - - id: check-docstring-first + - id: check-vcs-permalinks - id: check-merge-conflict + - id: check-symlinks + - id: check-json - id: check-toml - id: check-yaml - id: debug-statements @@ -45,6 +47,15 @@ repos: - id: rst-directive-colons - id: rst-inline-touching-normal + # CMake format and lint the CMakeLists.txt files + - repo: https://github.com/cheshirekow/cmake-format-precommit + rev: v0.6.13 + hooks: + - id: cmake-format + additional_dependencies: [pyyaml] + types: [file] + files: (\.cmake|CMakeLists.txt)(.in)?$ + # clang-format the C++ part of the code base - repo: https://github.com/pre-commit/mirrors-clang-format rev: v21.1.8 @@ -53,6 +64,12 @@ repos: types_or: [c++, c] args: ["-style=file"] + # Ensure uv lock file is up-to-date + - repo: https://github.com/astral-sh/uv-pre-commit + rev: 0.9.16 + hooks: + - id: uv-lock + # Python linting using ruff - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.14.10 diff --git a/AGENTS.md b/AGENTS.md index a41093713b..1c3afb9a93 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -45,7 +45,7 @@ You are an expert software architect and engineer specializing in **C++17**, **P - `bindings/mnt/pyfiction/`: **Read/Write**. Python bindings and tests. - `cli/`: **Read/Write**. Command-line interface. - `docs/`: **Read/Write**. Documentation (Sphinx/Doxygen). - - `libs/`: **ReadOnly**. Third-party libraries (NEVER modify). + - `vendors/`: **ReadOnly**. Third-party libraries (NEVER modify). - `experiments/`: **Read/Write**. Scientific experiments for reproducibility of papers. 
## Commands @@ -160,9 +160,9 @@ def create_logic_network(filename: str) -> LogicNetwork: - Prefer STL over custom algorithms. - Use braced initialization. - ⚠️ **Ask First**: - - Before adding new third-party dependencies to `libs/` or `CMakeLists.txt`. + - Before adding new third-party dependencies to `vendors/` or `CMakeLists.txt`. - Before changing major build configurations. - 🚫 **Never**: - - Modify files in `libs/`. + - Modify files in `vendors/`. - Commit secrets or large binary files. - Use `using namespace std;`. diff --git a/CMakeLists.txt b/CMakeLists.txt index 7ae46dc7ba..2c011a29aa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,7 +2,7 @@ # Project (https://github.com/lefticus/cpp_starter_project) and CMake Template # (https://github.com/cpp-best-practices/cmake_template) -cmake_minimum_required(VERSION 3.23...3.29) +cmake_minimum_required(VERSION 3.23...4.2) # Only set the CMAKE_CXX_STANDARD if it is not set by someone else if(NOT DEFINED CMAKE_CXX_STANDARD) @@ -27,14 +27,12 @@ project( include(cmake/PreventInSourceBuilds.cmake) include(cmake/ProjectOptions.cmake) include(cmake/Utilities.cmake) +include(cmake/SystemLink.cmake) fiction_setup_options() fiction_global_options() fiction_local_options() -# don't know if this should be set globally from here or not... 
-set(CMAKE_CXX_VISIBILITY_PRESET hidden) - set(GIT_SHA "Unknown" CACHE STRING "SHA this build was generated from") @@ -50,8 +48,8 @@ add_library(fiction::fiction_warnings ALIAS fiction_warnings) # Include header files add_subdirectory(include) -# Include libraries -add_subdirectory(libs) +# Add vendors +add_subdirectory(vendors) # Enable progress bars if(NOT WIN32) @@ -96,14 +94,30 @@ endif() # target from visual studio if(MSVC) get_all_installable_targets(all_targets) - message("all_targets=${all_targets}") set_target_properties( ${all_targets} PROPERTIES VS_DEBUGGER_ENVIRONMENT "PATH=$(VC_ExecutablePath_x64);%PATH%") endif() # set the startup project for the "play" button in MSVC -set_property(DIRECTORY PROPERTY VS_STARTUP_PROJECT intro) +set_property(DIRECTORY PROPERTY VS_STARTUP_PROJECT fiction) + +# Package Configuration Generation +include(CMakePackageConfigHelpers) + +write_basic_package_version_file( + fictionConfigVersion.cmake + VERSION ${PROJECT_VERSION} + COMPATIBILITY SameMajorVersion) + +configure_package_config_file( + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/fictionConfig.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/fictionConfig.cmake + INSTALL_DESTINATION lib/cmake/fiction) + +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/fictionConfig.cmake + ${CMAKE_CURRENT_BINARY_DIR}/fictionConfigVersion.cmake + DESTINATION lib/cmake/fiction) if(CMAKE_SKIP_INSTALL_RULES) return() diff --git a/CMakePresets.json b/CMakePresets.json new file mode 100644 index 0000000000..de6efb80ba --- /dev/null +++ b/CMakePresets.json @@ -0,0 +1,119 @@ +{ + "version": 4, + "cmakeMinimumRequired": { + "major": 3, + "minor": 23, + "patch": 0 + }, + "configurePresets": [ + { + "name": "base", + "hidden": true, + "generator": "Ninja", + "binaryDir": "${sourceDir}/build/${presetName}", + "cacheVariables": { + "CMAKE_EXPORT_COMPILE_COMMANDS": "ON", + "FICTION_ENABLE_CACHE": "ON" + } + }, + { + "name": "dev", + "displayName": "Development", + "description": "Debug build for development", + "inherits": 
"base", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Debug", + "FICTION_WARNINGS_AS_ERRORS": "OFF" + } + }, + { + "name": "dev-asan", + "displayName": "Development (ASan)", + "description": "Debug build with Address Sanitizer", + "inherits": "dev", + "cacheVariables": { + "FICTION_ENABLE_SANITIZER_ADDRESS": "ON", + "FICTION_ENABLE_SANITIZER_LEAK": "ON", + "FICTION_ENABLE_SANITIZER_UNDEFINED": "ON" + } + }, + { + "name": "release", + "displayName": "Release", + "description": "Release build with optimizations", + "inherits": "base", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Release", + "FICTION_WARNINGS_AS_ERRORS": "ON", + "FICTION_ENABLE_IPO": "ON" + } + }, + { + "name": "ci", + "displayName": "CI", + "description": "Configuration for Continuous Integration", + "inherits": "base", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Release", + "FICTION_WARNINGS_AS_ERRORS": "ON", + "FICTION_ENABLE_IPO": "OFF" + } + } + ], + "buildPresets": [ + { + "name": "dev", + "configurePreset": "dev" + }, + { + "name": "dev-asan", + "configurePreset": "dev-asan" + }, + { + "name": "release", + "configurePreset": "release" + }, + { + "name": "ci", + "configurePreset": "ci" + } + ], + "testPresets": [ + { + "name": "dev", + "configurePreset": "dev", + "output": { + "outputOnFailure": true + }, + "execution": { + "noTestsAction": "error", + "stopOnFailure": false + } + }, + { + "name": "dev-asan", + "configurePreset": "dev-asan", + "output": { + "outputOnFailure": true + }, + "execution": { + "noTestsAction": "error", + "stopOnFailure": true + } + }, + { + "name": "release", + "configurePreset": "release", + "output": { + "outputOnFailure": true + } + }, + { + "name": "ci", + "configurePreset": "ci", + "output": { + "outputOnFailure": true + } + } + ] +} diff --git a/Dockerfile b/Dockerfile index 74bba503a8..8d1e39b63c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -32,7 +32,7 @@ USER appuser RUN python3 -m venv venv && \ . 
venv/bin/activate && \ pip install --upgrade --no-cache-dir pip setuptools && \ - pip install --no-cache-dir z3-solver==4.13.0 + pip install --no-cache-dir z3-solver==4.13.4 # Add the virtual environment to the PATH ENV PATH="/app/venv/bin:$PATH" @@ -47,8 +47,6 @@ COPY --chown=appuser:appuser . fiction/ RUN . venv/bin/activate \ && cmake -S fiction -B fiction/build \ -DCMAKE_BUILD_TYPE=Release \ - -DFICTION_ENABLE_UNITY_BUILD=ON \ - -DFICTION_ENABLE_PCH=ON \ -DFICTION_CLI=ON \ -DFICTION_TEST=OFF \ -DFICTION_EXPERIMENTS=OFF \ diff --git a/bindings/mnt/pyfiction/CMakeLists.txt b/bindings/mnt/pyfiction/CMakeLists.txt index 3ed7d42829..bb06f3fad9 100644 --- a/bindings/mnt/pyfiction/CMakeLists.txt +++ b/bindings/mnt/pyfiction/CMakeLists.txt @@ -1,40 +1,40 @@ -if (APPLE) - set(BASEPOINT @loader_path) -else () - set(BASEPOINT $ORIGIN) -endif () -list(APPEND CMAKE_INSTALL_RPATH ${BASEPOINT} ${BASEPOINT}/${CMAKE_INSTALL_LIBDIR}) +if(APPLE) + set(BASEPOINT @loader_path) +else() + set(BASEPOINT $ORIGIN) +endif() +list(APPEND CMAKE_INSTALL_RPATH ${BASEPOINT} + ${BASEPOINT}/${CMAKE_INSTALL_LIBDIR}) set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) -pybind11_add_module(pyfiction - # Prefer thin LTO if available - THIN_LTO pyfiction.cpp) +pybind11_add_module( + pyfiction + # Prefer thin LTO if available + THIN_LTO pyfiction.cpp) target_link_libraries(pyfiction PRIVATE libfiction) target_include_directories(pyfiction PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include) set_property(TARGET pyfiction PROPERTY POSITION_INDEPENDENT_CODE ON) # Collect pyfiction headers under bindings/mnt/pyfiction/ -file(GLOB_RECURSE FICTION_PYFICTION_HEADERS CONFIGURE_DEPENDS - RELATIVE ${PROJECT_SOURCE_DIR}/bindings/mnt/pyfiction/ - ${PROJECT_SOURCE_DIR}/bindings/mnt/pyfiction/include/*.hpp) +file( + GLOB_RECURSE FICTION_PYFICTION_HEADERS CONFIGURE_DEPENDS + RELATIVE ${PROJECT_SOURCE_DIR}/bindings/mnt/pyfiction/ + ${PROJECT_SOURCE_DIR}/bindings/mnt/pyfiction/include/*.hpp) 
# Register header file set for IDE integration and installation metadata # Multiple BASE_DIRS so CMake knows how to layout the installed tree # -# Using a FILE_SET ensures proper exposure in CMake package exports later -# if export() logic is added. +# Using a FILE_SET ensures proper exposure in CMake package exports later if +# export() logic is added. # # (GLOB with CONFIGURE_DEPENDS keeps IDE view in sync when adding headers.) -target_sources(pyfiction - PRIVATE - FILE_SET HEADERS - BASE_DIRS - ${PROJECT_SOURCE_DIR}/bindings/mnt/pyfiction/ - FILES - ${FICTION_PYFICTION_HEADERS} -) +target_sources( + pyfiction + PRIVATE FILE_SET HEADERS BASE_DIRS + ${PROJECT_SOURCE_DIR}/bindings/mnt/pyfiction/ FILES + ${FICTION_PYFICTION_HEADERS}) # Ensure header verification is enabled for this target set_property(TARGET pyfiction PROPERTY VERIFY_INTERFACE_HEADER_SETS ON) @@ -42,6 +42,7 @@ set_property(TARGET pyfiction PROPERTY VERIFY_INTERFACE_HEADER_SETS ON) target_compile_features(pyfiction PRIVATE cxx_std_${CMAKE_CXX_STANDARD}) # Install directive for scikit-build-core -install(TARGETS pyfiction - DESTINATION . - COMPONENT fiction_Python) +install( + TARGETS pyfiction + DESTINATION . + COMPONENT fiction_Python) diff --git a/bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp b/bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp index 95bf93cccf..8cb70710fb 100644 --- a/bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp +++ b/bindings/mnt/pyfiction/include/pyfiction/pybind11_mkdoc_docstrings.hpp @@ -113,9 +113,9 @@ this function cannot be evaluated in :math:`\mathcal{O}(1)`, but has the polynomial complexity of A*. If no path between `source` and `target` exists in `layout`, the -returned distance is `std::numeric_limits::infinity()` if that -value is supported by `Dist`, or `std::numeric_limits::max()`, -otherwise. 
+returned distance is `std::numeric_limits::infinity()` for +floating-point types or `std::numeric_limits::max()` for +integral types. Template parameter ``Lyt``: Coordinate layout type. @@ -6317,7 +6317,7 @@ static const char *__doc_fiction_detail_design_sidb_gates_impl_output_bdl_wires static const char *__doc_fiction_detail_design_sidb_gates_impl_params = R"doc(Parameters for the *SiDB Gate Designer*.)doc"; static const char *__doc_fiction_detail_design_sidb_gates_impl_run_automatic_exhaustive_gate_designer = -R"doc(Design gates by using the *Automatic Exhaustive Gate Desginer*. This +R"doc(Design gates by using the *Automatic Exhaustive Gate Designer*. This algorithm was proposed in \"Minimal Design of SiDB Gates: An Optimal Basis for Circuits Based on Silicon Dangling Bonds\" by J. Drewniok, M. Walter, and R. Wille in NANOARCH 2023 @@ -14107,7 +14107,7 @@ each other).)doc"; static const char *__doc_fiction_generate_random_sidb_layout_params_positive_charges_FORBIDDEN = R"doc(Positive charges are not allowed to occur (i.e. SiDBs need to be -seperated by a few lattice points).)doc"; +separated by a few lattice points).)doc"; static const char *__doc_fiction_generate_random_sidb_layout_params_positive_charges_MAY_OCCUR = R"doc(Positive charges can occur, which means that the @@ -16854,8 +16854,8 @@ R"doc(Reorders the nodes in a given rank according to computed medians. Parameter ``r``: The rank index. -Parameter ``reverse``: - If `true`, sorts in descending order of medians.)doc"; +Parameter ``order``: + Sorting order of medians.)doc"; static const char *__doc_fiction_mincross_impl_run = R"doc(Runs the crossing minimization algorithm and returns a reordered @@ -16870,8 +16870,8 @@ static const char *__doc_fiction_mincross_impl_transpose = R"doc(Performs pairwise transpositions within ranks to further reduce crossings. 
-Parameter ``reverse``: - If `true`, applies reversed heuristic for tie-breaking.)doc"; +Parameter ``order``: + Sorting heuristic for tie-breaking.)doc"; static const char *__doc_fiction_mincross_impl_transpose_step = R"doc(Performs a single transposition pass for rank `r`. @@ -16879,8 +16879,8 @@ R"doc(Performs a single transposition pass for rank `r`. Parameter ``r``: Rank index. -Parameter ``reverse``: - If `true`, applies reversed heuristic for tie-breaking. +Parameter ``order``: + Sorting heuristic for tie-breaking. Returns: The number of crossings reduced.)doc"; @@ -16934,20 +16934,21 @@ static const char *__doc_fiction_mincross_stats_num_crossings = R"doc(The total static const char *__doc_fiction_minimum_energy = R"doc(Computes the minimum energy of a range of `charge_distribution_surface` objects. If the range is empty, infinity -is returned. +is returned to indicate no valid energy value exists. Template parameter ``InputIt``: Must meet the requirements of `LegacyInputIterator`. Parameter ``first``: - Begin of the range to examime. + Begin of the range to examine. Parameter ``last``: End of the range to examine. Returns: Value of the minimum energy found in the input range (unit: eV), - or infinity if the range is empty.)doc"; + or `std::numeric_limits::infinity()` if the range is + empty.)doc"; static const char *__doc_fiction_minimum_energy_distribution = R"doc(Returns an iterator to the charge distribution of minimum energy @@ -16958,7 +16959,7 @@ Template parameter ``InputIt``: Must meet the requirements of `LegacyInputIterator`. Parameter ``first``: - Begin of the range to examime. + Begin of the range to examine. Parameter ``last``: End of the range to examine. 
diff --git a/bindings/mnt/pyfiction/include/pyfiction/utils/version_info.hpp b/bindings/mnt/pyfiction/include/pyfiction/utils/version_info.hpp index bce6f4c23c..3579394620 100644 --- a/bindings/mnt/pyfiction/include/pyfiction/utils/version_info.hpp +++ b/bindings/mnt/pyfiction/include/pyfiction/utils/version_info.hpp @@ -5,15 +5,13 @@ #ifndef PYFICTION_VERSION_INFO_HPP #define PYFICTION_VERSION_INFO_HPP -#include +#include #include namespace pyfiction { -namespace py = pybind11; - inline void version_info(pybind11::module& m) { m.attr("__version__") = fiction::FICTION_VERSION; diff --git a/bindings/mnt/pyfiction/test/algorithms/physical_design/test_post_layout_optimization.py b/bindings/mnt/pyfiction/test/algorithms/physical_design/test_post_layout_optimization.py index 100fd3e5cf..cbcfe4610b 100644 --- a/bindings/mnt/pyfiction/test/algorithms/physical_design/test_post_layout_optimization.py +++ b/bindings/mnt/pyfiction/test/algorithms/physical_design/test_post_layout_optimization.py @@ -14,7 +14,7 @@ dir_path = os.path.dirname(os.path.realpath(__file__)) -class TestPostLayoutOptimiztaion(unittest.TestCase): +class TestPostLayoutOptimization(unittest.TestCase): def test_post_layout_optimization_default(self): network = read_technology_network(dir_path + "/../../resources/mux21.v") diff --git a/bindings/mnt/pyfiction/test/algorithms/simulation/sidb/test_random_sidb_layout_generator.py b/bindings/mnt/pyfiction/test/algorithms/simulation/sidb/test_random_sidb_layout_generator.py index 75da8cbefb..2a0c0210a2 100644 --- a/bindings/mnt/pyfiction/test/algorithms/simulation/sidb/test_random_sidb_layout_generator.py +++ b/bindings/mnt/pyfiction/test/algorithms/simulation/sidb/test_random_sidb_layout_generator.py @@ -71,7 +71,7 @@ def test_impossible_design_of_single_layout(self): result_lyt = generate_random_sidb_layout(params, sidb_layout()) self.assertIsNone(result_lyt) - def test_impossible_design_of_mutiple_layouts(self): + def 
test_impossible_design_of_multiple_layouts(self): params = generate_random_sidb_layout_params() params.maximal_attempts_for_multiple_layouts = 5 params.number_of_sidbs = 2 diff --git a/cli/CMakeLists.txt b/cli/CMakeLists.txt index db7441dd98..8b9ddd7242 100644 --- a/cli/CMakeLists.txt +++ b/cli/CMakeLists.txt @@ -1,45 +1,32 @@ add_executable(fiction) -file(GLOB_RECURSE FICTION_CMD_SOURCES CONFIGURE_DEPENDS - "cmd/*/src/*.cpp" -) +target_sources(fiction PRIVATE fiction.cpp) -target_sources(fiction PRIVATE - ${PROJECT_SOURCE_DIR}/cli/fiction.cpp - ${FICTION_CMD_SOURCES} -) - -# Include configuration file -include_directories(${PROJECT_BINARY_DIR}/include/) +# Add command subdirectories +add_subdirectory(cmd/general) +add_subdirectory(cmd/io) +add_subdirectory(cmd/logic) +add_subdirectory(cmd/physical_design) +add_subdirectory(cmd/simulation) +add_subdirectory(cmd/technology) +add_subdirectory(cmd/verification) # Add include directory for command headers target_include_directories(fiction PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) # Link against the project settings, libfiction and alice -target_link_libraries(fiction PRIVATE libfiction alice) - -# Collect CLI headers under cmd/ -file(GLOB_RECURSE FICTION_CLI_HEADERS CONFIGURE_DEPENDS - RELATIVE ${PROJECT_SOURCE_DIR}/cli/ - ${PROJECT_SOURCE_DIR}/cli/cmd/*/*.hpp - ${PROJECT_SOURCE_DIR}/cli/stores.hpp - ${PROJECT_SOURCE_DIR}/cli/pch_cli.hpp) +target_link_libraries(fiction PRIVATE fiction::libfiction alice) # Register header file set for IDE integration and installation metadata -# Multiple BASE_DIRS so CMake knows how to layout the installed tree -# -# Using a FILE_SET ensures proper exposure in CMake package exports later -# if export() logic is added. -# -# (GLOB with CONFIGURE_DEPENDS keeps IDE view in sync when adding headers.) 
-target_sources(fiction - PRIVATE - FILE_SET HEADERS - BASE_DIRS - ${PROJECT_SOURCE_DIR}/cli - FILES - ${FICTION_CLI_HEADERS} -) +target_sources( + fiction + PRIVATE FILE_SET + HEADERS + BASE_DIRS + ${PROJECT_SOURCE_DIR}/cli + FILES + stores.hpp + pch_cli.hpp) # Ensure header verification is enabled for this target set_property(TARGET fiction PROPERTY VERIFY_INTERFACE_HEADER_SETS ON) @@ -51,70 +38,74 @@ target_compile_features(fiction PRIVATE cxx_std_${CMAKE_CXX_STANDARD}) # ============================================================================ # Enable PCH for the fiction CLI to speed up compilation of all command files. # The PCH includes stable, expensive headers like alice, fiction types, etc. -if (FICTION_ENABLE_PCH) - target_precompile_headers(fiction PRIVATE pch_cli.hpp) -endif () +if(FICTION_ENABLE_PCH) + target_precompile_headers(fiction PRIVATE pch_cli.hpp) +endif() # Compile-time decisions on which flows to compile # Logic synthesis flow -option(FICTION_LOGIC_SYNTHESIS_FLOW "Enable the logic synthesis flow for the fiction CLI" ON) -if (FICTION_LOGIC_SYNTHESIS_FLOW) - target_compile_definitions(fiction PRIVATE FICTION_LOGIC_SYNTHESIS_FLOW) -endif () +option(FICTION_LOGIC_SYNTHESIS_FLOW + "Enable the logic synthesis flow for the fiction CLI" ON) +if(FICTION_LOGIC_SYNTHESIS_FLOW) + target_compile_definitions(fiction PRIVATE FICTION_LOGIC_SYNTHESIS_FLOW) +endif() # Physical design flow -option(FICTION_PHYSICAL_DESIGN_FLOW "Enable the physical design flow for the fiction CLI" ON) -if (FICTION_PHYSICAL_DESIGN_FLOW) - target_compile_definitions(fiction PRIVATE FICTION_PHYSICAL_DESIGN_FLOW) -endif () +option(FICTION_PHYSICAL_DESIGN_FLOW + "Enable the physical design flow for the fiction CLI" ON) +if(FICTION_PHYSICAL_DESIGN_FLOW) + target_compile_definitions(fiction PRIVATE FICTION_PHYSICAL_DESIGN_FLOW) +endif() # Physical simulation flow -option(FICTION_SIMULATION_FLOW "Enable the physical simulation flow for the fiction CLI" ON) -if 
(FICTION_SIMULATION_FLOW) -    target_compile_definitions(fiction PRIVATE FICTION_SIMULATION_FLOW) -endif () +option(FICTION_SIMULATION_FLOW +       "Enable the physical simulation flow for the fiction CLI" ON) +if(FICTION_SIMULATION_FLOW) +  target_compile_definitions(fiction PRIVATE FICTION_SIMULATION_FLOW) +endif()  # If the logic synthesis flow is enabled, we can enable ABC as a callback -if (FICTION_LOGIC_SYNTHESIS_FLOW) -    # Enable ABC -    option(FICTION_ABC "Find, include, and utilize ABC. It needs to be installed manually." OFF) -    if (FICTION_ABC) -        message(STATUS "Usage of the Z3 solver was enabled. Make sure that it is installed on your system!") -        # Option for a user-defined path to ABC -        set(ABC_ROOT "" CACHE STRING "Path to the ABC directory") - -        find_program( -                ABC_BINARY abc -                HINTS -                ${ABC_ROOT} -                $ENV{HOME}/.local/bin -                /usr/local/bin -                /usr/bin -                /opt/abc/bin -                ENV PATH -                DOC "Path to the ABC executable" -        ) -        if (ABC_BINARY) -            message(STATUS "Found ABC binary: ${ABC_BINARY}") -            target_compile_definitions(fiction PRIVATE FICTION_ABC) -            target_compile_definitions(fiction PRIVATE ABC_EXECUTABLE="${ABC_BINARY}") -        else () -            message(FATAL_ERROR "ABC not found. Please specify `ABC_ROOT` or ensure ABC is in your PATH.") -        endif () -    endif () -endif () +if(FICTION_LOGIC_SYNTHESIS_FLOW) +  # Enable ABC +  option(FICTION_ABC +         "Find, include, and utilize ABC. It needs to be installed manually." +         OFF) +  if(FICTION_ABC) +    message( +      STATUS +        "Usage of ABC was enabled. Make sure that it is installed on your system!"
+ ) + # Option for a user-defined path to ABC + set(ABC_ROOT + "" + CACHE STRING "Path to the ABC directory") + + find_program( + ABC_BINARY abc + HINTS ${ABC_ROOT} + $ENV{HOME}/.local/bin + /usr/local/bin + /usr/bin + /opt/abc/bin + ENV + PATH + DOC "Path to the ABC executable") + if(ABC_BINARY) + message(STATUS "Found ABC binary: ${ABC_BINARY}") + target_compile_definitions(fiction PRIVATE FICTION_ABC) + target_compile_definitions(fiction PRIVATE ABC_EXECUTABLE="${ABC_BINARY}") + else() + message( + FATAL_ERROR + "ABC not found. Please specify `ABC_ROOT` or ensure ABC is in your PATH." + ) + endif() + endif() +endif() # Strip the executable if we are in Release mode -if (CMAKE_BUILD_TYPE STREQUAL "Release") - if (CMAKE_STRIP) - add_custom_command( - TARGET fiction - POST_BUILD - COMMAND ${CMAKE_STRIP} $) - else () - message( - WARNING - "Strip command is not available. The executables will not be stripped.") - endif () -endif () +fiction_strip_target(fiction) + +# Installation +install(TARGETS fiction RUNTIME DESTINATION bin) diff --git a/cli/cmd/general/CMakeLists.txt b/cli/cmd/general/CMakeLists.txt new file mode 100644 index 0000000000..7dd3f074cd --- /dev/null +++ b/cli/cmd/general/CMakeLists.txt @@ -0,0 +1,12 @@ +target_sources(fiction PRIVATE src/clear.cpp src/version.cpp) + +target_sources( + fiction + PRIVATE FILE_SET + HEADERS + BASE_DIRS + ${PROJECT_SOURCE_DIR}/cli + FILES + cmd_general.hpp + include/clear.hpp + include/version.hpp) diff --git a/cli/cmd/general/src/version.cpp b/cli/cmd/general/src/version.cpp index 92e5edc63f..0442d35b02 100644 --- a/cli/cmd/general/src/version.cpp +++ b/cli/cmd/general/src/version.cpp @@ -4,7 +4,7 @@ #include "cmd/general/include/version.hpp" -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp" #include #include diff --git a/cli/cmd/io/CMakeLists.txt b/cli/cmd/io/CMakeLists.txt new file mode 100644 index 0000000000..14b8f720cc --- /dev/null +++ b/cli/cmd/io/CMakeLists.txt @@ -0,0 +1,31 @@ 
+target_sources( + fiction + PRIVATE src/blif.cpp + src/fgl.cpp + src/fqca.cpp + src/qca.cpp + src/qcc.cpp + src/qll.cpp + src/read.cpp + src/sqd.cpp + src/tt.cpp + src/verilog.cpp) + +target_sources( + fiction + PRIVATE FILE_SET + HEADERS + BASE_DIRS + ${PROJECT_SOURCE_DIR}/cli + FILES + cmd_io.hpp + include/blif.hpp + include/fgl.hpp + include/fqca.hpp + include/qca.hpp + include/qcc.hpp + include/qll.hpp + include/read.hpp + include/sqd.hpp + include/tt.hpp + include/verilog.hpp) diff --git a/cli/cmd/logic/CMakeLists.txt b/cli/cmd/logic/CMakeLists.txt new file mode 100644 index 0000000000..8c3d005902 --- /dev/null +++ b/cli/cmd/logic/CMakeLists.txt @@ -0,0 +1,31 @@ +target_sources( + fiction + PRIVATE src/abc.cpp + src/akers.cpp + src/balance.cpp + src/fanouts.cpp + src/gates.cpp + src/map.cpp + src/miginvopt.cpp + src/miginvprop.cpp + src/random.cpp + src/simulate.cpp) + +target_sources( + fiction + PRIVATE FILE_SET + HEADERS + BASE_DIRS + ${PROJECT_SOURCE_DIR}/cli + FILES + cmd_logic.hpp + include/abc.hpp + include/akers.hpp + include/balance.hpp + include/fanouts.hpp + include/gates.hpp + include/map.hpp + include/miginvopt.hpp + include/miginvprop.hpp + include/random.hpp + include/simulate.hpp) diff --git a/cli/cmd/logic/include/fanouts.hpp b/cli/cmd/logic/include/fanouts.hpp index 42c8164d3c..23b6288658 100644 --- a/cli/cmd/logic/include/fanouts.hpp +++ b/cli/cmd/logic/include/fanouts.hpp @@ -38,6 +38,10 @@ class fanouts_command final : public command * Random seed used for random fanout substitution */ uint32_t seed{0u}; + /** + * Strategy as integer for CLI11 compatibility + */ + int strategy_int{0}; /** * Parameters. 
 */ diff --git a/cli/cmd/logic/src/fanouts.cpp b/cli/cmd/logic/src/fanouts.cpp index dcfee59bbd..27d36d0437 100644 --- a/cli/cmd/logic/src/fanouts.cpp +++ b/cli/cmd/logic/src/fanouts.cpp @@ -10,6 +10,7 @@ #include #include +#include #include @@ -23,7 +24,7 @@ fanouts_command::fanouts_command(const environment::ptr& e) : { add_option("--degree,-d", ps.degree, "Maximum number of outputs a fan-out node can have", true) ->set_type_name("{2, 3}"); - add_option("--strategy,-s", ps.strategy, + add_option("--strategy,-s", strategy_int, "Chain fan-outs in a balanced tree (breadth), a DFS tree (depth), or a random fashion", true) ->set_type_name("{breadth=0, depth=1, random=2}"); add_option("--threshold,-t", ps.threshold, @@ -45,18 +46,21 @@ void fanouts_command::execute() if (ps.degree < 2 || ps.degree > 3) { - env->out() << "[w] " << ps.degree << " outputs per fan-out are not supported\n"; + env->out() << fmt::format("[w] {} outputs per fan-out are not supported\n", ps.degree); ps = {}; return; } - if (ps.strategy > 2) + if (strategy_int < 0 || strategy_int > 2) { - env->out() << "[w] " << ps.strategy << " does not refer to a valid strategy\n"; + env->out() << fmt::format("[w] {} does not refer to a valid strategy\n", strategy_int); ps = {}; return; } + // Convert integer to enum + ps.strategy = static_cast<decltype(ps.strategy)>(strategy_int); + if (is_set("seed")) { ps.seed = seed; diff --git a/cli/cmd/physical_design/CMakeLists.txt b/cli/cmd/physical_design/CMakeLists.txt new file mode 100644 index 0000000000..458ff63f3d --- /dev/null +++ b/cli/cmd/physical_design/CMakeLists.txt @@ -0,0 +1,17 @@ +target_sources(fiction PRIVATE src/exact.cpp src/gold.cpp src/hex.cpp +                               src/onepass.cpp src/optimize.cpp src/ortho.cpp) + +target_sources( + fiction + PRIVATE FILE_SET + HEADERS + BASE_DIRS + ${PROJECT_SOURCE_DIR}/cli + FILES + cmd_physical_design.hpp + include/exact.hpp + include/gold.hpp + include/hex.hpp + include/onepass.hpp + include/optimize.hpp + include/ortho.hpp) diff --git 
a/cli/cmd/simulation/CMakeLists.txt b/cli/cmd/simulation/CMakeLists.txt new file mode 100644 index 0000000000..f3233a6a89 --- /dev/null +++ b/cli/cmd/simulation/CMakeLists.txt @@ -0,0 +1,16 @@ +target_sources(fiction PRIVATE src/clustercomplete.cpp src/opdom.cpp + src/quickexact.cpp src/quicksim.cpp src/temp.cpp) + +target_sources( + fiction + PRIVATE FILE_SET + HEADERS + BASE_DIRS + ${PROJECT_SOURCE_DIR}/cli + FILES + cmd_simulation.hpp + include/clustercomplete.hpp + include/opdom.hpp + include/quickexact.hpp + include/quicksim.hpp + include/temp.hpp) diff --git a/cli/cmd/technology/CMakeLists.txt b/cli/cmd/technology/CMakeLists.txt new file mode 100644 index 0000000000..e5909d62be --- /dev/null +++ b/cli/cmd/technology/CMakeLists.txt @@ -0,0 +1,13 @@ +target_sources(fiction PRIVATE src/area.cpp src/cell.cpp src/energy.cpp) + +target_sources( + fiction + PRIVATE FILE_SET + HEADERS + BASE_DIRS + ${PROJECT_SOURCE_DIR}/cli + FILES + cmd_technology.hpp + include/area.hpp + include/cell.hpp + include/energy.hpp) diff --git a/cli/cmd/technology/cmd_technology.hpp b/cli/cmd/technology/cmd_technology.hpp index 19c324aeb0..c998ac563b 100644 --- a/cli/cmd/technology/cmd_technology.hpp +++ b/cli/cmd/technology/cmd_technology.hpp @@ -18,7 +18,7 @@ namespace alice constexpr inline auto FICTION_CLI_CATEGORY_TECHNOLOGY = "Technology"; -// technlogy commands +// technology commands ALICE_ADD_COMMAND(area, FICTION_CLI_CATEGORY_TECHNOLOGY) ALICE_ADD_COMMAND(cell, FICTION_CLI_CATEGORY_TECHNOLOGY) ALICE_ADD_COMMAND(energy, FICTION_CLI_CATEGORY_TECHNOLOGY) diff --git a/cli/cmd/verification/CMakeLists.txt b/cli/cmd/verification/CMakeLists.txt new file mode 100644 index 0000000000..c23042ba4f --- /dev/null +++ b/cli/cmd/verification/CMakeLists.txt @@ -0,0 +1,12 @@ +target_sources(fiction PRIVATE src/check.cpp src/equiv.cpp) + +target_sources( + fiction + PRIVATE FILE_SET + HEADERS + BASE_DIRS + ${PROJECT_SOURCE_DIR}/cli + FILES + cmd_verification.hpp + include/check.hpp + 
include/equiv.hpp) diff --git a/cmake/CheckSubmodules.cmake b/cmake/CheckSubmodules.cmake deleted file mode 100644 index 063b63246f..0000000000 --- a/cmake/CheckSubmodules.cmake +++ /dev/null @@ -1,10 +0,0 @@ -# check whether the submodule ``modulename`` is correctly cloned in the -# ``/libs`` directory. -macro(check_if_present modulename) - if(NOT EXISTS "${PROJECT_SOURCE_DIR}/libs/${modulename}/CMakeLists.txt") - message( - FATAL_ERROR - "Submodule `${PROJECT_SOURCE_DIR}/libs/${modulename}` not cloned properly. Please run `git submodule update --init --recursive` from the main project directory to fix this issue." - ) - endif() -endmacro() diff --git a/cmake/CompilerWarnings.cmake b/cmake/CompilerWarnings.cmake index 9cbb6d399b..ae25812978 100644 --- a/cmake/CompilerWarnings.cmake +++ b/cmake/CompilerWarnings.cmake @@ -2,17 +2,11 @@ # # https://github.com/lefticus/cppbestpractices/blob/master/02-Use_the_Tools_Available.md -function( - fiction_set_project_warnings - project_name - WARNINGS_AS_ERRORS - MSVC_WARNINGS - CLANG_WARNINGS - GCC_WARNINGS - CUDA_WARNINGS) +function(fiction_set_project_warnings project_name WARNINGS_AS_ERRORS + MSVC_WARNINGS CLANG_WARNINGS GCC_WARNINGS) if("${MSVC_WARNINGS}" STREQUAL "") set(MSVC_WARNINGS - /W4 # Baseline reasonable warnings + /W4 # Baseline reasonable warnings /w14242 # 'identifier': conversion from 'type1' to 'type2', possible # loss of data /w14254 # 'operator': conversion from 'type1:field_bits' to @@ -73,6 +67,8 @@ function( # explicit annotation -Wno-unknown-pragmas # do not warn if encountering unknown pragmas -Wno-pragmas # do not warn if encountering unknown pragma options + -Wno-system-headers # do not emit diagnostics originating from + # STL/system headers -Wno-gnu-zero-variadic-macro-arguments # do not warn if zero variadic # macro arguments are passed to a # GNU user-defined macro @@ -90,12 +86,10 @@ function( # were probably wanted -Wuseless-cast # warn if you perform a cast to the same type ) - endif() - - 
if("${CUDA_WARNINGS}" STREQUAL "") - set(CUDA_WARNINGS -Wall -Wextra -Wunused -Wconversion -Wshadow - # TODO add more Cuda warnings - ) + # Remove -Wnull-dereference from GCC warnings as it doesn't respect + # -Wno-system-headers and causes warnings in standard library headers (e.g., + # libstdc++) + list(REMOVE_ITEM GCC_WARNINGS -Wnull-dereference) endif() if(WARNINGS_AS_ERRORS) @@ -115,20 +109,15 @@ function( message( AUTHOR_WARNING "No compiler warnings set for CXX compiler: '${CMAKE_CXX_COMPILER_ID}'") - # TODO support Intel compiler endif() # use the same warning flags for C set(PROJECT_WARNINGS_C "${PROJECT_WARNINGS_CXX}") - set(PROJECT_WARNINGS_CUDA "${CUDA_WARNINGS}") - target_compile_options( ${project_name} INTERFACE # C++ warnings $<$:${PROJECT_WARNINGS_CXX}> # C warnings - $<$:${PROJECT_WARNINGS_C}> - # Cuda warnings - $<$:${PROJECT_WARNINGS_CUDA}>) + $<$:${PROJECT_WARNINGS_C}>) endfunction() diff --git a/cmake/Coverage.cmake b/cmake/Coverage.cmake index 7c7c5d1e34..dd7ea27374 100644 --- a/cmake/Coverage.cmake +++ b/cmake/Coverage.cmake @@ -1,7 +1,8 @@ function(fiction_enable_coverage project_name) if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") - target_compile_options(${project_name} INTERFACE --coverage -O0 -g) + target_compile_options(${project_name} INTERFACE --coverage -fprofile-arcs + -ftest-coverage -O0 -g) target_link_libraries(${project_name} INTERFACE --coverage) endif() endfunction() diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake new file mode 100644 index 0000000000..46e8bb0c41 --- /dev/null +++ b/cmake/Dependencies.cmake @@ -0,0 +1,83 @@ +include(FetchContent) + +# nlohmann_json +set(JSON_BuildTests + OFF + CACHE INTERNAL "") +FetchContent_Declare( + nlohmann_json + GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG v3.12.0) +FetchContent_MakeAvailable(nlohmann_json) + +# Catch2 +if(FICTION_TEST) + FetchContent_Declare( + Catch2 + GIT_REPOSITORY 
https://github.com/catchorg/Catch2.git + GIT_TAG v3.11.0) + FetchContent_MakeAvailable(Catch2) +endif() + +# pybind11 +FetchContent_Declare( + pybind11 + GIT_REPOSITORY https://github.com/pybind/pybind11.git + GIT_TAG v3.0.1) +# Suppress warnings about removed FindPython modules in newer CMake versions +if(POLICY CMP0148) + set(CMAKE_POLICY_DEFAULT_CMP0148 OLD) +endif() +set(PYBIND11_FINDPYTHON ON) +FetchContent_MakeAvailable(pybind11) + +# parallel-hashmap +FetchContent_Declare( + parallel-hashmap + GIT_REPOSITORY https://github.com/greg7mdp/parallel-hashmap.git + GIT_TAG v2.0.0) +FetchContent_MakeAvailable(parallel-hashmap) + +# tinyxml2 +set(tinyxml2_BUILD_TESTING OFF) +FetchContent_Declare( + tinyxml2 + GIT_REPOSITORY https://github.com/leethomason/tinyxml2.git + GIT_TAG 11.0.0) +FetchContent_MakeAvailable(tinyxml2) + +# alice +set(ALICE_EXAMPLES + OFF + CACHE BOOL "" FORCE) +set(ALICE_TEST + OFF + CACHE BOOL "" FORCE) +FetchContent_Declare( + alice + GIT_REPOSITORY https://github.com/marcelwa/alice.git + GIT_TAG master # Using master as per submodule +) +FetchContent_MakeAvailable(alice) + +# mockturtle +set(MOCKTURTLE_EXAMPLES + OFF + CACHE BOOL "" FORCE) +set(MOCKTURTLE_EXPERIMENTS + OFF + CACHE BOOL "" FORCE) +set(MOCKTURTLE_TEST + OFF + CACHE BOOL "" FORCE) +FetchContent_Declare( + mockturtle + GIT_REPOSITORY https://github.com/marcelwa/mockturtle.git + GIT_TAG mnt # Using mnt branch as per submodule +) +FetchContent_MakeAvailable(mockturtle) + +# ALGLIB +if(FICTION_ALGLIB) + include(${PROJECT_SOURCE_DIR}/cmake/FetchALGLIB.cmake) +endif() diff --git a/cmake/FetchALGLIB.cmake b/cmake/FetchALGLIB.cmake new file mode 100644 index 0000000000..0df4daad6b --- /dev/null +++ b/cmake/FetchALGLIB.cmake @@ -0,0 +1,115 @@ +# FetchALGLIB.cmake - Fetch ALGLIB library with GitHub mirror support +# +# Downloads ALGLIB from a reliable GitHub mirror (cda-tum/alglib-mirror) with +# fallback to the official ALGLIB site. 
The GitHub mirror provides fast, +# reliable downloads in CI environments where the official site frequently times +# out. +# +# Change ALGLIB_VERSION below to use a different version. All versions 3.9.0 +# through 4.07.0 are available on the mirror. + +include(FetchContent) + +# Set policy to allow FetchContent_Populate with declared content +if(POLICY CMP0169) + cmake_policy(SET CMP0169 OLD) +endif() + +# ALGLIB version configuration +set(ALGLIB_VERSION "3.14.0") +set(ALGLIB_FILE "alglib-${ALGLIB_VERSION}.cpp.gpl.tgz") + +# Mirror URLs - GitHub mirror is tried first for reliability +set(ALGLIB_URLS + "https://github.com/cda-tum/alglib-mirror/releases/download/v${ALGLIB_VERSION}/${ALGLIB_FILE}" + "http://www.alglib.net/translator/re/${ALGLIB_FILE}") + +# Fetch alglib-cmake wrapper library. Note: We use manual population and +# extraction instead of FetchContent_MakeAvailable because alglib-cmake has a +# problematic waiting loop that causes CI timeouts. By extracting and preparing +# the files ourselves, we bypass this issue entirely. +FetchContent_Declare( + alglib-cmake + GIT_REPOSITORY https://github.com/wlambooy/alglib-cmake.git + GIT_TAG master) + +# Download ALGLIB tarball (without extraction) using FetchContent. FetchContent +# handles retries and multiple URLs automatically. 
+FetchContent_Declare( + alglib + URL ${ALGLIB_URLS} + DOWNLOAD_NO_EXTRACT TRUE) + +# Get the ALGLIB tarball +FetchContent_GetProperties(alglib) +if(NOT alglib_POPULATED) + FetchContent_Populate(alglib) +endif() + +# Locate the downloaded tarball +set(ALGLIB_TARBALL_PATH "${alglib_SOURCE_DIR}/${ALGLIB_FILE}") + +if(NOT EXISTS "${ALGLIB_TARBALL_PATH}") + message(FATAL_ERROR "ALGLIB tarball not found at: ${ALGLIB_TARBALL_PATH}") +endif() + +# Manually populate and prepare alglib-cmake +FetchContent_GetProperties(alglib-cmake) +if(NOT alglib-cmake_POPULATED) + FetchContent_Populate(alglib-cmake) + + # Copy the downloaded ALGLIB tarball to alglib-cmake's expected location + file(COPY "${ALGLIB_TARBALL_PATH}" DESTINATION "${alglib-cmake_SOURCE_DIR}") + + # Extract and reorganize the ALGLIB source files This must be done before + # alglib-cmake processes its CMakeLists.txt + set(ALGLIB_UNZIP_DIR "${alglib-cmake_SOURCE_DIR}/src") + set(ALGLIB_EXTRACTED_SRC_DIR "${ALGLIB_UNZIP_DIR}/cpp/src") + + if(NOT EXISTS "${ALGLIB_EXTRACTED_SRC_DIR}") + message(STATUS "Extracting and preparing ALGLIB ${ALGLIB_VERSION}...") + + file(REMOVE_RECURSE "${ALGLIB_UNZIP_DIR}") + file(MAKE_DIRECTORY "${ALGLIB_UNZIP_DIR}") + + # Extract tarball + execute_process( + COMMAND ${CMAKE_COMMAND} -E tar xfz + "${alglib-cmake_SOURCE_DIR}/${ALGLIB_FILE}" + WORKING_DIRECTORY "${ALGLIB_UNZIP_DIR}" + RESULT_VARIABLE extract_result) + + if(NOT extract_result EQUAL 0) + message(FATAL_ERROR "Failed to extract ALGLIB tarball") + endif() + + # Reorganize headers into separate directory (alglib-cmake expectation) + file(GLOB ALGLIB_HEADERS "${ALGLIB_EXTRACTED_SRC_DIR}/*.h") + file(GLOB ALGLIB_SOURCES "${ALGLIB_EXTRACTED_SRC_DIR}/*.cpp") + + set(HEADER_OUTPUT_DIR "${ALGLIB_EXTRACTED_SRC_DIR}/headers") + file(MAKE_DIRECTORY "${HEADER_OUTPUT_DIR}") + + foreach(header ${ALGLIB_HEADERS}) + get_filename_component(header_filename ${header} NAME) + file(RENAME ${header} "${HEADER_OUTPUT_DIR}/${header_filename}") + 
endforeach() + + # Update include paths in source files + foreach(source_file ${ALGLIB_SOURCES}) + file(READ "${source_file}" file_content) + string(REGEX + REPLACE "#include \"([^\"]+)\\.h\"" "#include \"headers/\\1.h\"" + modified_content "${file_content}") + file(WRITE "${source_file}" "${modified_content}") + endforeach() + + message(STATUS "ALGLIB extraction and preparation complete") + endif() + + # Create success marker file to prevent alglib-cmake's waiting loop + file(TOUCH "${alglib-cmake_SOURCE_DIR}/success") + + # Add alglib-cmake to the build + add_subdirectory(${alglib-cmake_SOURCE_DIR} ${alglib-cmake_BINARY_DIR}) +endif() diff --git a/cmake/FetchJemalloc.cmake b/cmake/FetchJemalloc.cmake index 9f50743ad2..459a3d65a3 100644 --- a/cmake/FetchJemalloc.cmake +++ b/cmake/FetchJemalloc.cmake @@ -1,39 +1,48 @@ -# FetchJemalloc.cmake | this file is intended to be run only on UNIX-like operating systems +# FetchJemalloc.cmake | this file is intended to be run only on UNIX-like +# operating systems -if (CMAKE_SYSTEM_NAME STREQUAL "Windows") - message(SEND_ERROR "Install jemalloc on Windows using the instructions found in https://github.com/jemalloc/jemalloc/blob/dev/INSTALL.md#building-for-windows.") -endif () +if(CMAKE_SYSTEM_NAME STREQUAL "Windows") + message( + SEND_ERROR + "Install jemalloc on Windows using the instructions found in https://github.com/jemalloc/jemalloc/blob/dev/INSTALL.md#building-for-windows." 
+ ) +endif() include(ExternalProject) -set(JEMALLOC_TAG "5.3.0" CACHE STRING "Which tag/revision of jemalloc to use") -set(JEMALLOC_SOURCE_DIR "${CMAKE_SOURCE_DIR}/libs/jemalloc") -set(JEMALLOC_PREFIX_DIR "${CMAKE_BINARY_DIR}/libs/jemalloc") +set(JEMALLOC_TAG + "5.3.0" + CACHE STRING "Which tag/revision of jemalloc to use") +set(JEMALLOC_SOURCE_DIR "${CMAKE_BINARY_DIR}/_deps/jemalloc-src") +set(JEMALLOC_PREFIX_DIR "${CMAKE_BINARY_DIR}/_deps/jemalloc-build") set(JEMALLOC_INSTALL_DIR "${JEMALLOC_PREFIX_DIR}/install") # Decide parallel build args, if no jobserver is in MAKEFLAGS. -if ("$ENV{MAKEFLAGS}" MATCHES "jobserver") - set(PARALLEL_BUILD_ARGS "") -else () - set(PARALLEL_BUILD_ARGS "-j${CMAKE_BUILD_PARALLEL_LEVEL}") -endif () +if("$ENV{MAKEFLAGS}" MATCHES "jobserver") + set(PARALLEL_BUILD_ARGS "") +else() + set(PARALLEL_BUILD_ARGS "-j${CMAKE_BUILD_PARALLEL_LEVEL}") +endif() ExternalProject_Add( - jemalloc_project - PREFIX ${JEMALLOC_PREFIX_DIR} - URL "https://github.com/jemalloc/jemalloc/archive/refs/tags/${JEMALLOC_TAG}.tar.gz" - SOURCE_DIR ${JEMALLOC_SOURCE_DIR} - UPDATE_COMMAND "" - CONFIGURE_COMMAND bash -c "echo \"${JEMALLOC_TAG}-0-g$(git ls-remote --tags https://github.com/jemalloc/jemalloc.git | grep ${JEMALLOC_TAG} | tail -1 | cut -f1)\" > ${JEMALLOC_SOURCE_DIR}/VERSION" && cd ${JEMALLOC_SOURCE_DIR} && ./autogen.sh --prefix=${JEMALLOC_INSTALL_DIR} && rm VERSION - BUILD_COMMAND cd ${JEMALLOC_SOURCE_DIR} && make ${PARALLEL_BUILD_ARGS} - INSTALL_COMMAND cd ${JEMALLOC_SOURCE_DIR} && make -j1 install -) + jemalloc_project + PREFIX ${JEMALLOC_PREFIX_DIR} + URL "https://github.com/jemalloc/jemalloc/archive/refs/tags/${JEMALLOC_TAG}.tar.gz" + SOURCE_DIR ${JEMALLOC_SOURCE_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND + bash -c + "echo \"${JEMALLOC_TAG}-0-g$(git ls-remote --tags https://github.com/jemalloc/jemalloc.git | grep ${JEMALLOC_TAG} | tail -1 | cut -f1)\" > ${JEMALLOC_SOURCE_DIR}/VERSION" + && cd ${JEMALLOC_SOURCE_DIR} && ./autogen.sh + 
--prefix=${JEMALLOC_INSTALL_DIR} && rm VERSION + BUILD_COMMAND cd ${JEMALLOC_SOURCE_DIR} && make ${PARALLEL_BUILD_ARGS} + INSTALL_COMMAND cd ${JEMALLOC_SOURCE_DIR} && make -j1 install) # Create an imported target (works on all platforms). add_library(jemalloc STATIC IMPORTED) add_dependencies(jemalloc jemalloc_project) -set_target_properties(jemalloc PROPERTIES - IMPORTED_LOCATION "${JEMALLOC_INSTALL_DIR}/lib/libjemalloc.a" - INTERFACE_INCLUDE_DIRECTORIES "${JEMALLOC_INSTALL_DIR}/include" -) +set_target_properties( + jemalloc + PROPERTIES IMPORTED_LOCATION "${JEMALLOC_INSTALL_DIR}/lib/libjemalloc.a" + INTERFACE_INCLUDE_DIRECTORIES "${JEMALLOC_INSTALL_DIR}/include") diff --git a/cmake/FindZ3.cmake b/cmake/FindZ3.cmake index 75f2938299..ad5b6358ea 100644 --- a/cmake/FindZ3.cmake +++ b/cmake/FindZ3.cmake @@ -6,190 +6,203 @@ include(FindPackageHandleStandardArgs) # Function to check Z3's version function(check_z3_version z3_include z3_lib) - try_run( - Z3_RUN_RESULT Z3_COMPILE_RESULT ${CMAKE_CURRENT_BINARY_DIR} - ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/try_z3.cpp - CMAKE_FLAGS -DINCLUDE_DIRECTORIES:STRING=${z3_include} LINK_LIBRARIES ${z3_lib} - COMPILE_OUTPUT_VARIABLE COMPILE_OUTPUT RUN_OUTPUT_STDOUT_VARIABLE RUN_OUTPUT - RUN_OUTPUT_STDERR_VARIABLE RUN_OUTPUT_STDERR) - - if (NOT Z3_COMPILE_RESULT OR RUN_OUTPUT_STDERR) - if (NOT Z3_COMPILE_RESULT) - message(STATUS "Could not compile test program for Z3 version check. Compile output: " - ${COMPILE_OUTPUT}) - else () - message(STATUS "Could not run test program for Z3 version check. 
Run output: " ${RUN_OUTPUT} - " RUN_OUTPUT_STDERR: " ${RUN_OUTPUT_STDERR}) - endif () - if (z3_include AND EXISTS "${z3_include}/z3_version.h") - file(STRINGS "${z3_include}/z3_version.h" z3_version_str - REGEX "^#define[\t ]+Z3_MAJOR_VERSION[\t ]+.*") - string(REGEX REPLACE "^.*Z3_MAJOR_VERSION[\t ]+([0-9]*).*$" "\\1" Z3_MAJOR - "${z3_version_str}") - - file(STRINGS "${z3_include}/z3_version.h" z3_version_str - REGEX "^#define[\t ]+Z3_MINOR_VERSION[\t ]+.*") - string(REGEX REPLACE "^.*Z3_MINOR_VERSION[\t ]+([0-9]*).*$" "\\1" Z3_MINOR - "${z3_version_str}") - - file(STRINGS "${z3_include}/z3_version.h" z3_version_str - REGEX "^#define[\t ]+Z3_BUILD_NUMBER[\t ]+.*") - string(REGEX REPLACE "^.*Z3_BUILD_NUMBER[\t ]+([0-9]*).*$" "\\1" Z3_BUILD "${z3_version_str}") - - set(z3_version_string ${Z3_MAJOR}.${Z3_MINOR}.${Z3_BUILD}) - endif () - - if (NOT z3_version_string) - message(STATUS "Could not determine Z3 version from z3_version.h") - return() - endif () - else () - string(REGEX MATCH "(Z3 )?([0-9]+.[0-9]+.[0-9]+.[0-9]+)" Z3_VERSION_STRING ${RUN_OUTPUT}) - set(z3_version_string ${CMAKE_MATCH_2}) - endif () - - find_package_check_version(${z3_version_string} suitable_version RESULT_MESSAGE_VARIABLE reason) - - if (suitable_version) - set(FOUND_SUITABLE_VERSION - TRUE - PARENT_SCOPE) - set(Z3_VERSION_STRING - ${z3_version_string} - PARENT_SCOPE) - else () - message(STATUS "${reason}") - endif () + try_run( + Z3_RUN_RESULT Z3_COMPILE_RESULT ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/try_z3.cpp + CMAKE_FLAGS -DINCLUDE_DIRECTORIES:STRING=${z3_include} LINK_LIBRARIES + ${z3_lib} + COMPILE_OUTPUT_VARIABLE + COMPILE_OUTPUT RUN_OUTPUT_STDOUT_VARIABLE RUN_OUTPUT + RUN_OUTPUT_STDERR_VARIABLE RUN_OUTPUT_STDERR) + + if(NOT Z3_COMPILE_RESULT OR RUN_OUTPUT_STDERR) + if(NOT Z3_COMPILE_RESULT) + message( + STATUS + "Could not compile test program for Z3 version check. 
Compile output: " + ${COMPILE_OUTPUT}) + else() + message( + STATUS "Could not run test program for Z3 version check. Run output: " + ${RUN_OUTPUT} " RUN_OUTPUT_STDERR: " ${RUN_OUTPUT_STDERR}) + endif() + if(z3_include AND EXISTS "${z3_include}/z3_version.h") + file(STRINGS "${z3_include}/z3_version.h" z3_version_str + REGEX "^#define[\t ]+Z3_MAJOR_VERSION[\t ]+.*") + string(REGEX REPLACE "^.*Z3_MAJOR_VERSION[\t ]+([0-9]*).*$" "\\1" + Z3_MAJOR "${z3_version_str}") + + file(STRINGS "${z3_include}/z3_version.h" z3_version_str + REGEX "^#define[\t ]+Z3_MINOR_VERSION[\t ]+.*") + string(REGEX REPLACE "^.*Z3_MINOR_VERSION[\t ]+([0-9]*).*$" "\\1" + Z3_MINOR "${z3_version_str}") + + file(STRINGS "${z3_include}/z3_version.h" z3_version_str + REGEX "^#define[\t ]+Z3_BUILD_NUMBER[\t ]+.*") + string(REGEX REPLACE "^.*Z3_BUILD_NUMBER[\t ]+([0-9]*).*$" "\\1" Z3_BUILD + "${z3_version_str}") + + set(z3_version_string ${Z3_MAJOR}.${Z3_MINOR}.${Z3_BUILD}) + endif() + + if(NOT z3_version_string) + message(STATUS "Could not determine Z3 version from z3_version.h") + return() + endif() + else() + string(REGEX MATCH "(Z3 )?([0-9]+.[0-9]+.[0-9]+.[0-9]+)" Z3_VERSION_STRING + ${RUN_OUTPUT}) + set(z3_version_string ${CMAKE_MATCH_2}) + endif() + + find_package_check_version(${z3_version_string} suitable_version + RESULT_MESSAGE_VARIABLE reason) + + if(suitable_version) + set(FOUND_SUITABLE_VERSION + TRUE + PARENT_SCOPE) + set(Z3_VERSION_STRING + ${z3_version_string} + PARENT_SCOPE) + else() + message(STATUS "${reason}") + endif() endfunction(check_z3_version) set(Z3_ROOT - "" - CACHE PATH "Root of Z3 distribution.") -if (DEFINED ENV{Z3_ROOT}) - set(Z3_ROOT $ENV{Z3_ROOT}) - message(STATUS "Z3_ROOT from environment: ${Z3_ROOT}") -endif () + "" + CACHE PATH "Root of Z3 distribution.") +if(DEFINED ENV{Z3_ROOT}) + set(Z3_ROOT $ENV{Z3_ROOT}) + message(STATUS "Z3_ROOT from environment: ${Z3_ROOT}") +endif() # if Z3_ROOT is provided, check there first -if (NOT ${Z3_ROOT} STREQUAL "") - find_path( - 
Z3_CXX_INCLUDE_DIRS - NAMES z3.h z3++.h - NO_DEFAULT_PATH - PATHS ${Z3_ROOT}/include - PATH_SUFFIXES libz3 z3) - - find_library( - Z3_LIBRARIES - NAMES z3 libz3 - NO_DEFAULT_PATH - PATHS ${Z3_ROOT} - PATH_SUFFIXES lib bin) - - if (Z3_CXX_INCLUDE_DIRS AND Z3_LIBRARIES) - message(STATUS "Z3_ROOT provided and includes and libraries found.") - message(VERBOSE "Z3_CXX_INCLUDE_DIRS: ${Z3_CXX_INCLUDE_DIRS}") - message(VERBOSE "Z3_LIBRARIES: ${Z3_LIBRARIES}") - check_z3_version(${Z3_CXX_INCLUDE_DIRS} ${Z3_LIBRARIES}) - endif () -endif () +if(NOT ${Z3_ROOT} STREQUAL "") + find_path( + Z3_CXX_INCLUDE_DIRS + NAMES z3.h z3++.h + NO_DEFAULT_PATH + PATHS ${Z3_ROOT}/include + PATH_SUFFIXES libz3 z3) + + find_library( + Z3_LIBRARIES + NAMES z3 libz3 + NO_DEFAULT_PATH + PATHS ${Z3_ROOT} + PATH_SUFFIXES lib bin) + + if(Z3_CXX_INCLUDE_DIRS AND Z3_LIBRARIES) + message(STATUS "Z3_ROOT provided and includes and libraries found.") + message(VERBOSE "Z3_CXX_INCLUDE_DIRS: ${Z3_CXX_INCLUDE_DIRS}") + message(VERBOSE "Z3_LIBRARIES: ${Z3_LIBRARIES}") + check_z3_version(${Z3_CXX_INCLUDE_DIRS} ${Z3_LIBRARIES}) + endif() +endif() # see if a config file is available -if (NOT FOUND_SUITABLE_VERSION) - unset(Z3_CXX_INCLUDE_DIRS CACHE) - unset(Z3_LIBRARIES CACHE) - - find_package(Z3 CONFIG QUIET) - if (Z3_FOUND) - message(STATUS "Found Z3 includes and libraries from config file") - message(VERBOSE "Z3_CXX_INCLUDE_DIRS: ${Z3_CXX_INCLUDE_DIRS}") - message(VERBOSE "Z3_LIBRARIES: ${Z3_LIBRARIES}") - set(FOUND_SUITABLE_VERSION TRUE) - set(Z3_VERSION_STRING ${Z3_VERSION}) - endif () -endif () +if(NOT FOUND_SUITABLE_VERSION) + unset(Z3_CXX_INCLUDE_DIRS CACHE) + unset(Z3_LIBRARIES CACHE) + + find_package(Z3 CONFIG QUIET) + if(Z3_FOUND) + message(STATUS "Found Z3 includes and libraries from config file") + message(VERBOSE "Z3_CXX_INCLUDE_DIRS: ${Z3_CXX_INCLUDE_DIRS}") + message(VERBOSE "Z3_LIBRARIES: ${Z3_LIBRARIES}") + set(FOUND_SUITABLE_VERSION TRUE) + set(Z3_VERSION_STRING ${Z3_VERSION}) + endif() +endif() 
# if Z3 has not been found yet, look in the system paths -if (NOT FOUND_SUITABLE_VERSION) - unset(Z3_CXX_INCLUDE_DIRS CACHE) - unset(Z3_LIBRARIES CACHE) - - find_path( - Z3_CXX_INCLUDE_DIRS - NAMES z3.h z3++.h - PATH_SUFFIXES libz3 z3) - find_library( - Z3_LIBRARIES - NAMES z3 libz3 - PATH_SUFFIXES lib bin) - - if (Z3_CXX_INCLUDE_DIRS AND Z3_LIBRARIES) - message(STATUS "Found Z3 includes and libraries in system paths.") +if(NOT FOUND_SUITABLE_VERSION) + unset(Z3_CXX_INCLUDE_DIRS CACHE) + unset(Z3_LIBRARIES CACHE) + + find_path( + Z3_CXX_INCLUDE_DIRS + NAMES z3.h z3++.h + PATH_SUFFIXES libz3 z3) + find_library( + Z3_LIBRARIES + NAMES z3 libz3 + PATH_SUFFIXES lib bin) + + if(Z3_CXX_INCLUDE_DIRS AND Z3_LIBRARIES) + message(STATUS "Found Z3 includes and libraries in system paths.") + message(VERBOSE "Z3_CXX_INCLUDE_DIRS: ${Z3_CXX_INCLUDE_DIRS}") + message(VERBOSE "Z3_LIBRARIES: ${Z3_LIBRARIES}") + check_z3_version(${Z3_CXX_INCLUDE_DIRS} ${Z3_LIBRARIES}) + endif() +endif() + +# if it is still not found, try to find it with Python as a last resort +if(NOT FOUND_SUITABLE_VERSION) + unset(Z3_CXX_INCLUDE_DIRS CACHE) + unset(Z3_LIBRARIES CACHE) + + set(PYTHON_FIND_VIRTUALENV FIRST) + find_package(Python COMPONENTS Interpreter Development.Module) + if(Python_FOUND) + execute_process( + COMMAND ${Python_EXECUTABLE} -c + "import os, z3; print(os.path.dirname(z3.__file__))" + OUTPUT_VARIABLE Z3_PYTHON_ROOT) + string(STRIP "${Z3_PYTHON_ROOT}" Z3_PYTHON_ROOT) + message(STATUS "Z3_PYTHON_ROOT: ${Z3_PYTHON_ROOT}") + + if(Z3_PYTHON_ROOT) + find_path( + Z3_CXX_INCLUDE_DIRS + NAMES z3.h z3++.h + NO_DEFAULT_PATH + PATHS ${Z3_PYTHON_ROOT} + PATH_SUFFIXES libz3 z3 include) + + find_library( + Z3_LIBRARIES + NAMES z3 libz3 + NO_DEFAULT_PATH + PATHS ${Z3_PYTHON_ROOT} + PATH_SUFFIXES lib bin) + + if(Z3_CXX_INCLUDE_DIRS AND Z3_LIBRARIES) + message( + STATUS "Found Z3 includes and libraries from Python installation.") message(VERBOSE "Z3_CXX_INCLUDE_DIRS: ${Z3_CXX_INCLUDE_DIRS}") 
message(VERBOSE "Z3_LIBRARIES: ${Z3_LIBRARIES}") check_z3_version(${Z3_CXX_INCLUDE_DIRS} ${Z3_LIBRARIES}) - endif () -endif () - -# if it is still not found, try to find it with Python as a last resort -if (NOT FOUND_SUITABLE_VERSION) - unset(Z3_CXX_INCLUDE_DIRS CACHE) - unset(Z3_LIBRARIES CACHE) - - set(PYTHON_FIND_VIRTUALENV FIRST) - find_package(Python COMPONENTS Interpreter Development.Module) - if (Python_FOUND) - execute_process( - COMMAND ${Python_EXECUTABLE} -c "import os, z3; print(os.path.dirname(z3.__file__))" - OUTPUT_VARIABLE Z3_PYTHON_ROOT) - string(STRIP "${Z3_PYTHON_ROOT}" Z3_PYTHON_ROOT) - message(STATUS "Z3_PYTHON_ROOT: ${Z3_PYTHON_ROOT}") - - if (Z3_PYTHON_ROOT) - find_path( - Z3_CXX_INCLUDE_DIRS - NAMES z3.h z3++.h - NO_DEFAULT_PATH - PATHS ${Z3_PYTHON_ROOT} - PATH_SUFFIXES libz3 z3 include) - - find_library( - Z3_LIBRARIES - NAMES z3 libz3 - NO_DEFAULT_PATH - PATHS ${Z3_PYTHON_ROOT} - PATH_SUFFIXES lib bin) - - if (Z3_CXX_INCLUDE_DIRS AND Z3_LIBRARIES) - message(STATUS "Found Z3 includes and libraries from Python installation.") - message(VERBOSE "Z3_CXX_INCLUDE_DIRS: ${Z3_CXX_INCLUDE_DIRS}") - message(VERBOSE "Z3_LIBRARIES: ${Z3_LIBRARIES}") - check_z3_version(${Z3_CXX_INCLUDE_DIRS} ${Z3_LIBRARIES}) - endif () - endif () - endif () -endif () - -if (NOT FOUND_SUITABLE_VERSION) - if (Z3_CXX_INCLUDE_DIRS AND Z3_LIBRARIES) - message(STATUS "Found include and library directories but could not find a suitable Z3 version") - endif () - set(Z3_VERSION_STRING "0.0.0") -endif () + endif() + endif() + endif() +endif() + +if(NOT FOUND_SUITABLE_VERSION) + if(Z3_CXX_INCLUDE_DIRS AND Z3_LIBRARIES) + message( + STATUS + "Found include and library directories but could not find a suitable Z3 version" + ) + endif() + set(Z3_VERSION_STRING "0.0.0") +endif() find_package_handle_standard_args( - Z3 - REQUIRED_VARS Z3_LIBRARIES Z3_CXX_INCLUDE_DIRS - VERSION_VAR Z3_VERSION_STRING) - -if (Z3_FOUND) - if (NOT TARGET z3::z3lib) - add_library(z3::z3lib INTERFACE 
IMPORTED GLOBAL) - target_include_directories(z3::z3lib INTERFACE ${Z3_CXX_INCLUDE_DIRS}) - target_link_libraries(z3::z3lib INTERFACE ${Z3_LIBRARIES}) - endif () - add_compile_definitions(Z3_FOUND) -endif () + Z3 + REQUIRED_VARS Z3_LIBRARIES Z3_CXX_INCLUDE_DIRS + VERSION_VAR Z3_VERSION_STRING) + +if(Z3_FOUND) + if(NOT TARGET z3::z3lib) + add_library(z3::z3lib INTERFACE IMPORTED GLOBAL) + target_include_directories(z3::z3lib INTERFACE ${Z3_CXX_INCLUDE_DIRS}) + target_link_libraries(z3::z3lib INTERFACE ${Z3_LIBRARIES}) + endif() + add_compile_definitions(Z3_FOUND) +endif() mark_as_advanced(Z3_CXX_INCLUDE_DIRS Z3_LIBRARIES Z3_VERSION_STRING) diff --git a/cmake/Findjemalloc.cmake b/cmake/Findjemalloc.cmake index a130528135..a5ab66dd73 100644 --- a/cmake/Findjemalloc.cmake +++ b/cmake/Findjemalloc.cmake @@ -1,8 +1,7 @@ -# From: https://raw.githubusercontent.com/STEllAR-GROUP/hpx/master/cmake/FindJemalloc.cmake -# Copyright (c) 2014 Thomas Heller -# Copyright (c) 2007-2012 Hartmut Kaiser -# Copyright (c) 2010-2011 Matt Anderson -# Copyright (c) 2011 Bryce Lelbach +# From: +# https://raw.githubusercontent.com/STEllAR-GROUP/hpx/master/cmake/FindJemalloc.cmake +# Copyright (c) 2014 Thomas Heller Copyright (c) 2007-2012 Hartmut Kaiser +# Copyright (c) 2010-2011 Matt Anderson Copyright (c) 2011 Bryce Lelbach # # Distributed under the Boost Software License, Version 1.0. 
(See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) @@ -10,44 +9,49 @@ find_package(PkgConfig) pkg_check_modules(PC_JEMALLOC QUIET libjemalloc) -find_path(JEMALLOC_INCLUDE_DIR jemalloc/jemalloc.h - HINTS - ${JEMALLOC_ROOT} ENV JEMALLOC_ROOT +find_path( + JEMALLOC_INCLUDE_DIR jemalloc/jemalloc.h + HINTS ${JEMALLOC_ROOT} + ENV + JEMALLOC_ROOT ${PC_JEMALLOC_MINIMAL_INCLUDEDIR} ${PC_JEMALLOC_MINIMAL_INCLUDE_DIRS} ${PC_JEMALLOC_INCLUDEDIR} ${PC_JEMALLOC_INCLUDE_DIRS} - PATH_SUFFIXES include) - -find_library(JEMALLOC_LIBRARY NAMES jemalloc libjemalloc - HINTS - ${JEMALLOC_ROOT} ENV JEMALLOC_ROOT + PATH_SUFFIXES include) + +find_library( + JEMALLOC_LIBRARY + NAMES jemalloc libjemalloc + HINTS ${JEMALLOC_ROOT} + ENV + JEMALLOC_ROOT ${PC_JEMALLOC_MINIMAL_LIBDIR} ${PC_JEMALLOC_MINIMAL_LIBRARY_DIRS} ${PC_JEMALLOC_LIBDIR} ${PC_JEMALLOC_LIBRARY_DIRS} - PATH_SUFFIXES lib lib64) + PATH_SUFFIXES lib lib64) -if (JEMALLOC_INCLUDE_DIR) - set(_version_regex "^#define[ \t]+JEMALLOC_VERSION[ \t]+\"([^\"]+)\".*") - file(STRINGS "${JEMALLOC_INCLUDE_DIR}/jemalloc/jemalloc.h" - JEMALLOC_VERSION REGEX "${_version_regex}") - string(REGEX REPLACE "${_version_regex}" "\\1" - JEMALLOC_VERSION "${JEMALLOC_VERSION}") - unset(_version_regex) -endif () +if(JEMALLOC_INCLUDE_DIR) + set(_version_regex "^#define[ \t]+JEMALLOC_VERSION[ \t]+\"([^\"]+)\".*") + file(STRINGS "${JEMALLOC_INCLUDE_DIR}/jemalloc/jemalloc.h" JEMALLOC_VERSION + REGEX "${_version_regex}") + string(REGEX REPLACE "${_version_regex}" "\\1" JEMALLOC_VERSION + "${JEMALLOC_VERSION}") + unset(_version_regex) +endif() include(FindPackageHandleStandardArgs) -# handle the QUIETLY and REQUIRED arguments and set JEMALLOC_FOUND to TRUE -# if all listed variables are TRUE and the requested version matches. 
-find_package_handle_standard_args(jemalloc REQUIRED_VARS - JEMALLOC_LIBRARY JEMALLOC_INCLUDE_DIR - VERSION_VAR JEMALLOC_VERSION) - - -if (JEMALLOC_FOUND) - set(JEMALLOC_LIBRARIES ${JEMALLOC_LIBRARY}) - set(JEMALLOC_INCLUDE_DIRS ${JEMALLOC_INCLUDE_DIR}) -endif () +# handle the QUIETLY and REQUIRED arguments and set JEMALLOC_FOUND to TRUE if +# all listed variables are TRUE and the requested version matches. +find_package_handle_standard_args( + jemalloc + REQUIRED_VARS JEMALLOC_LIBRARY JEMALLOC_INCLUDE_DIR + VERSION_VAR JEMALLOC_VERSION) + +if(JEMALLOC_FOUND) + set(JEMALLOC_LIBRARIES ${JEMALLOC_LIBRARY}) + set(JEMALLOC_INCLUDE_DIRS ${JEMALLOC_INCLUDE_DIR}) +endif() mark_as_advanced(JEMALLOC_INCLUDE_DIR JEMALLOC_LIBRARY) diff --git a/cmake/ProjectOptions.cmake b/cmake/ProjectOptions.cmake index 7ad9d12e3d..926ccc5b7d 100644 --- a/cmake/ProjectOptions.cmake +++ b/cmake/ProjectOptions.cmake @@ -31,7 +31,8 @@ macro(fiction_setup_options) option(FICTION_ENABLE_UNITY_BUILD "Enable unity builds" OFF) option(FICTION_ENABLE_PCH "Enable precompiled headers" OFF) option(FICTION_ENABLE_CACHE "Enable ccache" ON) - option(FICTION_LIGHTWEIGHT_DEBUG_BUILDS "Reduce memory consumption of Debug builds" OFF) + option(FICTION_LIGHTWEIGHT_DEBUG_BUILDS + "Reduce memory consumption of Debug builds" OFF) if(NOT PROJECT_IS_TOP_LEVEL) mark_as_advanced( @@ -70,7 +71,15 @@ macro(fiction_local_options) include(cmake/CompilerWarnings.cmake) fiction_set_project_warnings(fiction_warnings ${FICTION_WARNINGS_AS_ERRORS} - "" "" "" "") + "" "" "") + + target_compile_options( + fiction_options + INTERFACE + $<$,$,$>: + -fvisibility=hidden + -fvisibility-inlines-hidden + >) include(cmake/Sanitizers.cmake) fiction_enable_sanitizers( @@ -99,15 +108,20 @@ macro(fiction_local_options) endif() endif() - # This applies a memory optimization for Debug builds which may be used to conform to memory limitations - if (FICTION_LIGHTWEIGHT_DEBUG_BUILDS) - if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - 
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Z7 /Ob0") - elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -g1 -fno-inline") - else () - message(WARNING "Lightweight Debug builds are not supported for this compiler (${CMAKE_CXX_COMPILER_ID}).") - endif () - endif () + # This applies a memory optimization for Debug builds which may be used to + # conform to memory limitations + if(FICTION_LIGHTWEIGHT_DEBUG_BUILDS) + if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Z7 /Ob0") + elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID + MATCHES ".*Clang") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -g1 -fno-inline") + else() + message( + WARNING + "Lightweight Debug builds are not supported for this compiler (${CMAKE_CXX_COMPILER_ID})." + ) + endif() + endif() endmacro() diff --git a/cmake/SystemLink.cmake b/cmake/SystemLink.cmake index 3a90a0772c..41923ce514 100644 --- a/cmake/SystemLink.cmake +++ b/cmake/SystemLink.cmake @@ -28,15 +28,24 @@ endfunction() # Include the directories of a library target as system directories (which # suppresses their warnings). function(target_include_system_library target scope lib) + # Check if lib contains a generator expression for BUILD_INTERFACE + string(REGEX MATCH "\\$" match "${lib}") + if(CMAKE_MATCH_1) + set(check_target ${CMAKE_MATCH_1}) + else() + set(check_target ${lib}) + endif() + # check if this is a target - if(TARGET ${lib}) - get_target_property(lib_include_dirs ${lib} INTERFACE_INCLUDE_DIRECTORIES) + if(TARGET ${check_target}) + get_target_property(lib_include_dirs ${check_target} + INTERFACE_INCLUDE_DIRECTORIES) if(lib_include_dirs) target_include_system_directories(${target} ${scope} ${lib_include_dirs}) else() message( TRACE - "${lib} library does not have the INTERFACE_INCLUDE_DIRECTORIES property." 
+ "${check_target} library does not have the INTERFACE_INCLUDE_DIRECTORIES property." ) endif() endif() diff --git a/cmake/Utilities.cmake b/cmake/Utilities.cmake index 9fc325c0e8..30f9b683fa 100644 --- a/cmake/Utilities.cmake +++ b/cmake/Utilities.cmake @@ -99,3 +99,19 @@ function(is_verbose var) PARENT_SCOPE) endif() endfunction() + +function(fiction_strip_target target_name) + if(CMAKE_BUILD_TYPE STREQUAL "Release") + if(CMAKE_STRIP) + add_custom_command( + TARGET ${target_name} + POST_BUILD + COMMAND ${CMAKE_STRIP} $) + else() + message( + WARNING + "Strip command is not available. The executables will not be stripped." + ) + endif() + endif() +endfunction() diff --git a/cmake/fictionConfig.cmake.in b/cmake/fictionConfig.cmake.in new file mode 100644 index 0000000000..ec9f8cb77d --- /dev/null +++ b/cmake/fictionConfig.cmake.in @@ -0,0 +1,30 @@ +@PACKAGE_INIT@ + +include("${CMAKE_CURRENT_LIST_DIR}/fictionTargets.cmake") + +# Helper macro to define imported interface library +macro(fiction_define_imported_target target_name include_subdir) + if(NOT TARGET ${target_name}) + add_library(${target_name} INTERFACE IMPORTED) + set_target_properties( + ${target_name} + PROPERTIES INTERFACE_INCLUDE_DIRECTORIES + "${PACKAGE_PREFIX_DIR}/include/${include_subdir}") + endif() +endmacro() + +# Define dependencies +fiction_define_imported_target(fiction::alice "") +fiction_define_imported_target(fiction::mockturtle "") +fiction_define_imported_target(fiction::nlohmann_json "") +fiction_define_imported_target(fiction::graph-coloring "graph-coloring") +fiction_define_imported_target(fiction::undirected_graph "undirected_graph") +fiction_define_imported_target(fiction::combinations "combinations") +fiction_define_imported_target(fiction::tinyxml2 "") + +# ALGLIB (Optional) +if(EXISTS "${PACKAGE_PREFIX_DIR}/include/alglib") + fiction_define_imported_target(fiction::alglib "alglib") +endif() + +check_required_components(fiction) diff --git a/docs/changelog.rst b/docs/changelog.rst 
index eddb1debbb..cfe61f9a8a 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -12,13 +12,31 @@ Added ##### - Documentation: - Added ``AGENTS.md`` to guide AI agents in the repository - +- Build system: + - Added `CMakePresets.json` to offer default CMake configurations + - Added support for CMake version 4+ +- Tooling: + - Added the following `pre-commit` hooks: + - `check-vcs-permalinks` + - `check-symlinks` + - `check-json` + - `cmake-format-precommit` + - `uv-pre-commit` Changed ####### - Build system: - Restructured the CLI command implementation to improve code organization, modularity, and compilation speed + - Refactored the entire CMake build system to use ``FetchContent`` for dependency management instead of git submodules + - Moved vendored libraries from ``libs/`` to ``vendors/`` + - Improved build configuration and option handling for better modularity + - Addressed several CMake and compiler warnings (including CMP0148 and Pybind11 compatibility) as well as CMake code smells + - Streamlined package installation and discovery process +Fixed +##### +- Code quality: + - Addressed several ``clang-tidy`` warnings throughout the code base v0.6.12 - 2025-10-29 -------------------- @@ -229,7 +247,7 @@ Added - I/O: - SVG drawer for SiDB layouts - Experiments: - - Ship the SiQAD and Bestagon gate libraries als SQD files + - Ship the SiQAD and Bestagon gate libraries as SQD files - Documentation: - Added wiring reduction paper to publication list - Added Willem Lambooy to the authors list diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 947d532573..01d819b73d 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -242,7 +242,7 @@ It has some further Python dependencies that can be installed via ``pip3``: .. code-block:: console - (venv) $ pip install -r libs/mugen/requirements.txt + (venv) $ pip install -r vendors/mugen/requirements.txt The Python integration is experimental and may cause issues on some systems.
It is currently not available on Windows and some macOS versions due to issues with ``python-sat``. Mugen requires at least Python 3.7! diff --git a/experiments/CMakeLists.txt b/experiments/CMakeLists.txt index d787a793d2..1beaecd260 100644 --- a/experiments/CMakeLists.txt +++ b/experiments/CMakeLists.txt @@ -2,9 +2,8 @@ add_library(fiction_experiments INTERFACE) target_include_directories( - fiction_experiments - INTERFACE ${CMAKE_CURRENT_SOURCE_DIR} - ${PROJECT_SOURCE_DIR}/libs/mockturtle/experiments/) + fiction_experiments INTERFACE ${CMAKE_CURRENT_SOURCE_DIR} + ${mockturtle_SOURCE_DIR}/experiments/) target_link_libraries(fiction_experiments INTERFACE libfiction) if(ENABLE_MATPLOTLIB) target_link_libraries(fiction_experiments INTERFACE matplot) @@ -30,6 +29,7 @@ target_compile_definitions( fiction_experiments INTERFACE "EXPERIMENTS_PATH=\"${CMAKE_CURRENT_SOURCE_DIR}/\"") +# Recursing is fine here to reduce config overhead file(GLOB_RECURSE FILENAMES *.cpp) foreach(filename ${FILENAMES}) @@ -38,17 +38,5 @@ foreach(filename ${FILENAMES}) target_link_libraries(${expname} PUBLIC fiction_experiments) # Strip the executable if we are in Release mode - if(CMAKE_BUILD_TYPE STREQUAL "Release") - if(CMAKE_STRIP) - add_custom_command( - TARGET ${expname} - POST_BUILD - COMMAND ${CMAKE_STRIP} $) - else() - message( - WARNING - "Strip command is not available. The executables will not be stripped." 
- ) - endif() - endif() + fiction_strip_target(${expname}) endforeach() diff --git a/experiments/defect_aware_physical_design/generate_defective_surface.py b/experiments/defect_aware_physical_design/generate_defective_surface.py index ee6e74c874..ee9e337896 100644 --- a/experiments/defect_aware_physical_design/generate_defective_surface.py +++ b/experiments/defect_aware_physical_design/generate_defective_surface.py @@ -39,7 +39,7 @@ def __init__(self, surface_width: int = 100, surface_height: int = 100) -> None: # add_defects function allows you to choose total coverage of defects (fully defected surface is coverage = 1.) # self.defect_params is array used for configuration of ratio and size of defects on the surface - # each entry is formated as [array_value,width,height,ratio] + # each entry is formatted as [array_value,width,height,ratio] # array_value is number assigned to each lattice point in the self.surface_lattice # width and height are how many HSi atoms are used (note this different than surface_height in init) # ratio is given as fractional percent (0.05 = 5%) @@ -114,7 +114,7 @@ def add_defects(self, coverage: float = 0.05) -> None: i = i - 1 def draw_panels(self) -> None: # DB_panels,DB_pattern_extended, pattern): - # draws the DB_pattern_extended with rectangles to show each pannel + # draws the DB_pattern_extended with rectangles to show each panel width_nm = self.a1 * self.surface_width height_nm = self.a2 * self.surface_height diff --git a/experiments/figure_of_merit_analysis/fom_analysis_2_input_1_output.cpp b/experiments/figure_of_merit_analysis/fom_analysis_2_input_1_output.cpp index 5132f0e4e0..4e2c43d33e 100644 --- a/experiments/figure_of_merit_analysis/fom_analysis_2_input_1_output.cpp +++ b/experiments/figure_of_merit_analysis/fom_analysis_2_input_1_output.cpp @@ -137,9 +137,9 @@ int main() // NOLINT std::vector bbr_all = {}; std::vector all_gates{}; - design_sidb_gates_stats efficent_stats{}; + design_sidb_gates_stats efficient_stats{}; - 
all_gates = design_sidb_gates(skeleton, truth_table, design_params, &efficent_stats); + all_gates = design_sidb_gates(skeleton, truth_table, design_params, &efficient_stats); if (all_gates.empty()) { diff --git a/experiments/sidb_simulation/electrostatic_ground_state/runtime_analysis_bestagon_gates.cpp b/experiments/sidb_simulation/electrostatic_ground_state/runtime_analysis_bestagon_gates.cpp index 366328f649..109793a842 100644 --- a/experiments/sidb_simulation/electrostatic_ground_state/runtime_analysis_bestagon_gates.cpp +++ b/experiments/sidb_simulation/electrostatic_ground_state/runtime_analysis_bestagon_gates.cpp @@ -20,7 +20,6 @@ #include #include -#include #include #include #include @@ -63,7 +62,7 @@ int main() // NOLINT double total_runtime_exhaustive = 0.0; double total_runtime_quickexact = 0.0; double average_accuracy_quicksim = 0.0; - double total_single_rumtime_quicksim = 0.0; + double total_single_runtime_quicksim = 0.0; double total_tts_quicksim = 0.0; std::size_t total_number_of_instances = 0; @@ -117,7 +116,7 @@ int main() // NOLINT total_runtime_exhaustive += runtime_exhaustive; total_runtime_quickexact += runtime_quickexact; average_accuracy_quicksim += quicksim_accuracy_mean; - total_single_rumtime_quicksim += quicksim_single_runtime; + total_single_runtime_quicksim += quicksim_single_runtime; total_tts_quicksim += tts_quicksim; simulation_exp(gate, instances, runtime_exhaustive, runtime_quickexact, quicksim_accuracy_mean, @@ -127,7 +126,7 @@ int main() // NOLINT } simulation_exp("Total", total_number_of_instances, total_runtime_exhaustive, total_runtime_quickexact, - average_accuracy_quicksim / gates.size(), total_single_rumtime_quicksim, total_tts_quicksim); + average_accuracy_quicksim / gates.size(), total_single_runtime_quicksim, total_tts_quicksim); simulation_exp.save(); simulation_exp.table(); diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt index 920a9840d8..8c30d33ffb 100644 --- a/include/CMakeLists.txt +++ 
b/include/CMakeLists.txt @@ -1,7 +1,12 @@ add_library(libfiction INTERFACE) +add_library(fiction::libfiction ALIAS libfiction) -target_include_directories(libfiction INTERFACE ${PROJECT_SOURCE_DIR}/include - ${PROJECT_BINARY_DIR}/include/) +target_include_directories( + libfiction + INTERFACE $ + $ + $ + $) target_compile_features(libfiction INTERFACE cxx_std_${CMAKE_CXX_STANDARD}) @@ -15,39 +20,77 @@ set_target_properties( VISIBILITY_INLINES_HIDDEN YES) # Set UTF-8 encoding for MSVC -if (MSVC) - target_compile_options(libfiction INTERFACE /utf-8) - add_definitions(-DUNICODE -D_UNICODE) -endif () +if(MSVC) + target_compile_options(libfiction INTERFACE /utf-8 /Zm10) + add_definitions(-DUNICODE -D_UNICODE) +else() + # enable some more optimizations in release mode + target_compile_options( + libfiction INTERFACE $<$:-fno-math-errno -fno-trapping-math + -fno-stack-protector>) + + # enable some more options for better debugging + target_compile_options( + libfiction INTERFACE $<$:-fno-omit-frame-pointer + -fno-optimize-sibling-calls -fno-inline-functions>) +endif() + +# add a compile definition for _LIBCPP_REMOVE_TRANSITIVE_INCLUDES to remove +# transitive includes from libc++ headers. This is useful to avoid including +# system headers that are not needed and that may conflict with other headers. +# This is only supported by libc++. 
+if(CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") + target_compile_definitions(libfiction + INTERFACE _LIBCPP_REMOVE_TRANSITIVE_INCLUDES) +endif() # Add configuration file -configure_file(${PROJECT_SOURCE_DIR}/include/fiction/utils/version_info.hpp.in - utils/version_info.hpp) +configure_file( + ${PROJECT_SOURCE_DIR}/include/fiction/utils/version_info.hpp.in + ${PROJECT_BINARY_DIR}/generated/include/fiction/utils/version_info.hpp) # Collect top-level public headers under include/ -file(GLOB_RECURSE FICTION_PUBLIC_HEADERS CONFIGURE_DEPENDS - RELATIVE ${PROJECT_SOURCE_DIR}/include - ${PROJECT_SOURCE_DIR}/include/*.hpp) +file( + GLOB_RECURSE FICTION_PUBLIC_HEADERS CONFIGURE_DEPENDS + RELATIVE ${PROJECT_SOURCE_DIR}/include + ${PROJECT_SOURCE_DIR}/include/*.hpp) # Register header file set for IDE integration and installation metadata # Multiple BASE_DIRS so CMake knows how to layout the installed tree # -# Using a FILE_SET ensures proper exposure in CMake package exports later -# if export() logic is added. +# Using a FILE_SET ensures proper exposure in CMake package exports later if +# export() logic is added. # # (GLOB with CONFIGURE_DEPENDS keeps IDE view in sync when adding headers.) 
-target_sources(libfiction +target_sources( + libfiction INTERFACE - FILE_SET HEADERS - BASE_DIRS - ${PROJECT_SOURCE_DIR}/include - ${PROJECT_BINARY_DIR}/include - FILES - ${FICTION_PUBLIC_HEADERS} - ${PROJECT_BINARY_DIR}/include/utils/version_info.hpp -) + FILE_SET + HEADERS + BASE_DIRS + ${PROJECT_SOURCE_DIR}/include + ${PROJECT_BINARY_DIR}/include + ${PROJECT_BINARY_DIR}/generated/include + FILES + ${FICTION_PUBLIC_HEADERS} + ${PROJECT_BINARY_DIR}/generated/include/fiction/utils/version_info.hpp) # Ensure header verification is enabled for this target set_property(TARGET libfiction PROPERTY VERIFY_INTERFACE_HEADER_SETS ON) # Enforce project-wide C++ standard feature requirement (redundant but explicit) target_compile_features(libfiction INTERFACE cxx_std_${CMAKE_CXX_STANDARD}) + +# Installation +install( + TARGETS libfiction + EXPORT fictionTargets + FILE_SET HEADERS) + +# Install internal targets to satisfy export set requirements +install(TARGETS fiction_options fiction_warnings EXPORT fictionTargets) + +install( + EXPORT fictionTargets + FILE fictionTargets.cmake + NAMESPACE fiction:: + DESTINATION lib/cmake/fiction) diff --git a/include/fiction/algorithms/graph/mincross.hpp b/include/fiction/algorithms/graph/mincross.hpp index e6e0efb2cc..f954869d8b 100644 --- a/include/fiction/algorithms/graph/mincross.hpp +++ b/include/fiction/algorithms/graph/mincross.hpp @@ -208,7 +208,7 @@ class mincross_impl if (pass % 2 == 0) { // Upward pass: from rank 1 to max_rank - for (auto r = 1; r <= max_rank; ++r) + for (uint32_t r = 1; r <= max_rank; ++r) { if (r == 0 && ps.fixed_pis) { @@ -333,9 +333,9 @@ class mincross_impl * Reorders the nodes in a given rank according to computed medians. * * @param r The rank index. - * @param reverse If `true`, sorts in descending order of medians. + * @param order Sorting order of medians. 
*/ - void reorder(const uint32_t r, median_sorting order) + void reorder(const uint32_t r, const median_sorting order) { // Get the nodes at rank r auto rank = fanout_ntk.get_ranks(r); @@ -367,9 +367,9 @@ class mincross_impl /** * Performs pairwise transpositions within ranks to further reduce crossings. * - * @param reverse If `true`, applies reversed heuristic for tie-breaking. + * @param order Sorting heuristic for tie-breaking. */ - void transpose(median_sorting order) + void transpose(const median_sorting order) { std::vector candidate(fanout_ntk.depth() + 1, 1); uint32_t delta = 0; @@ -407,10 +407,10 @@ class mincross_impl * Performs a single transposition pass for rank `r`. * * @param r Rank index. - * @param reverse If `true`, applies reversed heuristic for tie-breaking. + * @param order Sorting heuristic for tie-breaking. * @return The number of crossings reduced. */ - uint32_t transpose_step(const uint32_t r, median_sorting order) + uint32_t transpose_step(const uint32_t r, const median_sorting order) { auto rank = fanout_ntk.get_ranks(r); diff --git a/include/fiction/algorithms/network_transformation/fanout_substitution.hpp b/include/fiction/algorithms/network_transformation/fanout_substitution.hpp index 5c0f4318e2..d9b2ede371 100644 --- a/include/fiction/algorithms/network_transformation/fanout_substitution.hpp +++ b/include/fiction/algorithms/network_transformation/fanout_substitution.hpp @@ -8,7 +8,6 @@ #include "fiction/algorithms/network_transformation/network_conversion.hpp" #include "fiction/traits.hpp" -#include #include #include #include @@ -16,8 +15,7 @@ #include #include #include -#include -#include +#include #include #include #include @@ -39,7 +37,7 @@ struct fanout_substitution_params /** * Breadth-first vs. depth-first fanout-tree substitution strategies. */ - enum substitution_strategy + enum class substitution_strategy : uint8_t { /** * Breadth-first substitution. Creates balanced fanout trees. 
@@ -58,7 +56,7 @@ struct fanout_substitution_params /** * Substitution strategy of high-degree fanout networks (depth-first vs. breadth-first). */ - substitution_strategy strategy = BREADTH; + substitution_strategy strategy = substitution_strategy::BREADTH; /** * Maximum output degree of each fan-out node. */ @@ -109,7 +107,7 @@ class fanout_substitution_impl available_fanouts{ntk_topo}, ps{p} { - if (ps.strategy == fanout_substitution_params::RANDOM) + if (ps.strategy == fanout_substitution_params::substitution_strategy::RANDOM) { rng.emplace(ps.seed.value_or(std::random_device{}())); } @@ -259,15 +257,15 @@ class fanout_substitution_impl mockturtle::signal get_fanout(const NtkDest& substituted, const mockturtle::node& n, mockturtle::signal& child) { - if (substituted.fanout_size(child) >= ps.threshold) + if (substituted.fanout_size(substituted.get_node(child)) >= ps.threshold) { if (auto fanouts = available_fanouts[n]; !fanouts.empty()) { // find non-overfull fanout node - do + while (true) { child = fanouts.front(); - if (substituted.fanout_size(child) >= ps.degree) + if (substituted.fanout_size(substituted.get_node(child)) >= ps.degree) { fanouts.pop(); } @@ -275,7 +273,7 @@ class fanout_substitution_impl { break; } - } while (true); + } } } @@ -341,7 +339,7 @@ class fanout_substitution_impl auto& dist = rng->dist; // maintain a vector of available fanout nodes and randomly select one std::vector> available_vec{child}; - dist.param(typename std::uniform_int_distribution::param_type(0, available_vec.size() - 1)); + dist.param(std::uniform_int_distribution::param_type(0, available_vec.size() - 1)); for (auto f = 0u; f < num_fanouts; ++f) { @@ -363,8 +361,7 @@ class fanout_substitution_impl if (!available_vec.empty()) { - dist.param( - typename std::uniform_int_distribution::param_type(0, available_vec.size() - 1)); + dist.param(std::uniform_int_distribution::param_type(0, available_vec.size() - 1)); } } // transfer the available nodes to a queue for later use in 
get_fanout diff --git a/include/fiction/algorithms/path_finding/a_star.hpp b/include/fiction/algorithms/path_finding/a_star.hpp index ffb6b43514..d896ef5191 100644 --- a/include/fiction/algorithms/path_finding/a_star.hpp +++ b/include/fiction/algorithms/path_finding/a_star.hpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -66,7 +67,7 @@ class a_star_impl assert(layout.is_within_bounds(objective.source) && layout.is_within_bounds(objective.target) && "Both source and target coordinate have to be within the layout bounds"); - do + while (!open_list.empty()) // until the open list is empty { // get coordinate with lowest f-value const auto current = get_lowest_f_coord(); @@ -81,8 +82,7 @@ class a_star_impl // expand from current coordinate expand(current); - - } while (!open_list.empty()); // until the open list is empty + } return {}; // open list is empty, no path has been found } @@ -411,8 +411,8 @@ template ::infinity()` if that value is supported by `Dist`, or `std::numeric_limits::max()`, - * otherwise. + * `std::numeric_limits::infinity()` for floating-point types or `std::numeric_limits::max()` for integral + * types. * * @tparam Lyt Coordinate layout type. * @tparam Dist Distance type. 
@@ -432,12 +432,14 @@ template if (path_length == 0ul) { - if constexpr (std::numeric_limits::has_infinity) + if constexpr (std::is_floating_point_v) { return std::numeric_limits::infinity(); } - - return std::numeric_limits::max(); + else + { + return std::numeric_limits::max(); + } } return static_cast(path_length - 1); diff --git a/include/fiction/algorithms/physical_design/design_sidb_gates.hpp b/include/fiction/algorithms/physical_design/design_sidb_gates.hpp index e946edbec9..7413200b4e 100644 --- a/include/fiction/algorithms/physical_design/design_sidb_gates.hpp +++ b/include/fiction/algorithms/physical_design/design_sidb_gates.hpp @@ -194,7 +194,7 @@ class design_sidb_gates_impl } /** - * Design gates by using the *Automatic Exhaustive Gate Desginer*. This algorithm was proposed in \"Minimal + * Design gates by using the *Automatic Exhaustive Gate Designer*. This algorithm was proposed in \"Minimal * Design of SiDB Gates: An Optimal Basis for Circuits Based on Silicon Dangling Bonds\" by J. Drewniok, M. Walter, * and R. Wille in NANOARCH 2023 (https://dl.acm.org/doi/10.1145/3611315.3633241). 
* @@ -237,7 +237,7 @@ class design_sidb_gates_impl status == operational_status::OPERATIONAL) { { - const std::lock_guard lock_vector{mutex_to_protect_designed_gate_layouts}; + const std::scoped_lock lock_vector{mutex_to_protect_designed_gate_layouts}; designed_gate_layouts.push_back(layout_with_added_cells); } @@ -350,7 +350,7 @@ class design_sidb_gates_impl input_bdl_wires, output_bdl_wires); status == operational_status::OPERATIONAL) { - const std::lock_guard lock{mutex_to_protect_designed_gate_layouts}; + const std::scoped_lock lock{mutex_to_protect_designed_gate_layouts}; if constexpr (has_get_sidb_defect_v) { @@ -455,7 +455,7 @@ class design_sidb_gates_impl { // Lock and update shared resources { - const std::lock_guard lock{mutex_to_protect_gate_designs}; + const std::scoped_lock lock{mutex_to_protect_gate_designs}; gate_layouts.push_back(candidate); } gate_design_found = true; // Notify all threads that a solution has been found @@ -635,7 +635,7 @@ class design_sidb_gates_impl } } - const std::lock_guard lock{mutex_to_protect_gate_candidates}; + const std::scoped_lock lock{mutex_to_protect_gate_candidates}; gate_candidate.push_back(current_layout); }; diff --git a/include/fiction/algorithms/simulation/sidb/defect_clearance.hpp b/include/fiction/algorithms/simulation/sidb/defect_clearance.hpp index 8274d6522a..1181c8fc5d 100644 --- a/include/fiction/algorithms/simulation/sidb/defect_clearance.hpp +++ b/include/fiction/algorithms/simulation/sidb/defect_clearance.hpp @@ -46,11 +46,11 @@ template [[nodiscard]] defect_clearance> calculate_defect_clearance(const Lyt& lyt, const defect_influence_domain& defect_inf_domain) noexcept { - double max_distance = 0; - cell max_distance_postion = {}; + double max_distance = 0; + cell max_distance_position = {}; defect_inf_domain.for_each( - [&lyt, &max_distance, &max_distance_postion](const auto& defect_pos, const auto& val) + [&lyt, &max_distance, &max_distance_position](const auto& defect_pos, const auto& val) { if 
(std::get<0>(val) == defect_influence_status::NON_INFLUENTIAL) { @@ -72,12 +72,12 @@ calculate_defect_clearance(const Lyt& lyt, const defect_influence_domain& d if (min_distance > max_distance) { - max_distance = min_distance; - max_distance_postion = min_distance_position; + max_distance = min_distance; + max_distance_position = min_distance_position; } }); - return defect_clearance>{max_distance_postion, max_distance}; + return defect_clearance>{max_distance_position, max_distance}; } } // namespace fiction diff --git a/include/fiction/algorithms/simulation/sidb/is_operational.hpp b/include/fiction/algorithms/simulation/sidb/is_operational.hpp index 96c87934d7..31159025f7 100644 --- a/include/fiction/algorithms/simulation/sidb/is_operational.hpp +++ b/include/fiction/algorithms/simulation/sidb/is_operational.hpp @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -637,12 +638,12 @@ class is_operational_impl charge_distribution_mode::UPDATE_CHARGE_DISTRIBUTION); } - if (min_energy < std::numeric_limits::infinity()) + if (std::isinf(min_energy)) { - return min_energy; + return std::nullopt; } - return std::nullopt; + return min_energy; } /** diff --git a/include/fiction/algorithms/simulation/sidb/minimum_energy.hpp b/include/fiction/algorithms/simulation/sidb/minimum_energy.hpp index 1fd6432362..34e0c77c91 100644 --- a/include/fiction/algorithms/simulation/sidb/minimum_energy.hpp +++ b/include/fiction/algorithms/simulation/sidb/minimum_energy.hpp @@ -16,12 +16,13 @@ namespace fiction /** * Computes the minimum energy of a range of `charge_distribution_surface` objects. If the range is empty, infinity is - * returned. + * returned to indicate no valid energy value exists. * * @tparam InputIt Must meet the requirements of `LegacyInputIterator`. - * @param first Begin of the range to examime. + * @param first Begin of the range to examine. * @param last End of the range to examine. 
- * @return Value of the minimum energy found in the input range (unit: eV), or infinity if the range is empty. + * @return Value of the minimum energy found in the input range (unit: eV), or `std::numeric_limits::infinity()` + * if the range is empty. */ template [[nodiscard]] double minimum_energy(const InputIt first, const InputIt last) noexcept @@ -43,7 +44,7 @@ template * `charge_distribution_surface` objects. If the range is empty, `last` is returned. * * @tparam InputIt Must meet the requirements of `LegacyInputIterator`. - * @param first Begin of the range to examime. + * @param first Begin of the range to examine. * @param last End of the range to examine. * @return Iterator to the minimum energy charge distribution found in the input range, or `last` if the range is empty. */ diff --git a/include/fiction/algorithms/simulation/sidb/operational_domain.hpp b/include/fiction/algorithms/simulation/sidb/operational_domain.hpp index 822312d827..0f7a41a7d5 100644 --- a/include/fiction/algorithms/simulation/sidb/operational_domain.hpp +++ b/include/fiction/algorithms/simulation/sidb/operational_domain.hpp @@ -280,7 +280,7 @@ class critical_temperature_domain : public sidb_simulation_domain::max(); + double min_ct = std::numeric_limits::infinity(); this->for_each( [&min_ct](const auto&, const auto& op_value) diff --git a/include/fiction/algorithms/simulation/sidb/random_sidb_layout_generator.hpp b/include/fiction/algorithms/simulation/sidb/random_sidb_layout_generator.hpp index 21c00cc0ce..26041e1781 100644 --- a/include/fiction/algorithms/simulation/sidb/random_sidb_layout_generator.hpp +++ b/include/fiction/algorithms/simulation/sidb/random_sidb_layout_generator.hpp @@ -38,7 +38,7 @@ struct generate_random_sidb_layout_params */ ALLOWED, /** - * Positive charges are not allowed to occur (i.e. SiDBs need to be seperated by a few lattice points). + * Positive charges are not allowed to occur (i.e. SiDBs need to be separated by a few lattice points). 
*/ FORBIDDEN, /** @@ -132,16 +132,16 @@ generate_random_sidb_layout(const generate_random_sidb_layout_params) { - random_cell_is_identical_wih_defect = (lyt.get_sidb_defect(random_coord).type != sidb_defect_type::NONE); + random_cell_is_identical_with_defect = (lyt.get_sidb_defect(random_coord).type != sidb_defect_type::NONE); } // if the constraints that no positive SiDBs occur and the cell is not yet occupied by a defect are satisfied, // the SiDB is added to the layout - if (!random_cell_is_identical_wih_defect && !next_to_neutral_defect) + if (!random_cell_is_identical_with_defect && !next_to_neutral_defect) { if (skeleton.has_value()) { diff --git a/include/fiction/algorithms/simulation/sidb/time_to_solution.hpp b/include/fiction/algorithms/simulation/sidb/time_to_solution.hpp index 1daecb66f4..a7591f7848 100644 --- a/include/fiction/algorithms/simulation/sidb/time_to_solution.hpp +++ b/include/fiction/algorithms/simulation/sidb/time_to_solution.hpp @@ -106,7 +106,7 @@ void time_to_solution(const Lyt& lyt, const quicksim_params& quicksim_params, if (lyt.num_cells() == 0) { st.single_runtime_exact = 0.0; - st.time_to_solution = std::numeric_limits::max(); + st.time_to_solution = std::numeric_limits::infinity(); st.acc = 0.0; st.mean_single_runtime = 0.0; st.algorithm = sidb_simulation_engine_name(tts_params.engine); @@ -218,7 +218,7 @@ void time_to_solution_for_given_simulation_results(const sidb_simulation_result< } else if (acc == 0) { - tts = std::numeric_limits::max(); + tts = std::numeric_limits::infinity(); } else { diff --git a/include/fiction/io/dot_drawers.hpp b/include/fiction/io/dot_drawers.hpp index a14759b4c5..499bf914f0 100644 --- a/include/fiction/io/dot_drawers.hpp +++ b/include/fiction/io/dot_drawers.hpp @@ -7,9 +7,7 @@ #include "fiction/traits.hpp" #include "fiction/utils/network_utils.hpp" -#include "utils/version_info.hpp" - -#include +#include "fiction/utils/version_info.hpp" #include #include @@ -19,7 +17,10 @@ #include #include -#include 
+#include +#include +#include +#include #include #include #include @@ -1127,7 +1128,7 @@ void write_dot_layout(const Lyt& lyt, std::ostream& os, const Drawer& drawer = { template void write_dot_layout(const Lyt& lyt, const std::string_view& filename, const Drawer& drawer = {}) { - std::ofstream os{filename.data(), std::ofstream::out}; + std::ofstream os{std::string(filename), std::ofstream::out}; if (!os.is_open()) { diff --git a/include/fiction/io/write_fgl_layout.hpp b/include/fiction/io/write_fgl_layout.hpp index 2e253f3639..bda1347a7d 100644 --- a/include/fiction/io/write_fgl_layout.hpp +++ b/include/fiction/io/write_fgl_layout.hpp @@ -7,7 +7,7 @@ #include "fiction/traits.hpp" #include "fiction/utils/stl_utils.hpp" -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp" #include #include diff --git a/include/fiction/io/write_fqca_layout.hpp b/include/fiction/io/write_fqca_layout.hpp index b8aab4429d..79d9f670e9 100644 --- a/include/fiction/io/write_fqca_layout.hpp +++ b/include/fiction/io/write_fqca_layout.hpp @@ -7,7 +7,7 @@ #include "fiction/technology/cell_technologies.hpp" #include "fiction/traits.hpp" -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp" #include @@ -322,7 +322,7 @@ void write_fqca_layout(const Lyt& lyt, std::ostream& os, write_fqca_layout_param template void write_fqca_layout(const Lyt& lyt, const std::string_view& filename, write_fqca_layout_params ps = {}) { - std::ofstream os{filename.data(), std::ofstream::out}; + std::ofstream os{std::string(filename), std::ofstream::out}; if (!os.is_open()) { diff --git a/include/fiction/io/write_qca_layout.hpp b/include/fiction/io/write_qca_layout.hpp index 618d0b8c05..1da9ddff7f 100644 --- a/include/fiction/io/write_qca_layout.hpp +++ b/include/fiction/io/write_qca_layout.hpp @@ -7,10 +7,12 @@ #include "fiction/technology/cell_technologies.hpp" #include "fiction/traits.hpp" -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp"
#include +#include +#include #include #include #include @@ -358,13 +360,13 @@ class write_qca_layout_impl { for (int j2 = 1; j2 > -2; j2 -= 2) { - int j = i == 1 ? -j2 : j2; + const int j = (i == 1) ? -j2 : j2; // open dot os << qcad::OPEN_CELL_DOT; - os << qcad::X_POS << std::to_string(pos.x + (qcad::CELL_SIZE / 4.0f) * static_cast(i)) << '\n'; - os << qcad::Y_POS << std::to_string(pos.y + (qcad::CELL_SIZE / 4.0f) * static_cast(j)) << '\n'; + os << qcad::X_POS << std::to_string(pos.x + ((qcad::CELL_SIZE / 4.0f) * static_cast(i))) << '\n'; + os << qcad::Y_POS << std::to_string(pos.y + ((qcad::CELL_SIZE / 4.0f) * static_cast(j))) << '\n'; os << qcad::DIAMETER << qcad::DOT_SIZE << '\n'; // determine charge @@ -410,10 +412,17 @@ class write_qca_layout_impl const auto cell_type = lyt.get_cell_type(c); // override cell_name if cell is constant; if cell has a name - if (auto cell_name = qca_technology::is_const_0_cell(cell_type) ? "-1.00" : - qca_technology::is_const_1_cell(cell_type) ? "1.00" : - lyt.get_cell_name(c); - !cell_name.empty()) + auto cell_name = lyt.get_cell_name(c); + if (qca_technology::is_const_0_cell(cell_type)) + { + cell_name = "-1.00"; + } + else if (qca_technology::is_const_1_cell(cell_type)) + { + cell_name = "1.00"; + } + + if (!cell_name.empty()) { // open label os << qcad::OPEN_QCAD_LABEL; @@ -427,7 +436,7 @@ class write_qca_layout_impl os << qcad::BOUNDING_BOX_X << std::to_string(pos.x - qcad::BB_X_OFFSET) << '\n'; os << qcad::BOUNDING_BOX_Y << std::to_string(pos.y - qcad::BB_Y_OFFSET) << '\n'; os << qcad::BOUNDING_BOX_CX - << std::to_string(static_cast(cell_name.size()) * qcad::CHARACTER_WIDTH + qcad::BB_CX_OFFSET) + << std::to_string((static_cast(cell_name.size()) * qcad::CHARACTER_WIDTH) + qcad::BB_CX_OFFSET) << '\n'; os << qcad::BOUNDING_BOX_CY << qcad::BB_CY_OFFSET << '\n'; @@ -450,8 +459,8 @@ class write_qca_layout_impl // calculate cell position const qcad::cell_pos pos{ - static_cast(c.x * static_cast(qcad::CELL_DISTANCE) + 
qcad::X_Y_OFFSET), - static_cast(c.y * static_cast(qcad::CELL_DISTANCE) + qcad::X_Y_OFFSET)}; + static_cast((c.x * static_cast(qcad::CELL_DISTANCE)) + qcad::X_Y_OFFSET), + static_cast((c.y * static_cast(qcad::CELL_DISTANCE)) + qcad::X_Y_OFFSET)}; // write cell position os << qcad::X_POS << std::to_string(pos.x) << '\n'; @@ -462,8 +471,8 @@ const auto color = write_cell_colors(c); // write cell bounding box - os << qcad::BOUNDING_BOX_X << std::to_string(pos.x - qcad::CELL_SIZE / 2.0f) << '\n'; - os << qcad::BOUNDING_BOX_Y << std::to_string(pos.y - qcad::CELL_SIZE / 2.0f) << '\n'; + os << qcad::BOUNDING_BOX_X << std::to_string(pos.x - (qcad::CELL_SIZE / 2.0f)) << '\n'; + os << qcad::BOUNDING_BOX_Y << std::to_string(pos.y - (qcad::CELL_SIZE / 2.0f)) << '\n'; os << qcad::BOUNDING_BOX_CX << qcad::CELL_SIZE << '\n'; os << qcad::BOUNDING_BOX_CY << qcad::CELL_SIZE << '\n'; @@ -554,7 +563,7 @@ void write_qca_layout(const Lyt& lyt, std::ostream& os, write_qca_layout_params template void write_qca_layout(const Lyt& lyt, const std::string_view& filename, write_qca_layout_params ps = {}) { - std::ofstream os{filename.data(), std::ofstream::out}; + std::ofstream os{std::string(filename), std::ofstream::out}; if (!os.is_open()) { diff --git a/include/fiction/io/write_qcc_layout.hpp b/include/fiction/io/write_qcc_layout.hpp index 3051499bad..5b4f4b7ec3 100644 --- a/include/fiction/io/write_qcc_layout.hpp +++ b/include/fiction/io/write_qcc_layout.hpp @@ -10,16 +10,19 @@ #include "fiction/technology/magcad_magnet_count.hpp" #include "fiction/traits.hpp" #include "fiction/types.hpp" -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp" #include #include #include +#include #include #include #include +#include #include +#include #include #include #include @@ -102,7 +105,7 @@ class write_qcc_layout_impl sorted_po_list{sorted_pos()}, num_magnets{magcad_magnet_count(lyt)}, os{s}, - ps{std::move(p)} + ps{p} {} void run() @@ -376,7 +379,7
@@ void write_qcc_layout(const Lyt& lyt, std::ostream& os, write_qcc_layout_params template void write_qcc_layout(const Lyt& lyt, const std::string_view& filename, write_qcc_layout_params ps = {}) { - std::ofstream os{filename.data(), std::ofstream::out}; + std::ofstream os{std::string(filename), std::ofstream::out}; if (!os.is_open()) { diff --git a/include/fiction/io/write_qll_layout.hpp b/include/fiction/io/write_qll_layout.hpp index c7fe8f45d4..1cd4bcd06e 100644 --- a/include/fiction/io/write_qll_layout.hpp +++ b/include/fiction/io/write_qll_layout.hpp @@ -7,18 +7,16 @@ #include "fiction/layouts/bounding_box.hpp" #include "fiction/technology/cell_technologies.hpp" -#include "fiction/technology/magcad_magnet_count.hpp" #include "fiction/traits.hpp" -#include "fiction/types.hpp" -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp" #include #include #include -#include +#include #include -#include +#include #include #include #include @@ -144,7 +142,21 @@ class write_qll_layout_impl uint64_t cell_id{1}; - const char* tech_name{has_inml_technology_v ? "iNML" : has_qca_technology_v ?
"MolFCN" : "?"}; + const char* tech_name = []() + { + if constexpr (has_inml_technology_v) + { + return "iNML"; + } + else if constexpr (has_qca_technology_v) + { + return "MolFCN"; + } + else + { + return "?"; + } + }(); [[nodiscard]] std::vector> sorted_pis() const noexcept { @@ -359,7 +371,8 @@ class write_qll_layout_impl // write via cell if (qca_technology::is_vertical_cell_mode(mode) && c.z != lyt.z()) { - os << fmt::format(qll::OPEN_MQCA_LAYOUT_ITEM, 0, cell_id++, bb_x(c), bb_y(c), c.z * 2 + 1); + os << fmt::format(qll::OPEN_MQCA_LAYOUT_ITEM, 0, cell_id++, bb_x(c), bb_y(c), + (c.z * 2) + 1); os << fmt::format(qll::LAYOUT_ITEM_PROPERTY, qll::PROPERTY_PHASE, lyt.get_clock_number(c)); os << qll::CLOSE_LAYOUT_ITEM; } @@ -414,7 +427,7 @@ void write_qll_layout(const Lyt& lyt, std::ostream& os) template void write_qll_layout(const Lyt& lyt, const std::string_view& filename) { - std::ofstream os{filename.data(), std::ofstream::out}; + std::ofstream os{std::string(filename), std::ofstream::out}; if (!os.is_open()) { diff --git a/include/fiction/io/write_sqd_layout.hpp b/include/fiction/io/write_sqd_layout.hpp index 3e12fb9760..5e72bf7a8e 100644 --- a/include/fiction/io/write_sqd_layout.hpp +++ b/include/fiction/io/write_sqd_layout.hpp @@ -9,7 +9,7 @@ #include "fiction/technology/sidb_defects.hpp" #include "fiction/traits.hpp" #include "fiction/utils/stl_utils.hpp" -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp" #include #include diff --git a/include/fiction/io/write_sqd_sim_result.hpp b/include/fiction/io/write_sqd_sim_result.hpp index 6be7c0330b..820f6189ca 100644 --- a/include/fiction/io/write_sqd_sim_result.hpp +++ b/include/fiction/io/write_sqd_sim_result.hpp @@ -11,7 +11,7 @@ #include "fiction/technology/sidb_nm_position.hpp" #include "fiction/traits.hpp" #include "fiction/utils/stl_utils.hpp" -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp" #include #include diff --git
a/include/fiction/io/write_svg_layout.hpp b/include/fiction/io/write_svg_layout.hpp index 7ba243823a..48e4ae9443 100644 --- a/include/fiction/io/write_svg_layout.hpp +++ b/include/fiction/io/write_svg_layout.hpp @@ -9,7 +9,7 @@ #include "fiction/layouts/coordinates.hpp" #include "fiction/technology/sidb_charge_state.hpp" #include "fiction/traits.hpp" -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp" #include diff --git a/include/fiction/networks/views/bfs_topo_view.hpp b/include/fiction/networks/views/bfs_topo_view.hpp index 5b2d78e3d5..63d4740c55 100644 --- a/include/fiction/networks/views/bfs_topo_view.hpp +++ b/include/fiction/networks/views/bfs_topo_view.hpp @@ -112,7 +112,8 @@ class bfs_topo_view : public mockturtle::immutable_view */ uint32_t node_to_index(const node& n) const { - return std::distance(std::cbegin(topo_order), std::find(std::cbegin(topo_order), std::cend(topo_order), n)); + return static_cast( + std::distance(std::cbegin(topo_order), std::find(std::cbegin(topo_order), std::cend(topo_order), n))); } /** diff --git a/include/fiction/networks/views/mutable_rank_view.hpp b/include/fiction/networks/views/mutable_rank_view.hpp index 0449b0dedd..b4b0e94704 100644 --- a/include/fiction/networks/views/mutable_rank_view.hpp +++ b/include/fiction/networks/views/mutable_rank_view.hpp @@ -126,7 +126,7 @@ class mutable_rank_view : public fiction::static_depth_view * @param ntk Reference to the network. * @param ranks A vector of vectors specifying initial ranks for the nodes within the network. 
*/ - explicit mutable_rank_view(const Ntk& ntk, const std::vector>& ranks) : + explicit mutable_rank_view(const Ntk& ntk, const std::vector>& init_ranks_vec) : fiction::static_depth_view{ntk}, ranks{this->depth() + 1}, max_rank_width{0} @@ -140,7 +140,7 @@ class mutable_rank_view : public fiction::static_depth_view rank_pos.rehash(this->size()); - init_ranks(ranks); + init_ranks(init_ranks_vec); } /** diff --git a/include/fiction/networks/views/static_depth_view.hpp b/include/fiction/networks/views/static_depth_view.hpp index 38d3a839d2..0473156ec8 100644 --- a/include/fiction/networks/views/static_depth_view.hpp +++ b/include/fiction/networks/views/static_depth_view.hpp @@ -12,6 +12,7 @@ #include #include +#include #include @@ -68,10 +69,7 @@ template class static_depth_view : public Ntk { public: - explicit static_depth_view(Ntk const& ntk, depth_view_params const& ps = {}) : Ntk(ntk) - { - (void)ps; - } + explicit static_depth_view(Ntk const& ntk, [[maybe_unused]] depth_view_params const& params = {}) : Ntk(ntk) {} }; /** @@ -94,13 +92,13 @@ class static_depth_view : public Ntk * Initializes an empty `fiction::static_depth_view` object, sets up base class properties, * and ensures that the network type (Ntk) satisfies required interface methods. * - * @param cost_fn Optional cost function to compute node costs. - * @param ps Optional parameters for depth view construction + * @param node_cost_fn Optional cost function to compute node costs. 
+ * @param params Optional parameters for depth view construction */ - explicit static_depth_view(NodeCostFn const& cost_fn = {}, depth_view_params const& ps = {}) : + explicit static_depth_view(NodeCostFn const& node_cost_fn = {}, depth_view_params const& params = {}) : Ntk(), - ps(ps), - cost_fn(cost_fn) + ps(params), + cost_fn(node_cost_fn) { static_assert(mockturtle::is_network_type_v, "Ntk is not a network type"); static_assert(mockturtle::has_size_v, "Ntk does not implement the size method"); @@ -121,13 +119,14 @@ class static_depth_view : public Ntk * interface methods. * * @param ntk The network on which to construct the depth view. - * @param cost_fn Optional function to compute node costs. - * @param ps Optional parameters for depth view construction. + * @param node_cost_fn Optional function to compute node costs. + * @param params Optional parameters for depth view construction. */ - explicit static_depth_view(Ntk const& ntk, NodeCostFn const& cost_fn = {}, depth_view_params const& ps = {}) : + explicit static_depth_view(Ntk const& ntk, NodeCostFn const& node_cost_fn = {}, + depth_view_params const& params = {}) : Ntk(ntk), - ps(ps), - cost_fn(cost_fn) + ps(params), + cost_fn(node_cost_fn) { static_assert(mockturtle::is_network_type_v, "Ntk is not a network type"); static_assert(mockturtle::has_size_v, "Ntk does not implement the size method"); @@ -148,7 +147,7 @@ class static_depth_view : public Ntk * * @param other The other `fiction::static_depth_view` object to be copied. */ - static_depth_view(static_depth_view const& other) : + static_depth_view(static_depth_view const& other) : Ntk(other), ps(other.ps), levels(other.levels), @@ -164,7 +163,7 @@ class static_depth_view : public Ntk * @param other The source `fiction::static_depth_view` object whose contents are being copied. * @return A reference to the current object, enabling chain assignments. 
*/ - static_depth_view& operator=(static_depth_view const& other) + static_depth_view& operator=(static_depth_view const& other) { // Check for self-assignment if (this == &other) @@ -193,6 +192,57 @@ class static_depth_view : public Ntk return *this; } + /** + * Move constructor creates a new `fiction::static_depth_view` by moving the content of another + * `fiction::static_depth_view`. + * + * @param other The other `fiction::static_depth_view` object to be moved. + */ + static_depth_view(static_depth_view&& other) noexcept : + Ntk(std::move(other)), + ps(other.ps), // trivially copyable + levels(std::move(other.levels)), + crit_path(std::move(other.crit_path)), + ntk_depth(other.ntk_depth), + cost_fn(std::move(other.cost_fn)) + {} + + /** + * Move assignment operator for moving `fiction::static_depth_view` content of another + * `fiction::static_depth_view` object. + * + * @param other The source `fiction::static_depth_view` object whose contents are being moved. + * @return A reference to the current object, enabling chain assignments. + */ + static_depth_view& operator=(static_depth_view&& other) noexcept + { + // Check for self-assignment + if (this == &other) + { + return *this; + } + + // move the base class + this->_storage = std::move(other._storage); + this->_events = std::move(other._events); + + // move the virtual storage + if constexpr (has_is_virtual_pi_v) + { + this->v_storage = std::move(other.v_storage); + } + + // move + ps = other.ps; // trivially copyable + levels = std::move(other.levels); + crit_path = std::move(other.crit_path); + ntk_depth = other.ntk_depth; + cost_fn = std::move(other.cost_fn); + + // Return the current object + return *this; + } + /** * Destructor for `fiction::static_depth_view`. */ @@ -233,7 +283,7 @@ class static_depth_view : public Ntk /** * Set the depth of the network. 
*/ - void set_depth(uint32_t level) + void set_depth(const uint32_t level) { ntk_depth = level; } @@ -268,7 +318,7 @@ class static_depth_view : public Ntk auto clevel = levels[f]; if (ps.count_complements && this->is_complemented(f)) { - clevel++; + ++clevel; } level = std::max(level, clevel); }); @@ -312,7 +362,7 @@ class static_depth_view : public Ntk auto clevel = compute_levels(this->get_node(f)); if (ps.count_complements && this->is_complemented(f)) { - clevel++; + ++clevel; } level = std::max(level, clevel); }); @@ -332,7 +382,7 @@ class static_depth_view : public Ntk auto clevel = compute_levels(this->get_node(f)); if (ps.count_complements && this->is_complemented(f)) { - clevel++; + ++clevel; } ntk_depth = std::max(ntk_depth, clevel); }); @@ -345,7 +395,7 @@ class static_depth_view : public Ntk auto clevel = compute_levels(this->get_node(f)); if (ps.count_complements && this->is_complemented(f)) { - clevel++; + ++clevel; } ntk_depth = std::max(ntk_depth, clevel); }); @@ -391,7 +441,7 @@ class static_depth_view : public Ntk auto offset = cost_fn(*this, n); if (ps.count_complements && this->is_complemented(f)) { - offset++; + ++offset; } if (levels[cn] + offset == lvl && !crit_path[cn]) { diff --git a/include/fiction/technology/charge_distribution_surface.hpp b/include/fiction/technology/charge_distribution_surface.hpp index 50606a5453..626c74364d 100644 --- a/include/fiction/technology/charge_distribution_surface.hpp +++ b/include/fiction/technology/charge_distribution_surface.hpp @@ -390,7 +390,7 @@ class charge_distribution_surface : public Lyt * * @param cds Other `charge_distribution_surface`. */ - charge_distribution_surface(const charge_distribution_surface& cds) : + charge_distribution_surface(const charge_distribution_surface& cds) : // NOLINT(*-explicit-constructor) Lyt(cds), strg{std::make_shared(*cds.strg)} {} @@ -408,6 +408,22 @@ class charge_distribution_surface : public Lyt return *this; } + /** + * Move constructor. 
+ * + * @param other charge_distribution_surface to move from. + */ + charge_distribution_surface(charge_distribution_surface&& other) noexcept = default; + /** + * Move assignment operator. + * + * @param other charge_distribution_surface to move from. + */ + charge_distribution_surface& operator=(charge_distribution_surface&& other) noexcept = default; + /** + * Destructor. + */ + ~charge_distribution_surface() = default; /** * Clones the current charge distribution surface and returns a deep copy. * @@ -1210,7 +1226,7 @@ class charge_distribution_surface : public Lyt collect += strg->local_int_pot_at_defect[c] * static_cast(defect.charge); } - strg->system_energy = collect_ext + 0.5 * collect; + strg->system_energy = collect_ext + (0.5 * collect); } /** * This function returns the currently stored system's total electrostatic potential energy in eV. @@ -1555,8 +1571,8 @@ class charge_distribution_surface : public Lyt } const auto dist_min = - std::accumulate(negative_indices.begin(), negative_indices.end(), std::numeric_limits::max(), - [&](const double acc, const uint64_t occ) + std::accumulate(negative_indices.begin(), negative_indices.end(), + std::numeric_limits::infinity(), [&](const double acc, const uint64_t occ) { return std::min(acc, this->get_nm_distance_by_indices(unocc, occ)); }); index_vector.push_back(unocc); diff --git a/include/fiction/technology/sidb_cluster_hierarchy.hpp b/include/fiction/technology/sidb_cluster_hierarchy.hpp index 00fdea6a83..6de7bc459a 100644 --- a/include/fiction/technology/sidb_cluster_hierarchy.hpp +++ b/include/fiction/technology/sidb_cluster_hierarchy.hpp @@ -1155,7 +1155,7 @@ struct sidb_cluster * @param pst Projector state of which the corresponding compositions are requested. * @return The compositions associated with the multiset charge configuration of the projecting cluster. 
*/ -[[nodiscard]] static const std::vector& +[[nodiscard]] inline const std::vector& get_projector_state_compositions(const sidb_cluster_projector_state& pst) noexcept { return std::ref(pst.cluster->charge_space.find(sidb_cluster_charge_state{pst.multiset_conf})->compositions); @@ -1219,17 +1219,16 @@ to_unique_sidb_cluster(const uint64_t total_sidbs, const sidb_binary_cluster_hie * @param n A node from a binary cluster hierarchy, as for instance returned by parsing ALGLIB's result. * @return A uniquely identified node in a decorated cluster hierarchy that follows the "general tree" structure. */ -[[nodiscard]] static sidb_cluster_ptr to_sidb_cluster(const sidb_binary_cluster_hierarchy_node& n) noexcept +[[nodiscard]] inline sidb_cluster_ptr to_sidb_cluster(const sidb_binary_cluster_hierarchy_node& n) noexcept { - uint64_t uid = n.c.size(); - if (uid != 1) + if (uint64_t uid = n.c.size(); uid != 1) { return to_unique_sidb_cluster(n.c.size(), n, uid); } // to avoid weird shared pointer deallocation behaviour, give a parent to a singleton cluster hierarchy - sidb_cluster_ptr parent = + auto parent = std::make_shared(std::vector{*n.c.cbegin()}, std::vector{}, sidb_clustering{std::make_shared( std::vector{*n.c.cbegin()}, std::vector{}, sidb_clustering{}, 0)}, diff --git a/include/fiction/technology/sidb_lattice.hpp b/include/fiction/technology/sidb_lattice.hpp index 60781e7669..103d082672 100644 --- a/include/fiction/technology/sidb_lattice.hpp +++ b/include/fiction/technology/sidb_lattice.hpp @@ -53,7 +53,7 @@ class sidb_lattice : public Lyt * * @param layout SiDB Cell-level layout. 
*/ - explicit sidb_lattice(const Lyt& layout) : Lyt(layout) + explicit sidb_lattice(const Lyt& lyt) : Lyt(lyt) { static_assert(is_cell_level_layout_v, "Lyt is not a cell-level layout"); static_assert(has_sidb_technology_v, "Lyt is not an SiDB layout"); diff --git a/libs/CMakeLists.txt b/libs/CMakeLists.txt deleted file mode 100644 index 65fc997340..0000000000 --- a/libs/CMakeLists.txt +++ /dev/null @@ -1,210 +0,0 @@ -include(${PROJECT_SOURCE_DIR}/cmake/CheckSubmodules.cmake) - -# Prevent CMake from finding a system-installed fmt package to avoid version conflicts -# We use the bundled fmt version from alice/mockturtle submodules -set(CMAKE_DISABLE_FIND_PACKAGE_fmt TRUE) - -# Include pybind11 -add_subdirectory(pybind11) - -# Include alice -set(ALICE_EXAMPLES OFF CACHE BOOL "" FORCE) -set(ALICE_TEST OFF CACHE BOOL "" FORCE) -check_if_present(alice) -add_subdirectory(alice) -target_link_system_libraries(libfiction INTERFACE alice) - -# Include mockturtle -set(MOCKTURTLE_EXAMPLES OFF CACHE BOOL "" FORCE) -set(MOCKTURTLE_EXPERIMENTS OFF CACHE BOOL "" FORCE) -set(MOCKTURTLE_TEST OFF CACHE BOOL "" FORCE) -check_if_present(mockturtle) -add_subdirectory(mockturtle) -target_link_system_libraries(libfiction INTERFACE mockturtle) - -# Include JSON by Niels Lohmann -set(JSON_BuildTests OFF CACHE INTERNAL "") -add_subdirectory(json EXCLUDE_FROM_ALL) -target_link_system_libraries(libfiction INTERFACE nlohmann_json::nlohmann_json) - -# Include parallel_hashmap by Gregory Popovitch -check_if_present(parallel-hashmap) -target_include_directories(libfiction SYSTEM INTERFACE parallel-hashmap/parallel_hashmap) - -# Include undirected_graph by Fabian Löschner -target_include_directories(libfiction SYSTEM INTERFACE undirected_graph/source) - -# Include combinations by Howard Hinnant -target_include_directories(libfiction SYSTEM INTERFACE combinations) - -# Include graph-coloring by Brian Crites -add_subdirectory(graph-coloring EXCLUDE_FROM_ALL) -target_include_directories(libfiction 
SYSTEM INTERFACE graph-coloring/Header) -target_link_system_libraries(libfiction INTERFACE graph-coloring) - -# Include tinyXML2 -set(tinyxml2_BUILD_TESTING OFF) -check_if_present(tinyxml2) -add_subdirectory(tinyxml2 EXCLUDE_FROM_ALL) -set_property(TARGET tinyxml2 PROPERTY POSITION_INDEPENDENT_CODE ON) -target_link_system_libraries(libfiction INTERFACE tinyxml2::tinyxml2) - -# Include Catch2 -if (FICTION_TEST) - check_if_present(Catch2) - add_subdirectory(Catch2) -endif () - -# Mugen is not available under Windows -if (NOT WIN32) - # Option to enable Mugen - option(FICTION_ENABLE_MUGEN "Enable the usage of Mugen, a Python3 library by Winston Haaswijk for FCN one-pass synthesis, and its dependencies" OFF) - if (FICTION_ENABLE_MUGEN) - target_compile_definitions(libfiction INTERFACE MUGEN) - endif () - - if (FICTION_ENABLE_MUGEN) - - # Apple does not need glucose because it seems to have issues there anyways - if (NOT APPLE) - # Build glucose-syrup-4.1-parallel if Mugen is enabled - message(STATUS "Building glucose for Mugen") - add_custom_command( - OUTPUT ${PROJECT_BINARY_DIR}/glucose-syrup - PRE_BUILD - COMMAND make - COMMAND mv glucose-syrup ${PROJECT_BINARY_DIR}/glucose-syrup - COMMAND make clean - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/mugen/glucose-syrup-4.1/parallel/) - - # Make sure glucose's custom build commands are actually being executed - add_custom_target(glucose_syrup - ALL - DEPENDS ${PROJECT_BINARY_DIR}/glucose-syrup) - endif () - - # Embedding the pybind11 interpreter - target_link_system_libraries(libfiction INTERFACE pybind11::embed) - - configure_file(${CMAKE_CURRENT_SOURCE_DIR}/mugen/mugen_info.hpp.in utils/mugen_info.hpp) - target_include_directories(libfiction INTERFACE ${PROJECT_BINARY_DIR}/libs/) - - message(STATUS "Mugen was enabled. 
Please note that it relies on the Python3 libraries 'graphviz', 'PySAT v0.1.6.dev6', and 'wrapt_timeout_decorator' to be properly installed") - endif () -endif () - -# Enable the usage of Z3 -option(FICTION_Z3 "Find, include, and utilize the Z3 solver by Microsoft Research. It needs to be installed manually." OFF) -if (FICTION_Z3) - message(STATUS "Usage of the Z3 solver was enabled. Make sure that it is installed on your system!") - list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/") # include FindZ3.cmake - - # Try to locate Z3 (minimum required version is 4.8.5 due to its performance improvements on the QBF solver) - find_package(Z3 4.8.5) - - if (Z3_FOUND) - # Status update - message(STATUS "Found Z3 solver version: ${Z3_VERSION_STRING}") - message(STATUS "Found Z3 library: ${Z3_LIBRARIES}") - message(STATUS "Found Z3 include directories: ${Z3_CXX_INCLUDE_DIRS}") - - # Threads are used by Z3 and are, thus, required at this point - find_package(Threads REQUIRED) - - # Compile definition to guard include files - target_compile_definitions(libfiction INTERFACE FICTION_Z3_SOLVER) - # Include Z3 library - target_include_directories(libfiction INTERFACE SYSTEM ${Z3_CXX_INCLUDE_DIRS}) - # Link Z3 - target_link_system_libraries(libfiction INTERFACE ${Z3_LIBRARIES}) - - # use libc++ on macOS - if (APPLE) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -lc++abi") - endif () - else () - message(SEND_ERROR "Z3 solver could not be detected") - endif () -endif () - -# Enable the usage of ALGLIB by the ALGLIB Project -option(FICTION_ALGLIB "Automatically download, include, and utilize ALGLIB by the ALGLIB project.") -if (FICTION_ALGLIB) - message(STATUS "Usage of the Z3 solver was enabled.") - - # Compile definition to guard include files - target_compile_definitions(libfiction INTERFACE FICTION_ALGLIB_ENABLED) - - # Include and link ALGLIB - add_subdirectory(alglib-cmake EXCLUDE_FROM_ALL) - target_include_directories(libfiction SYSTEM INTERFACE 
alglib-cmake/src/cpp/src/headers) - target_link_system_libraries(libfiction INTERFACE ALGLIB) -endif () - -# If using GCC or Clang, find TBB if installed -if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") - list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/") # include FindTBB.cmake - find_package(TBB) - if (TBB_FOUND) - # If TBB version >= 2021.1 then GCC 9 and 10 do not work properly due to incompatible ABI changes - if (${TBB_VERSION_MAJOR} GREATER_EQUAL 2021 AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9.0.0 AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0.0) - target_compile_definitions(libfiction INTERFACE _GLIBCXX_USE_TBB_PAR_BACKEND=0) - message(STATUS "TBB version ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR} detected. Disabling parallel policies for GCC 9 and 10 due to incompatible interfaces.") - else () - # Status update - message(STATUS "Found TBB version: ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}") - message(STATUS "Parallel STL algorithms are enabled") - endif () - else () - # Status update - message(STATUS "Found TBB version: ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}") - message(STATUS "Parallel STL algorithms are enabled") - endif () - - # Include TBB - target_include_directories(libfiction INTERFACE ${TBB_INCLUDE_DIRS}) - # Link TBB - target_link_system_libraries(libfiction INTERFACE TBB::tbb) - else () - message(STATUS "Parallel STL algorithms are disabled. 
If you want to use them, please install TBB and set the TBB_ROOT_DIR, TBB_INCLUDE_DIR, and TBB_LIBRARY variables accordingly.") - endif () -elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # Status update - message(STATUS "Parallel STL algorithms are enabled on MSVC by default") -endif () - -# Enable jemalloc to be linked -option(FICTION_ENABLE_JEMALLOC "Automatically download and link jemalloc by Jason Evans.") -if (FICTION_ENABLE_JEMALLOC) - message(STATUS "jemalloc will override the standard malloc implementation globally. Note that your program may run slower when jemalloc is used!") - - # On Windows, jemalloc needs to be installed manually - if (CMAKE_SYSTEM_NAME STREQUAL "Windows") - message(STATUS "Automatic downloading and linking of jemalloc is not supported on Windows. Make sure that it is installed on your system!") - find_package(jemalloc CONFIG REQUIRED) - target_link_libraries(libfiction INTERFACE jemalloc) - else () - # On Unix-like operating systems, jemalloc can be downloaded, built and linked automatically - find_package(jemalloc) - if (NOT jemalloc_FOUND) - message(STATUS "Building and installing jemalloc will proceed automatically") - include(${PROJECT_SOURCE_DIR}/cmake/FetchJemalloc.cmake) - - set(JEMALLOC_LIBRARIES jemalloc) - set(JEMALLOC_INCLUDE_DIRS jemalloc) - endif () - - # Link jemalloc - if (APPLE) - target_link_system_libraries(libfiction INTERFACE ${JEMALLOC_LIBRARIES} c++ dl pthread m) - elseif (UNIX) - target_link_system_libraries(libfiction INTERFACE ${JEMALLOC_LIBRARIES} stdc++ dl pthread m) - else () - message(FATAL_ERROR "Unsupported environment") - endif () - - # Include jemalloc - target_include_directories(libfiction INTERFACE SYSTEM ${JEMALLOC_INCLUDE_DIRS}) - endif () -endif () diff --git a/libs/Catch2 b/libs/Catch2 deleted file mode 160000 index f80956a43a..0000000000 --- a/libs/Catch2 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f80956a43a3e276686ed59531b41617282d75a0c diff --git a/libs/alglib-cmake b/libs/alglib-cmake 
deleted file mode 160000 index 12a8b3b6d5..0000000000 --- a/libs/alglib-cmake +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 12a8b3b6d5ff7b50ead5684d3e6f891e03170175 diff --git a/libs/alice b/libs/alice deleted file mode 160000 index 835fd88694..0000000000 --- a/libs/alice +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 835fd886943673c73b94883d90a8d64259710c26 diff --git a/libs/graph-coloring/Header/hybrid_lmxrlf.hpp b/libs/graph-coloring/Header/hybrid_lmxrlf.hpp deleted file mode 100644 index b4e90c1c5f..0000000000 --- a/libs/graph-coloring/Header/hybrid_lmxrlf.hpp +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _HYBRID_LMXRLF_HPP_ -#define _HYBRID_LMXRLF_HPP_ - -#include "coloring_algorithm.hpp" -#include "lmxrlf.hpp" -#include "tabucol.hpp" - -using GraphColoring::GraphColor; -using GraphColoring::Lmxrlf; -using GraphColoring::Tabucol; - -namespace GraphColoring{ - class HybridLmxrlf : public GraphColor { - private: - int condition; - map> get_subgraph(map coloring); - - public: - /* Constructors */ - explicit HybridLmxrlf(map > graph, int condition = 0) : GraphColor(graph) { this->condition = condition; } - - /* Mutators */ - void set_condition(int condition) { this->condition = condition; } - map color(); - - /* Accessors */ - string get_algorithm() { return "Hybrid LMXRLF"; } - }; -} - -#endif //_HYBRID_LMXRLF_HPP_ diff --git a/libs/graph-coloring/Source/dsatur.cpp b/libs/graph-coloring/Source/dsatur.cpp deleted file mode 100644 index cbb1512439..0000000000 --- a/libs/graph-coloring/Source/dsatur.cpp +++ /dev/null @@ -1,130 +0,0 @@ - -#include -#include - -#include "../Header/dsatur.hpp" - -using std::cout; -using std::cerr; -using std::endl; - -map GraphColoring::Dsatur::color() { - if(this->graph.size() == 0) { - this->graph_colors = map(); - return map(); - } - - vector todo; - string max_degree = ""; - int degree = -1; - - // find maximal degree vertex to color first and color with 0 - for(map< string, vector >::iterator i = this->graph.begin(); i != 
this->graph.end(); i++) { - if((int)i->second.size() > degree) { - degree = i->second.size(); - max_degree = i->first; - } - } - if(max_degree == "") { - cerr << "Error: Could not find a max degree node in the graph (reason unknown)" << endl; - this->graph_colors = map(); - return map(); - } - this->graph_colors[max_degree] = 0; - - //Create saturation_level so that we can see which graph nodes have the - //highest saturation without having to scan through the entire graph - //each time - map saturation_level; - - //Add all nodes and set their saturation level to 0 - for(map >::iterator i = this->graph.begin(); i != this->graph.end(); i++) { - saturation_level[i->first] = 0; - } - - //For the single node that has been colored, increment its neighbors so - //that their current saturation level is correct - for(int i = 0; i < this->graph[max_degree].size(); i++) { - saturation_level[this->graph[max_degree][i]] += 1; - } - - //Set the saturation level of the already completed node to -infinity so - //that it is not chosen and recolored - saturation_level[max_degree] = INT_MIN; - - //Populate the todo list with the rest of the vertices that need to be colored - for(map< string, vector >::iterator i = this->graph.begin(); i != this->graph.end(); i++) { - if(i->first != max_degree) { - this->graph_colors[i->first] = -1; - todo.push_back(i->first); - } - } - - //Color all the remaining nodes in the todo list - while(!todo.empty()) { - int saturation = -1; - string saturation_name = ""; - vector saturation_colors; - //Find the vertex with the highest saturation level, since we keep the - //saturation levels along the way we can do this in a single pass - for(map::iterator i = saturation_level.begin(); i != saturation_level.end(); i++) { - //Find the highest saturated node and keep its name and neighbors colors - if(i->second > saturation) { - saturation = i->second; - saturation_name = i->first; - - //Since we're in this loop it means we've found a new most saturated - 
//node, which means we need to clear the old list of neighbors colors - //and replace it with the new highest saturated nodes neighbors colors - //Since uncolored nodes are given a -1, we can add all neighbors and - //start the check for lowest available color at greater than 0 - saturation_colors.clear(); - for(int j=0; j < this->graph[i->first].size(); j++) { - saturation_colors.push_back(this->graph_colors[this->graph[i->first][j]]); - } - } - } - if(saturation_name == "") { - cerr << "Error: Could not find a max saturated node in the graph (reason unknown)" << endl; - this->graph_colors = map(); - return graph_colors; - } - - //We now know the most saturated node, so we remove it from the todo list - for(vector::iterator itr = todo.begin(); itr != todo.end(); itr++) { - if((*itr) == saturation_name) { - todo.erase(itr); - break; - } - } - - //Find the lowest color that is not being used by any of the most saturated - //nodes neighbors, then color the most saturated node - int lowest_color = 0; - int done = 0; - while(!done) { - done = 1; - for(unsigned i=0; i < saturation_colors.size(); i++) { - if(saturation_colors[i] == lowest_color) { - lowest_color += 1; - done = 0; - } - } - } - this->graph_colors[saturation_name] = lowest_color; - - //Since we have colored another node, that nodes neighbors have now - //become more saturated, so we increase each ones saturation level - //However we first check that that node has not already been colored - //(This check is only necessary for enormeous test cases, but is - //included here for robustness) - for(int i=0; i < this->graph[saturation_name].size(); i++) { - if(saturation_level[this->graph[saturation_name][i]] != INT_MIN) { - saturation_level[this->graph[saturation_name][i]] += 1; - } - } - saturation_level[saturation_name] = INT_MIN; - } - return this->graph_colors; -} - diff --git a/libs/json b/libs/json deleted file mode 160000 index 02ac0d6525..0000000000 --- a/libs/json +++ /dev/null @@ -1 +0,0 @@ -Subproject 
commit 02ac0d6525f2e046f136ca69b5105b4e4f315b2f diff --git a/libs/mockturtle b/libs/mockturtle deleted file mode 160000 index 1a91a74955..0000000000 --- a/libs/mockturtle +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1a91a7495560ae2e510316a32ca15651756dfebc diff --git a/libs/mugen/glucose-syrup-4.1/core/BoundedQueue.h b/libs/mugen/glucose-syrup-4.1/core/BoundedQueue.h deleted file mode 100644 index 5269c9b74e..0000000000 --- a/libs/mugen/glucose-syrup-4.1/core/BoundedQueue.h +++ /dev/null @@ -1,148 +0,0 @@ -/***************************************************************************************[BoundedQueue.h] - Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - LRI - Univ. Paris Sud, France (2009-2013) - Labri - Univ. Bordeaux, France - - Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - Labri - Univ. Bordeaux, France - -Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it -is based on. (see below). - -Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel -version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software -without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -- The above and below copyrights notices and this permission notice shall be included in all -copies or substantial portions of the Software; -- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of -the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event -using Glucose Parallel as an embedded SAT engine (single core or not). - - ---------------- Original Minisat Copyrights - -Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson -Copyright (c) 2007-2010, Niklas Sorensson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and -associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT -NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT -OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - **************************************************************************************************/ - - -#ifndef BoundedQueue_h -#define BoundedQueue_h - -#include "mtl/Vec.h" - -//================================================================================================= - -namespace Glucose { - -template -class bqueue { - vec elems; - int first; - int last; - unsigned long long sumofqueue; - int maxsize; - int queuesize; // Number of current elements (must be < maxsize !) - bool expComputed; - double exp,value; -public: - bqueue(void) : first(0), last(0), sumofqueue(0), maxsize(0), queuesize(0),expComputed(false) { } - - void initSize(int size) {growTo(size);exp = 2.0/(size+1);} // Init size of bounded size queue - - void push(T x) { - expComputed = false; - if (queuesize==maxsize) { - assert(last==first); // The queue is full, next value to enter will replace oldest one - sumofqueue -= elems[last]; - if ((++last) == maxsize) last = 0; - } else - queuesize++; - sumofqueue += x; - elems[first] = x; - if ((++first) == maxsize) {first = 0;last = 0;} - } - - T peek() { assert(queuesize>0); return elems[last]; } - void pop() {sumofqueue-=elems[last]; queuesize--; if ((++last) == maxsize) last = 0;} - - unsigned long long getsum() const {return sumofqueue;} - unsigned int getavg() const {return (unsigned int)(sumofqueue/((unsigned long long)queuesize));} - int maxSize() const {return maxsize;} - double getavgDouble() const { - double tmp = 0; - for(int i=0;i - -#include "utils/System.h" -#include "mtl/Sort.h" -#include "core/Solver.h" -#include "core/Constants.h" -#include"simp/SimpSolver.h" - -using namespace Glucose; - - 
-//================================================================================================= -// Statistics -//================================================================================================= - - - -//================================================================================================= -// Options: - -static const char *_cat = "CORE"; -static const char *_cr = "CORE -- RESTART"; -static const char *_cred = "CORE -- REDUCE"; -static const char *_cm = "CORE -- MINIMIZE"; - - -static DoubleOption opt_K(_cr, "K", "The constant used to force restart", 0.8, DoubleRange(0, false, 1, false)); -static DoubleOption opt_R(_cr, "R", "The constant used to block restart", 1.4, DoubleRange(1, false, 5, false)); -static IntOption opt_size_lbd_queue(_cr, "szLBDQueue", "The size of moving average for LBD (restarts)", 50, IntRange(10, INT32_MAX)); -static IntOption opt_size_trail_queue(_cr, "szTrailQueue", "The size of moving average for trail (block restarts)", 5000, IntRange(10, INT32_MAX)); - -static IntOption opt_first_reduce_db(_cred, "firstReduceDB", "The number of conflicts before the first reduce DB (or the size of leernts if chanseok is used)", - 2000, IntRange(0, INT32_MAX)); -static IntOption opt_inc_reduce_db(_cred, "incReduceDB", "Increment for reduce DB", 300, IntRange(0, INT32_MAX)); -static IntOption opt_spec_inc_reduce_db(_cred, "specialIncReduceDB", "Special increment for reduce DB", 1000, IntRange(0, INT32_MAX)); -static IntOption opt_lb_lbd_frozen_clause(_cred, "minLBDFrozenClause", "Protect clauses if their LBD decrease and is lower than (for one turn)", 30, - IntRange(0, INT32_MAX)); -static BoolOption opt_chanseok_hack(_cred, "chanseok", - "Use Chanseok Oh strategy for LBD (keep all LBD<=co and remove half of firstreduceDB other learnt clauses", false); -static IntOption opt_chanseok_limit(_cred, "co", "Chanseok Oh: all learnt clauses with LBD<=co are permanent", 5, IntRange(2, INT32_MAX)); - - -static IntOption 
opt_lb_size_minimzing_clause(_cm, "minSizeMinimizingClause", "The min size required to minimize clause", 30, IntRange(3, INT32_MAX)); -static IntOption opt_lb_lbd_minimzing_clause(_cm, "minLBDMinimizingClause", "The min LBD required to minimize clause", 6, IntRange(3, INT32_MAX)); - - -static DoubleOption opt_var_decay(_cat, "var-decay", "The variable activity decay factor (starting point)", 0.8, DoubleRange(0, false, 1, false)); -static DoubleOption opt_max_var_decay(_cat, "max-var-decay", "The variable activity decay factor", 0.95, DoubleRange(0, false, 1, false)); -static DoubleOption opt_clause_decay(_cat, "cla-decay", "The clause activity decay factor", 0.999, DoubleRange(0, false, 1, false)); -static DoubleOption opt_random_var_freq(_cat, "rnd-freq", "The frequency with which the decision heuristic tries to choose a random variable", 0, - DoubleRange(0, true, 1, true)); -static DoubleOption opt_random_seed(_cat, "rnd-seed", "Used by the random variable selection", 91648253, DoubleRange(0, false, HUGE_VAL, false)); -static IntOption opt_ccmin_mode(_cat, "ccmin-mode", "Controls conflict clause minimization (0=none, 1=basic, 2=deep)", 2, IntRange(0, 2)); -static IntOption opt_phase_saving(_cat, "phase-saving", "Controls the level of phase saving (0=none, 1=limited, 2=full)", 2, IntRange(0, 2)); -static BoolOption opt_rnd_init_act(_cat, "rnd-init", "Randomize the initial activity", false); -static DoubleOption opt_garbage_frac(_cat, "gc-frac", "The fraction of wasted memory allowed before a garbage collection is triggered", 0.20, - DoubleRange(0, false, HUGE_VAL, false)); -static BoolOption opt_glu_reduction(_cat, "gr", "glucose strategy to fire clause database reduction (must be false to fire Chanseok strategy)", true); -static BoolOption opt_luby_restart(_cat, "luby", "Use the Luby restart sequence", false); -static DoubleOption opt_restart_inc(_cat, "rinc", "Restart interval increase factor", 2, DoubleRange(1, false, HUGE_VAL, false)); -static IntOption 
opt_luby_restart_factor(_cred, "luby-factor", "Luby restart factor", 100, IntRange(1, INT32_MAX)); - -static IntOption opt_randomize_phase_on_restarts(_cat, "phase-restart", - "The amount of randomization for the phase at each restart (0=none, 1=first branch, 2=first branch (no bad clauses), 3=first branch (only initial clauses)", - 0, IntRange(0, 3)); -static BoolOption opt_fixed_randomize_phase_on_restarts(_cat, "fix-phas-rest", "Fixes the first 7 levels at random phase", false); - -static BoolOption opt_adapt(_cat, "adapt", "Adapt dynamically stategies after 100000 conflicts", true); - -static BoolOption opt_forceunsat(_cat,"forceunsat","Force the phase for UNSAT",true); -//================================================================================================= -// Constructor/Destructor: - -Solver::Solver() : - -// Parameters (user settable): -// -verbosity(0) -, showModel(0) -, K(opt_K) -, R(opt_R) -, sizeLBDQueue(opt_size_lbd_queue) -, sizeTrailQueue(opt_size_trail_queue) -, firstReduceDB(opt_first_reduce_db) -, incReduceDB(opt_chanseok_hack ? 0 : opt_inc_reduce_db) -, specialIncReduceDB(opt_chanseok_hack ? 
0 : opt_spec_inc_reduce_db) -, lbLBDFrozenClause(opt_lb_lbd_frozen_clause) -, chanseokStrategy(opt_chanseok_hack) -, coLBDBound (opt_chanseok_limit) -, lbSizeMinimizingClause(opt_lb_size_minimzing_clause) -, lbLBDMinimizingClause(opt_lb_lbd_minimzing_clause) -, var_decay(opt_var_decay) -, max_var_decay(opt_max_var_decay) -, clause_decay(opt_clause_decay) -, random_var_freq(opt_random_var_freq) -, random_seed(opt_random_seed) -, ccmin_mode(opt_ccmin_mode) -, phase_saving(opt_phase_saving) -, rnd_pol(false) -, rnd_init_act(opt_rnd_init_act) -, randomizeFirstDescent(false) -, garbage_frac(opt_garbage_frac) -, certifiedOutput(NULL) -, certifiedUNSAT(false) // Not in the first parallel version -, vbyte(false) -, panicModeLastRemoved(0), panicModeLastRemovedShared(0) -, useUnaryWatched(false) -, promoteOneWatchedClause(true) -,solves(0),starts(0),decisions(0),propagations(0),conflicts(0),conflictsRestarts(0) -, curRestart(1) -, glureduce(opt_glu_reduction) -, restart_inc(opt_restart_inc) -, luby_restart(opt_luby_restart) -, adaptStrategies(opt_adapt) -, luby_restart_factor(opt_luby_restart_factor) -, randomize_on_restarts(opt_randomize_phase_on_restarts) -, fixed_randomize_on_restarts(opt_fixed_randomize_phase_on_restarts) -, newDescent(0) -, randomDescentAssignments(0) -, forceUnsatOnNewDescent(opt_forceunsat) - -, ok(true) -, cla_inc(1) -, var_inc(1) -, watches(WatcherDeleted(ca)) -, watchesBin(WatcherDeleted(ca)) -, unaryWatches(WatcherDeleted(ca)) -, qhead(0) -, simpDB_assigns(-1) -, simpDB_props(0) -, order_heap(VarOrderLt(activity)) -, progress_estimate(0) -, remove_satisfied(true) -,lastLearntClause(CRef_Undef) -// Resource constraints: -// -, conflict_budget(-1) -, propagation_budget(-1) -, asynch_interrupt(false) -, incremental(false) -, nbVarsInitialFormula(INT32_MAX) -, totalTime4Sat(0.) -, totalTime4Unsat(0.) -, nbSatCalls(0) -, nbUnsatCalls(0) -{ - MYFLAG = 0; - // Initialize only first time. 
Useful for incremental solving (not in // version), useless otherwise - // Kept here for simplicity - lbdQueue.initSize(sizeLBDQueue); - trailQueue.initSize(sizeTrailQueue); - sumLBD = 0; - nbclausesbeforereduce = firstReduceDB; - stats.growTo(coreStatsSize, 0); -} - -//------------------------------------------------------- -// Special constructor used for cloning solvers -//------------------------------------------------------- - -Solver::Solver(const Solver &s) : - verbosity(s.verbosity) -, showModel(s.showModel) -, K(s.K) -, R(s.R) -, sizeLBDQueue(s.sizeLBDQueue) -, sizeTrailQueue(s.sizeTrailQueue) -, firstReduceDB(s.firstReduceDB) -, incReduceDB(s.incReduceDB) -, specialIncReduceDB(s.specialIncReduceDB) -, lbLBDFrozenClause(s.lbLBDFrozenClause) -, chanseokStrategy(opt_chanseok_hack) -, coLBDBound (opt_chanseok_limit) -, lbSizeMinimizingClause(s.lbSizeMinimizingClause) -, lbLBDMinimizingClause(s.lbLBDMinimizingClause) -, var_decay(s.var_decay) -, max_var_decay(s.max_var_decay) -, clause_decay(s.clause_decay) -, random_var_freq(s.random_var_freq) -, random_seed(s.random_seed) -, ccmin_mode(s.ccmin_mode) -, phase_saving(s.phase_saving) -, rnd_pol(s.rnd_pol) -, rnd_init_act(s.rnd_init_act) -, randomizeFirstDescent(s.randomizeFirstDescent) -, garbage_frac(s.garbage_frac) -, certifiedOutput(NULL) -, certifiedUNSAT(false) // Not in the first parallel version -, panicModeLastRemoved(s.panicModeLastRemoved), panicModeLastRemovedShared(s.panicModeLastRemovedShared) -, useUnaryWatched(s.useUnaryWatched) -, promoteOneWatchedClause(s.promoteOneWatchedClause) -// Statistics: (formerly in 'SolverStats') -// -,solves(0),starts(0),decisions(0),propagations(0),conflicts(0),conflictsRestarts(0) - -, curRestart(s.curRestart) -, glureduce(s.glureduce) -, restart_inc(s.restart_inc) -, luby_restart(s.luby_restart) -, adaptStrategies(s.adaptStrategies) -, luby_restart_factor(s.luby_restart_factor) -, randomize_on_restarts(s.randomize_on_restarts) -, 
fixed_randomize_on_restarts(s.fixed_randomize_on_restarts) -, newDescent(s.newDescent) -, randomDescentAssignments(s.randomDescentAssignments) -, forceUnsatOnNewDescent(s.forceUnsatOnNewDescent) -, ok(true) -, cla_inc(s.cla_inc) -, var_inc(s.var_inc) -, watches(WatcherDeleted(ca)) -, watchesBin(WatcherDeleted(ca)) -, unaryWatches(WatcherDeleted(ca)) -, qhead(s.qhead) -, simpDB_assigns(s.simpDB_assigns) -, simpDB_props(s.simpDB_props) -, order_heap(VarOrderLt(activity)) -, progress_estimate(s.progress_estimate) -, remove_satisfied(s.remove_satisfied) -,lastLearntClause(CRef_Undef) -// Resource constraints: -// -, conflict_budget(s.conflict_budget) -, propagation_budget(s.propagation_budget) -, asynch_interrupt(s.asynch_interrupt) -, incremental(s.incremental) -, nbVarsInitialFormula(s.nbVarsInitialFormula) -, totalTime4Sat(s.totalTime4Sat) -, totalTime4Unsat(s.totalTime4Unsat) -, nbSatCalls(s.nbSatCalls) -, nbUnsatCalls(s.nbUnsatCalls) -{ - // Copy clauses. - s.ca.copyTo(ca); - ca.extra_clause_field = s.ca.extra_clause_field; - - // Initialize other variables - MYFLAG = 0; - // Initialize only first time. 
Useful for incremental solving (not in // version), useless otherwise - // Kept here for simplicity - sumLBD = s.sumLBD; - nbclausesbeforereduce = s.nbclausesbeforereduce; - - // Copy all search vectors - s.watches.copyTo(watches); - s.watchesBin.copyTo(watchesBin); - s.unaryWatches.copyTo(unaryWatches); - s.assigns.memCopyTo(assigns); - s.vardata.memCopyTo(vardata); - s.activity.memCopyTo(activity); - s.seen.memCopyTo(seen); - s.permDiff.memCopyTo(permDiff); - s.polarity.memCopyTo(polarity); - s.decision.memCopyTo(decision); - s.trail.memCopyTo(trail); - s.order_heap.copyTo(order_heap); - s.clauses.memCopyTo(clauses); - s.learnts.memCopyTo(learnts); - s.permanentLearnts.memCopyTo(permanentLearnts); - - s.lbdQueue.copyTo(lbdQueue); - s.trailQueue.copyTo(trailQueue); - s.forceUNSAT.copyTo(forceUNSAT); - s.stats.copyTo(stats); -} - - -Solver::~Solver() { -} - - -/**************************************************************** - Certified UNSAT proof in binary format -****************************************************************/ - - -void Solver::write_char(unsigned char ch) { - if(putc_unlocked((int) ch, certifiedOutput) == EOF) - exit(1); -} - - -void Solver::write_lit(int n) { - for(; n > 127; n >>= 7) - write_char(128 | (n & 127)); - write_char(n); -} - -/**************************************************************** - Set the incremental mode -****************************************************************/ - -// This function set the incremental mode to true. -// You can add special code for this mode here. 
- -void Solver::setIncrementalMode() { -#ifdef INCREMENTAL - incremental = true; -#else - fprintf(stderr, "c Trying to set incremental mode, but not compiled properly for this.\n"); - exit(1); -#endif -} - - -// Number of variables without selectors -void Solver::initNbInitialVars(int nb) { - nbVarsInitialFormula = nb; -} - - -bool Solver::isIncremental() { - return incremental; -} - - -//================================================================================================= -// Minor methods: - - -// Creates a new SAT variable in the solver. If 'decision' is cleared, variable will not be -// used as a decision variable (NOTE! This has effects on the meaning of a SATISFIABLE result). -// - -Var Solver::newVar(bool sign, bool dvar) { - int v = nVars(); - watches.init(mkLit(v, false)); - watches.init(mkLit(v, true)); - watchesBin.init(mkLit(v, false)); - watchesBin.init(mkLit(v, true)); - unaryWatches.init(mkLit(v, false)); - unaryWatches.init(mkLit(v, true)); - assigns.push(l_Undef); - vardata.push(mkVarData(CRef_Undef, 0)); - activity.push(rnd_init_act ? 
drand(random_seed) * 0.00001 : 0); - seen.push(0); - permDiff.push(0); - polarity.push(sign); - forceUNSAT.push(0); - decision.push(); - trail.capacity(v + 1); - setDecisionVar(v, dvar); - return v; -} - - -bool Solver::addClause_(vec &ps) { - - assert(decisionLevel() == 0); - if(!ok) return false; - - // Check if clause is satisfied and remove false/duplicate literals: - sort(ps); - - vec oc; - oc.clear(); - - Lit p; - int i, j, flag = 0; - if(certifiedUNSAT) { - for(i = j = 0, p = lit_Undef; i < ps.size(); i++) { - oc.push(ps[i]); - if(value(ps[i]) == l_True || ps[i] == ~p || value(ps[i]) == l_False) - flag = 1; - } - } - - for(i = j = 0, p = lit_Undef; i < ps.size(); i++) - if(value(ps[i]) == l_True || ps[i] == ~p) - return true; - else if(value(ps[i]) != l_False && ps[i] != p) - ps[j++] = p = ps[i]; - ps.shrink(i - j); - - if(flag && (certifiedUNSAT)) { - if(vbyte) { - write_char('a'); - for(i = j = 0, p = lit_Undef; i < ps.size(); i++) - write_lit(2 * (var(ps[i]) + 1) + sign(ps[i])); - write_lit(0); - - write_char('d'); - for(i = j = 0, p = lit_Undef; i < oc.size(); i++) - write_lit(2 * (var(oc[i]) + 1) + sign(oc[i])); - write_lit(0); - } - else { - for(i = j = 0, p = lit_Undef; i < ps.size(); i++) - fprintf(certifiedOutput, "%i ", (var(ps[i]) + 1) * (-2 * sign(ps[i]) + 1)); - fprintf(certifiedOutput, "0\n"); - - fprintf(certifiedOutput, "d "); - for(i = j = 0, p = lit_Undef; i < oc.size(); i++) - fprintf(certifiedOutput, "%i ", (var(oc[i]) + 1) * (-2 * sign(oc[i]) + 1)); - fprintf(certifiedOutput, "0\n"); - } - } - - - if(ps.size() == 0) - return ok = false; - else if(ps.size() == 1) { - uncheckedEnqueue(ps[0]); - return ok = (propagate() == CRef_Undef); - } else { - CRef cr = ca.alloc(ps, false); - clauses.push(cr); - attachClause(cr); - } - - return true; -} - - -void Solver::attachClause(CRef cr) { - const Clause &c = ca[cr]; - - assert(c.size() > 1); - if(c.size() == 2) { - watchesBin[~c[0]].push(Watcher(cr, c[1])); - watchesBin[~c[1]].push(Watcher(cr, 
c[0])); - } else { - watches[~c[0]].push(Watcher(cr, c[1])); - watches[~c[1]].push(Watcher(cr, c[0])); - } - if(c.learnt()) stats[learnts_literals] += c.size(); - else stats[clauses_literals] += c.size(); -} - - -void Solver::attachClausePurgatory(CRef cr) { - const Clause &c = ca[cr]; - - assert(c.size() > 1); - unaryWatches[~c[0]].push(Watcher(cr, c[1])); - -} - - -void Solver::detachClause(CRef cr, bool strict) { - const Clause &c = ca[cr]; - - assert(c.size() > 1); - if(c.size() == 2) { - if(strict) { - remove(watchesBin[~c[0]], Watcher(cr, c[1])); - remove(watchesBin[~c[1]], Watcher(cr, c[0])); - } else { - // Lazy detaching: (NOTE! Must clean all watcher lists before garbage collecting this clause) - watchesBin.smudge(~c[0]); - watchesBin.smudge(~c[1]); - } - } else { - if(strict) { - remove(watches[~c[0]], Watcher(cr, c[1])); - remove(watches[~c[1]], Watcher(cr, c[0])); - } else { - // Lazy detaching: (NOTE! Must clean all watcher lists before garbage collecting this clause) - watches.smudge(~c[0]); - watches.smudge(~c[1]); - } - } - if(c.learnt()) stats[learnts_literals] -= c.size(); - else stats[clauses_literals] -= c.size(); -} - - -// The purgatory is the 1-Watched scheme for imported clauses - -void Solver::detachClausePurgatory(CRef cr, bool strict) { - const Clause &c = ca[cr]; - - assert(c.size() > 1); - if(strict) - remove(unaryWatches[~c[0]], Watcher(cr, c[1])); - else - unaryWatches.smudge(~c[0]); -} - - -void Solver::removeClause(CRef cr, bool inPurgatory) { - - Clause &c = ca[cr]; - - if(certifiedUNSAT) { - if(vbyte) { - write_char('d'); - for(int i = 0; i < c.size(); i++) - write_lit(2 * (var(c[i]) + 1) + sign(c[i])); - write_lit(0); - } - else { - fprintf(certifiedOutput, "d "); - for(int i = 0; i < c.size(); i++) - fprintf(certifiedOutput, "%i ", (var(c[i]) + 1) * (-2 * sign(c[i]) + 1)); - fprintf(certifiedOutput, "0\n"); - } - } - - if(inPurgatory) - detachClausePurgatory(cr); - else - detachClause(cr); - // Don't leave pointers to free'd 
memory! - if(locked(c)) vardata[var(c[0])].reason = CRef_Undef; - c.mark(1); - ca.free(cr); -} - - -bool Solver::satisfied(const Clause &c) const { -#ifdef INCREMENTAL - if(incremental) - return (value(c[0]) == l_True) || (value(c[1]) == l_True); -#endif - - // Default mode - for(int i = 0; i < c.size(); i++) - if(value(c[i]) == l_True) - return true; - return false; -} - - -/************************************************************ - * Compute LBD functions - *************************************************************/ - -template inline unsigned int Solver::computeLBD(const T &lits, int end) { - int nblevels = 0; - MYFLAG++; -#ifdef INCREMENTAL - if(incremental) { // ----------------- INCREMENTAL MODE - if(end==-1) end = lits.size(); - int nbDone = 0; - for(int i=0;i=end) break; - if(isSelector(var(lits[i]))) continue; - nbDone++; - int l = level(var(lits[i])); - if (permDiff[l] != MYFLAG) { - permDiff[l] = MYFLAG; - nblevels++; - } - } - } else { // -------- DEFAULT MODE. NOT A LOT OF DIFFERENCES... 
BUT EASIER TO READ -#endif - for(int i = 0; i < lits.size(); i++) { - int l = level(var(lits[i])); - if(permDiff[l] != MYFLAG) { - permDiff[l] = MYFLAG; - nblevels++; - } - } -#ifdef INCREMENTAL - } -#endif - return nblevels; -} - - - -/****************************************************************** - * Minimisation with binary reolution - ******************************************************************/ -void Solver::minimisationWithBinaryResolution(vec &out_learnt) { - - // Find the LBD measure - unsigned int lbd = computeLBD(out_learnt); - Lit p = ~out_learnt[0]; - - if(lbd <= lbLBDMinimizingClause) { - MYFLAG++; - - for(int i = 1; i < out_learnt.size(); i++) { - permDiff[var(out_learnt[i])] = MYFLAG; - } - - vec &wbin = watchesBin[p]; - int nb = 0; - for(int k = 0; k < wbin.size(); k++) { - Lit imp = wbin[k].blocker; - if(permDiff[var(imp)] == MYFLAG && value(imp) == l_True) { - nb++; - permDiff[var(imp)] = MYFLAG - 1; - } - } - int l = out_learnt.size() - 1; - if(nb > 0) { - stats[nbReducedClauses]++; - for(int i = 1; i < out_learnt.size() - nb; i++) { - if(permDiff[var(out_learnt[i])] != MYFLAG) { - Lit p = out_learnt[l]; - out_learnt[l] = out_learnt[i]; - out_learnt[i] = p; - l--; - i--; - } - } - - out_learnt.shrink(nb); - - } - } -} - -// Revert to the state at given level (keeping all assignment at 'level' but not beyond). 
-// - -void Solver::cancelUntil(int level) { - if(decisionLevel() > level) { - for(int c = trail.size() - 1; c >= trail_lim[level]; c--) { - Var x = var(trail[c]); - assigns[x] = l_Undef; - if(phase_saving > 1 || ((phase_saving == 1) && c > trail_lim.last())) { - polarity[x] = sign(trail[c]); - } - insertVarOrder(x); - } - qhead = trail_lim[level]; - trail.shrink(trail.size() - trail_lim[level]); - trail_lim.shrink(trail_lim.size() - level); - } -} - - -//================================================================================================= -// Major methods: - -Lit Solver::pickBranchLit() { - Var next = var_Undef; - - // Random decision: - if(((randomizeFirstDescent && conflicts == 0) || drand(random_seed) < random_var_freq) && !order_heap.empty()) { - next = order_heap[irand(random_seed, order_heap.size())]; - if(value(next) == l_Undef && decision[next]) - stats[rnd_decisions]++; - } - - // Activity based decision: - while(next == var_Undef || value(next) != l_Undef || !decision[next]) - if(order_heap.empty()) { - next = var_Undef; - break; - } else { - next = order_heap.removeMin(); - } - - if(randomize_on_restarts && !fixed_randomize_on_restarts && newDescent && (decisionLevel() % 2 == 0)) { - return mkLit(next, (randomDescentAssignments >> (decisionLevel() % 32)) & 1); - } - - if(fixed_randomize_on_restarts && decisionLevel() < 7) { - return mkLit(next, (randomDescentAssignments >> (decisionLevel() % 32)) & 1); - } - - if(next == var_Undef) return lit_Undef; - - if(forceUnsatOnNewDescent && newDescent) { - if(forceUNSAT[next] != 0) - return mkLit(next, forceUNSAT[next] < 0); - return mkLit(next, polarity[next]); - - } - - return next == var_Undef ? lit_Undef : mkLit(next, rnd_pol ? 
drand(random_seed) < 0.5 : polarity[next]); -} - - -/*_________________________________________________________________________________________________ -| -| analyze : (confl : Clause*) (out_learnt : vec&) (out_btlevel : int&) -> [void] -| -| Description: -| Analyze conflict and produce a reason clause. -| -| Pre-conditions: -| * 'out_learnt' is assumed to be cleared. -| * Current decision level must be greater than root level. -| -| Post-conditions: -| * 'out_learnt[0]' is the asserting literal at level 'out_btlevel'. -| * If out_learnt.size() > 1 then 'out_learnt[1]' has the greatest decision level of the -| rest of literals. There may be others from the same level though. -| -|________________________________________________________________________________________________@*/ -void Solver::analyze(CRef confl, vec &out_learnt, vec &selectors, int &out_btlevel, unsigned int &lbd, unsigned int &szWithoutSelectors) { - int pathC = 0; - Lit p = lit_Undef; - - - // Generate conflict clause: - // - out_learnt.push(); // (leave room for the asserting literal) - int index = trail.size() - 1; - do { - assert(confl != CRef_Undef); // (otherwise should be UIP) - Clause &c = ca[confl]; - // Special case for binary clauses - // The first one has to be SAT - if(p != lit_Undef && c.size() == 2 && value(c[0]) == l_False) { - - assert(value(c[1]) == l_True); - Lit tmp = c[0]; - c[0] = c[1], c[1] = tmp; - } - - if(c.learnt()) { - parallelImportClauseDuringConflictAnalysis(c, confl); - claBumpActivity(c); - } else { // original clause - if(!c.getSeen()) { - stats[originalClausesSeen]++; - c.setSeen(true); - } - } - - // DYNAMIC NBLEVEL trick (see competition'09 companion paper) - if(c.learnt() && c.lbd() > 2) { - unsigned int nblevels = computeLBD(c); - if(nblevels + 1 < c.lbd()) { // improve the LBD - if(c.lbd() <= lbLBDFrozenClause) { - // seems to be interesting : keep it for the next round - c.setCanBeDel(false); - } - if(chanseokStrategy && nblevels <= coLBDBound) { - 
c.nolearnt(); - learnts.remove(confl); - permanentLearnts.push(confl); - stats[nbPermanentLearnts]++; - - } else { - c.setLBD(nblevels); // Update it - } - } - } - - - for(int j = (p == lit_Undef) ? 0 : 1; j < c.size(); j++) { - Lit q = c[j]; - - if(!seen[var(q)]) { - if(level(var(q)) == 0) { - } else { // Here, the old case - if(!isSelector(var(q))) - varBumpActivity(var(q)); - - // This variable was responsible for a conflict, - // consider it as a UNSAT assignation for this literal - bumpForceUNSAT(~q); // Negation because q is false here - - seen[var(q)] = 1; - if(level(var(q)) >= decisionLevel()) { - pathC++; - // UPDATEVARACTIVITY trick (see competition'09 companion paper) - if(!isSelector(var(q)) && (reason(var(q)) != CRef_Undef) && ca[reason(var(q))].learnt()) - lastDecisionLevel.push(q); - } else { - if(isSelector(var(q))) { - assert(value(q) == l_False); - selectors.push(q); - } else - out_learnt.push(q); - } - } - } //else stats[sumResSeen]++; - } - - // Select next clause to look at: - while (!seen[var(trail[index--])]); - p = trail[index + 1]; - //stats[sumRes]++; - confl = reason(var(p)); - seen[var(p)] = 0; - pathC--; - - } while(pathC > 0); - out_learnt[0] = ~p; - - // Simplify conflict clause: - // - int i, j; - - for(int i = 0; i < selectors.size(); i++) - out_learnt.push(selectors[i]); - - out_learnt.copyTo(analyze_toclear); - if(ccmin_mode == 2) { - uint32_t abstract_level = 0; - for(i = 1; i < out_learnt.size(); i++) - abstract_level |= abstractLevel(var(out_learnt[i])); // (maintain an abstraction of levels involved in conflict) - - for(i = j = 1; i < out_learnt.size(); i++) - if(reason(var(out_learnt[i])) == CRef_Undef || !litRedundant(out_learnt[i], abstract_level)) - out_learnt[j++] = out_learnt[i]; - - } else if(ccmin_mode == 1) { - for(i = j = 1; i < out_learnt.size(); i++) { - Var x = var(out_learnt[i]); - - if(reason(x) == CRef_Undef) - out_learnt[j++] = out_learnt[i]; - else { - Clause &c = ca[reason(var(out_learnt[i]))]; - // Thanks 
to Siert Wieringa for this bug fix! - for(int k = ((c.size() == 2) ? 0 : 1); k < c.size(); k++) - if(!seen[var(c[k])] && level(var(c[k])) > 0) { - out_learnt[j++] = out_learnt[i]; - break; - } - } - } - } else - i = j = out_learnt.size(); - - // stats[max_literals]+=out_learnt.size(); - out_learnt.shrink(i - j); - // stats[tot_literals]+=out_learnt.size(); - - - /* *************************************** - Minimisation with binary clauses of the asserting clause - First of all : we look for small clauses - Then, we reduce clauses with small LBD. - Otherwise, this can be useless - */ - if(!incremental && out_learnt.size() <= lbSizeMinimizingClause) { - minimisationWithBinaryResolution(out_learnt); - } - // Find correct backtrack level: - // - if(out_learnt.size() == 1) - out_btlevel = 0; - else { - int max_i = 1; - // Find the first literal assigned at the next-highest level: - for(int i = 2; i < out_learnt.size(); i++) - if(level(var(out_learnt[i])) > level(var(out_learnt[max_i]))) - max_i = i; - // Swap-in this literal at index 1: - Lit p = out_learnt[max_i]; - out_learnt[max_i] = out_learnt[1]; - out_learnt[1] = p; - out_btlevel = level(var(p)); - } -#ifdef INCREMENTAL - if(incremental) { - szWithoutSelectors = 0; - for(int i=0;i0) break; - } - } else -#endif - szWithoutSelectors = out_learnt.size(); - - // Compute LBD - lbd = computeLBD(out_learnt, out_learnt.size() - selectors.size()); - - // UPDATEVARACTIVITY trick (see competition'09 companion paper) - if(lastDecisionLevel.size() > 0) { - for(int i = 0; i < lastDecisionLevel.size(); i++) { - if(ca[reason(var(lastDecisionLevel[i]))].lbd() < lbd) - varBumpActivity(var(lastDecisionLevel[i])); - } - lastDecisionLevel.clear(); - } - - - for(int j = 0; j < analyze_toclear.size(); j++) seen[var(analyze_toclear[j])] = 0; // ('seen[]' is now cleared) - for(int j = 0; j < selectors.size(); j++) seen[var(selectors[j])] = 0; -} - - -// Check if 'p' can be removed. 
'abstract_levels' is used to abort early if the algorithm is -// visiting literals at levels that cannot be removed later. - -bool Solver::litRedundant(Lit p, uint32_t abstract_levels) { - analyze_stack.clear(); - analyze_stack.push(p); - int top = analyze_toclear.size(); - while(analyze_stack.size() > 0) { - assert(reason(var(analyze_stack.last())) != CRef_Undef); - Clause &c = ca[reason(var(analyze_stack.last()))]; - analyze_stack.pop(); // - if(c.size() == 2 && value(c[0]) == l_False) { - assert(value(c[1]) == l_True); - Lit tmp = c[0]; - c[0] = c[1], c[1] = tmp; - } - - for(int i = 1; i < c.size(); i++) { - Lit p = c[i]; - if(!seen[var(p)]) { - if(level(var(p)) > 0) { - if(reason(var(p)) != CRef_Undef && (abstractLevel(var(p)) & abstract_levels) != 0) { - seen[var(p)] = 1; - analyze_stack.push(p); - analyze_toclear.push(p); - } else { - for(int j = top; j < analyze_toclear.size(); j++) - seen[var(analyze_toclear[j])] = 0; - analyze_toclear.shrink(analyze_toclear.size() - top); - return false; - } - } - } - } - } - - return true; -} - - -/*_________________________________________________________________________________________________ -| -| analyzeFinal : (p : Lit) -> [void] -| -| Description: -| Specialized analysis procedure to express the final conflict in terms of assumptions. -| Calculates the (possibly empty) set of assumptions that led to the assignment of 'p', and -| stores the result in 'out_conflict'. 
-|________________________________________________________________________________________________@*/ -void Solver::analyzeFinal(Lit p, vec &out_conflict) { - out_conflict.clear(); - out_conflict.push(p); - - if(decisionLevel() == 0) - return; - - seen[var(p)] = 1; - - for(int i = trail.size() - 1; i >= trail_lim[0]; i--) { - Var x = var(trail[i]); - if(seen[x]) { - if(reason(x) == CRef_Undef) { - assert(level(x) > 0); - out_conflict.push(~trail[i]); - } else { - Clause &c = ca[reason(x)]; - // for (int j = 1; j < c.size(); j++) Minisat (glucose 2.0) loop - // Bug in case of assumptions due to special data structures for Binary. - // Many thanks to Sam Bayless (sbayless@cs.ubc.ca) for discover this bug. - for(int j = ((c.size() == 2) ? 0 : 1); j < c.size(); j++) - if(level(var(c[j])) > 0) - seen[var(c[j])] = 1; - } - - seen[x] = 0; - } - } - - seen[var(p)] = 0; -} - - -void Solver::uncheckedEnqueue(Lit p, CRef from) { - assert(value(p) == l_Undef); - assigns[var(p)] = lbool(!sign(p)); - vardata[var(p)] = mkVarData(from, decisionLevel()); - trail.push_(p); -} - - -void Solver::bumpForceUNSAT(Lit q) { - forceUNSAT[var(q)] = sign(q) ? -1 : +1; - return; -} - - -/*_________________________________________________________________________________________________ -| -| propagate : [void] -> [Clause*] -| -| Description: -| Propagates all enqueued facts. If a conflict arises, the conflicting clause is returned, -| otherwise CRef_Undef. -| -| Post-conditions: -| * the propagation queue is empty, even if there was a conflict. -|________________________________________________________________________________________________@*/ -CRef Solver::propagate() { - CRef confl = CRef_Undef; - int num_props = 0; - watches.cleanAll(); - watchesBin.cleanAll(); - unaryWatches.cleanAll(); - while(qhead < trail.size()) { - Lit p = trail[qhead++]; // 'p' is enqueued fact to propagate. 
- vec &ws = watches[p]; - Watcher *i, *j, *end; - num_props++; - - - // First, Propagate binary clauses - vec &wbin = watchesBin[p]; - for(int k = 0; k < wbin.size(); k++) { - - Lit imp = wbin[k].blocker; - - if(value(imp) == l_False) { - return wbin[k].cref; - } - - if(value(imp) == l_Undef) { - uncheckedEnqueue(imp, wbin[k].cref); - } - } - - // Now propagate other 2-watched clauses - for(i = j = (Watcher *) ws, end = i + ws.size(); i != end;) { - // Try to avoid inspecting the clause: - Lit blocker = i->blocker; - if(value(blocker) == l_True) { - *j++ = *i++; - continue; - } - - // Make sure the false literal is data[1]: - CRef cr = i->cref; - Clause &c = ca[cr]; - assert(!c.getOneWatched()); - Lit false_lit = ~p; - if(c[0] == false_lit) - c[0] = c[1], c[1] = false_lit; - assert(c[1] == false_lit); - i++; - - // If 0th watch is true, then clause is already satisfied. - Lit first = c[0]; - Watcher w = Watcher(cr, first); - if(first != blocker && value(first) == l_True) { - - *j++ = w; - continue; - } -#ifdef INCREMENTAL - if(incremental) { // ----------------- INCREMENTAL MODE - int choosenPos = -1; - for (int k = 2; k < c.size(); k++) { - - if (value(c[k]) != l_False){ - if(decisionLevel()>assumptions.size()) { - choosenPos = k; - break; - } else { - choosenPos = k; - - if(value(c[k])==l_True || !isSelector(var(c[k]))) { - break; - } - } - - } - } - if(choosenPos!=-1) { - c[1] = c[choosenPos]; c[choosenPos] = false_lit; - watches[~c[1]].push(w); - goto NextClause; } - } else { // ----------------- DEFAULT MODE (NOT INCREMENTAL) -#endif - for(int k = 2; k < c.size(); k++) { - - if(value(c[k]) != l_False) { - c[1] = c[k]; - c[k] = false_lit; - watches[~c[1]].push(w); - goto NextClause; - } - } -#ifdef INCREMENTAL - } -#endif - // Did not find watch -- clause is unit under assignment: - *j++ = w; - if(value(first) == l_False) { - confl = cr; - qhead = trail.size(); - // Copy the remaining watches: - while(i < end) - *j++ = *i++; - } else { - uncheckedEnqueue(first, 
cr); - - - } - NextClause:; - } - ws.shrink(i - j); - - // unaryWatches "propagation" - if(useUnaryWatched && confl == CRef_Undef) { - confl = propagateUnaryWatches(p); - - } - - } - - - propagations += num_props; - simpDB_props -= num_props; - - return confl; -} - - -/*_________________________________________________________________________________________________ -| -| propagateUnaryWatches : [Lit] -> [Clause*] -| -| Description: -| Propagates unary watches of Lit p, return a conflict -| otherwise CRef_Undef -| -|________________________________________________________________________________________________@*/ - -CRef Solver::propagateUnaryWatches(Lit p) { - CRef confl = CRef_Undef; - Watcher *i, *j, *end; - vec &ws = unaryWatches[p]; - for(i = j = (Watcher *) ws, end = i + ws.size(); i != end;) { - // Try to avoid inspecting the clause: - Lit blocker = i->blocker; - if(value(blocker) == l_True) { - *j++ = *i++; - continue; - } - - // Make sure the false literal is data[1]: - CRef cr = i->cref; - Clause &c = ca[cr]; - assert(c.getOneWatched()); - Lit false_lit = ~p; - assert(c[0] == false_lit); // this is unary watch... No other choice if "propagated" - //if (c[0] == false_lit) - //c[0] = c[1], c[1] = false_lit; - //assert(c[1] == false_lit); - i++; - Watcher w = Watcher(cr, c[0]); - for(int k = 1; k < c.size(); k++) { - if(value(c[k]) != l_False) { - c[0] = c[k]; - c[k] = false_lit; - unaryWatches[~c[0]].push(w); - goto NextClauseUnary; - } - } - - // Did not find watch -- clause is empty under assignment: - *j++ = w; - - confl = cr; - qhead = trail.size(); - // Copy the remaining watches: - while(i < end) - *j++ = *i++; - - // We can add it now to the set of clauses when backtracking - //printf("*"); - if(promoteOneWatchedClause) { - stats[nbPromoted]++; - // Let's find the two biggest decision levels in the clause s.t. 
it will correctly be propagated when we'll backtrack - int maxlevel = -1; - int index = -1; - for(int k = 1; k < c.size(); k++) { - assert(value(c[k]) == l_False); - assert(level(var(c[k])) <= level(var(c[0]))); - if(level(var(c[k])) > maxlevel) { - index = k; - maxlevel = level(var(c[k])); - } - } - detachClausePurgatory(cr, true); // TODO: check that the cleanAll is ok (use ",true" otherwise) - assert(index != -1); - Lit tmp = c[1]; - c[1] = c[index], c[index] = tmp; - attachClause(cr); - // TODO used in function ParallelSolver::reportProgressArrayImports - //Override :-( - //goodImportsFromThreads[ca[cr].importedFrom()]++; - ca[cr].setOneWatched(false); - ca[cr].setExported(2); - } - NextClauseUnary:; - } - ws.shrink(i - j); - - return confl; -} - - -/*_________________________________________________________________________________________________ -| -| reduceDB : () -> [void] -| -| Description: -| Remove half of the learnt clauses, minus the clauses locked by the current assignment. Locked -| clauses are clauses that are reason to some assignment. Binary clauses are never removed. -|________________________________________________________________________________________________@*/ - - -void Solver::reduceDB() { - - int i, j; - stats[nbReduceDB]++; - if(chanseokStrategy) - sort(learnts, reduceDBAct_lt(ca)); - else { - sort(learnts, reduceDB_lt(ca)); - - // We have a lot of "good" clauses, it is difficult to compare them. Keep more ! - if(ca[learnts[learnts.size() / RATIOREMOVECLAUSES]].lbd() <= 3) nbclausesbeforereduce += specialIncReduceDB; - // Useless :-) - if(ca[learnts.last()].lbd() <= 5) nbclausesbeforereduce += specialIncReduceDB; - - } - // Don't delete binary or locked clauses. 
From the rest, delete clauses from the first half - // Keep clauses which seem to be usefull (their lbd was reduce during this sequence) - - int limit = learnts.size() / 2; - - for(i = j = 0; i < learnts.size(); i++) { - Clause &c = ca[learnts[i]]; - if(c.lbd() > 2 && c.size() > 2 && c.canBeDel() && !locked(c) && (i < limit)) { - removeClause(learnts[i]); - stats[nbRemovedClauses]++; - } - else { - if(!c.canBeDel()) limit++; //we keep c, so we can delete an other clause - c.setCanBeDel(true); // At the next step, c can be delete - learnts[j++] = learnts[i]; - } - } - learnts.shrink(i - j); - checkGarbage(); -} - - -void Solver::removeSatisfied(vec &cs) { - - int i, j; - for(i = j = 0; i < cs.size(); i++) { - Clause &c = ca[cs[i]]; - - - if(satisfied(c)) if(c.getOneWatched()) - removeClause(cs[i], true); - else - removeClause(cs[i]); - else - cs[j++] = cs[i]; - } - cs.shrink(i - j); -} - - -void Solver::rebuildOrderHeap() { - vec vs; - for(Var v = 0; v < nVars(); v++) - if(decision[v] && value(v) == l_Undef) - vs.push(v); - order_heap.build(vs); - -} - - -/*_________________________________________________________________________________________________ -| -| simplify : [void] -> [bool] -| -| Description: -| Simplify the clause database according to the current top-level assigment. Currently, the only -| thing done here is the removal of satisfied clauses, but more things can be put here. -|________________________________________________________________________________________________@*/ -bool Solver::simplify() { - assert(decisionLevel() == 0); - - if(!ok) return ok = false; - else { - CRef cr = propagate(); - if(cr != CRef_Undef) { - return ok = false; - } - } - - - if(nAssigns() == simpDB_assigns || (simpDB_props > 0)) - return true; - - // Remove satisfied clauses: - removeSatisfied(learnts); - removeSatisfied(permanentLearnts); - removeSatisfied(unaryWatchedClauses); - if(remove_satisfied) // Can be turned off. 
- removeSatisfied(clauses); - checkGarbage(); - rebuildOrderHeap(); - - simpDB_assigns = nAssigns(); - simpDB_props = stats[clauses_literals] + stats[learnts_literals]; // (shouldn't depend on stats really, but it will do for now) - - return true; -} - - -void Solver::adaptSolver() { - bool adjusted = false; - bool reinit = false; - printf("c\nc Try to adapt solver strategies\nc \n"); - /* printf("c Adjusting solver for the SAT Race 2015 (alpha feature)\n"); - printf("c key successive Conflicts : %" PRIu64"\n",stats[noDecisionConflict]); - printf("c nb unary clauses learnt : %" PRIu64"\n",stats[nbUn]); - printf("c key avg dec per conflicts : %.2f\n", (float)decisions / (float)conflicts);*/ - float decpc = (float) decisions / (float) conflicts; - if(decpc <= 1.2) { - chanseokStrategy = true; - coLBDBound = 4; - glureduce = true; - adjusted = true; - printf("c Adjusting for low decision levels.\n"); - reinit = true; - firstReduceDB = 2000; - nbclausesbeforereduce = firstReduceDB; - curRestart = (conflicts / nbclausesbeforereduce) + 1; - incReduceDB = 0; - } - if(stats[noDecisionConflict] < 30000) { - luby_restart = true; - luby_restart_factor = 100; - - var_decay = 0.999; - max_var_decay = 0.999; - adjusted = true; - printf("c Adjusting for low successive conflicts.\n"); - } - if(stats[noDecisionConflict] > 54400) { - printf("c Adjusting for high successive conflicts.\n"); - chanseokStrategy = true; - glureduce = true; - coLBDBound = 3; - firstReduceDB = 30000; - var_decay = 0.99; - max_var_decay = 0.99; - randomize_on_restarts = 1; - adjusted = true; - } - if(stats[nbDL2] - stats[nbBin] > 20000) { - var_decay = 0.91; - max_var_decay = 0.91; - adjusted = true; - printf("c Adjusting for a very large number of true glue clauses found.\n"); - } - if(!adjusted) { - printf("c Nothing extreme in this problem, continue with glucose default strategies.\n"); - } - printf("c\n"); - if(adjusted) { // Let's reinitialize the glucose restart strategy counters - 
lbdQueue.fastclear(); - sumLBD = 0; - conflictsRestarts = 0; - } - - if(chanseokStrategy && adjusted) { - int moved = 0; - int i, j; - for(i = j = 0; i < learnts.size(); i++) { - Clause &c = ca[learnts[i]]; - if(c.lbd() <= coLBDBound) { - permanentLearnts.push(learnts[i]); - moved++; - } - else { - learnts[j++] = learnts[i]; - } - } - learnts.shrink(i - j); - printf("c Activating Chanseok Strategy: moved %d clauses to the permanent set.\n", moved); - } - - if(reinit) { - assert(decisionLevel() == 0); - for(int i = 0; i < learnts.size(); i++) { - removeClause(learnts[i]); - } - learnts.shrink(learnts.size()); - checkGarbage(); -/* - order_heap.clear(); - for(int i=0;i [lbool] -| -| Description: -| Search for a model the specified number of conflicts. -| NOTE! Use negative value for 'nof_conflicts' indicate infinity. -| -| Output: -| 'l_True' if a partial assigment that is consistent with respect to the clauseset is found. If -| all variables are decision variables, this means that the clause set is satisfiable. 'l_False' -| if the clause set is unsatisfiable. 'l_Undef' if the bound on number of conflicts is reached. 
-|________________________________________________________________________________________________@*/ -lbool Solver::search(int nof_conflicts) { - assert(ok); - int backtrack_level; - int conflictC = 0; - vec learnt_clause, selectors; - unsigned int nblevels, szWithoutSelectors = 0; - bool blocked = false; - bool aDecisionWasMade = false; - - starts++; - for(; ;) { - if(decisionLevel() == 0) { // We import clauses FIXME: ensure that we will import clauses enventually (restart after some point) - parallelImportUnaryClauses(); - - if(parallelImportClauses()) - return l_False; - - } - CRef confl = propagate(); - - if(confl != CRef_Undef) { - newDescent = false; - if(parallelJobIsFinished()) - return l_Undef; - - if(!aDecisionWasMade) - stats[noDecisionConflict]++; - aDecisionWasMade = false; - - stats[sumDecisionLevels] += decisionLevel(); - stats[sumTrail] += trail.size(); - // CONFLICT - conflicts++; - conflictC++; - conflictsRestarts++; - if(conflicts % 5000 == 0 && var_decay < max_var_decay) - var_decay += 0.01; - - if(verbosity >= 1 && starts>0 && conflicts % verbEveryConflicts == 0) { - printf("c | %8d %7d %5d | %7d %8d %8d | %5d %8d %6d %8d | %6.3f %% |\n", - (int) starts, (int) stats[nbstopsrestarts], (int) (conflicts / starts), - (int) stats[dec_vars] - (trail_lim.size() == 0 ? 
trail.size() : trail_lim[0]), nClauses(), (int) stats[clauses_literals], - (int) stats[nbReduceDB], nLearnts(), (int) stats[nbDL2], (int) stats[nbRemovedClauses], progressEstimate() * 100); - } - if(decisionLevel() == 0) { - return l_False; - - } - if(adaptStrategies && conflicts == 100000) { - cancelUntil(0); - adaptSolver(); - adaptStrategies = false; - return l_Undef; - } - - trailQueue.push(trail.size()); - // BLOCK RESTART (CP 2012 paper) - if(conflictsRestarts > LOWER_BOUND_FOR_BLOCKING_RESTART && lbdQueue.isvalid() && trail.size() > R * trailQueue.getavg()) { - lbdQueue.fastclear(); - stats[nbstopsrestarts]++; - if(!blocked) { - stats[lastblockatrestart] = starts; - stats[nbstopsrestartssame]++; - blocked = true; - } - } - - learnt_clause.clear(); - selectors.clear(); - - analyze(confl, learnt_clause, selectors, backtrack_level, nblevels, szWithoutSelectors); - - lbdQueue.push(nblevels); - sumLBD += nblevels; - - cancelUntil(backtrack_level); - - if(certifiedUNSAT) { - if(vbyte) { - write_char('a'); - for(int i = 0; i < learnt_clause.size(); i++) - write_lit(2 * (var(learnt_clause[i]) + 1) + sign(learnt_clause[i])); - write_lit(0); - } - else { - for(int i = 0; i < learnt_clause.size(); i++) - fprintf(certifiedOutput, "%i ", (var(learnt_clause[i]) + 1) * - (-2 * sign(learnt_clause[i]) + 1)); - fprintf(certifiedOutput, "0\n"); - } - } - - - if(learnt_clause.size() == 1) { - uncheckedEnqueue(learnt_clause[0]); - stats[nbUn]++; - parallelExportUnaryClause(learnt_clause[0]); - } else { - CRef cr; - if(chanseokStrategy && nblevels <= coLBDBound) { - cr = ca.alloc(learnt_clause, false); - permanentLearnts.push(cr); - stats[nbPermanentLearnts]++; - } else { - cr = ca.alloc(learnt_clause, true); - ca[cr].setLBD(nblevels); - ca[cr].setOneWatched(false); - learnts.push(cr); - claBumpActivity(ca[cr]); - } -#ifdef INCREMENTAL - ca[cr].setSizeWithoutSelectors(szWithoutSelectors); -#endif - if(nblevels <= 2) { stats[nbDL2]++; } // stats - if(ca[cr].size() == 2) 
stats[nbBin]++; // stats - attachClause(cr); - lastLearntClause = cr; // Use in multithread (to hard to put inside ParallelSolver) - parallelExportClauseDuringSearch(ca[cr]); - uncheckedEnqueue(learnt_clause[0], cr); - - } - varDecayActivity(); - claDecayActivity(); - - - } else { - // Our dynamic restart, see the SAT09 competition compagnion paper - if((luby_restart && nof_conflicts <= conflictC) || - (!luby_restart && (lbdQueue.isvalid() && ((lbdQueue.getavg() * K) > (sumLBD / conflictsRestarts))))) { - lbdQueue.fastclear(); - progress_estimate = progressEstimate(); - int bt = 0; -#ifdef INCREMENTAL - if(incremental) // DO NOT BACKTRACK UNTIL 0.. USELESS - bt = (decisionLevel() firstReduceDB) || - (glureduce && conflicts >= ((unsigned int) curRestart * nbclausesbeforereduce))) { - - if(learnts.size() > 0) { - curRestart = (conflicts / nbclausesbeforereduce) + 1; - reduceDB(); - if(!panicModeIsEnabled()) - nbclausesbeforereduce += incReduceDB; - } - } - - lastLearntClause = CRef_Undef; - Lit next = lit_Undef; - while(decisionLevel() < assumptions.size()) { - // Perform user provided assumption: - Lit p = assumptions[decisionLevel()]; - if(value(p) == l_True) { - // Dummy decision level: - newDecisionLevel(); - } else if(value(p) == l_False) { - analyzeFinal(~p, conflict); - return l_False; - } else { - next = p; - break; - } - } - - if(next == lit_Undef) { - // New variable decision: - decisions++; - next = pickBranchLit(); - if(next == lit_Undef) { - printf("c last restart ## conflicts : %d %d \n", conflictC, decisionLevel()); - // Model found: - return l_True; - } - } - - // Increase decision level and enqueue 'next' - aDecisionWasMade = true; - newDecisionLevel(); - uncheckedEnqueue(next); - } - } -} - - -double Solver::progressEstimate() const { - double progress = 0; - double F = 1.0 / nVars(); - - for(int i = 0; i <= decisionLevel(); i++) { - int beg = i == 0 ? 0 : trail_lim[i - 1]; - int end = i == decisionLevel() ? 
trail.size() : trail_lim[i]; - progress += pow(F, i) * (end - beg); - } - - return progress / nVars(); -} - - -void Solver::printIncrementalStats() { - - printf("c---------- Glucose Stats -------------------------\n"); - printf("c restarts : %" - PRIu64 - "\n", starts); - printf("c nb ReduceDB : %" - PRIu64 - "\n", stats[nbReduceDB]); - printf("c nb removed Clauses : %" - PRIu64 - "\n", stats[nbRemovedClauses]); - printf("c nb learnts DL2 : %" - PRIu64 - "\n", stats[nbDL2]); - printf("c nb learnts size 2 : %" - PRIu64 - "\n", stats[nbBin]); - printf("c nb learnts size 1 : %" - PRIu64 - "\n", stats[nbUn]); - - printf("c conflicts : %" - PRIu64 - "\n", conflicts); - printf("c decisions : %" - PRIu64 - "\n", decisions); - printf("c propagations : %" - PRIu64 - "\n", propagations); - - printf("\nc SAT Calls : %d in %g seconds\n", nbSatCalls, totalTime4Sat); - printf("c UNSAT Calls : %d in %g seconds\n", nbUnsatCalls, totalTime4Unsat); - - printf("c--------------------------------------------------\n"); -} - - -double Solver::luby(double y, int x) { - - // Find the finite subsequence that contains index 'x', and the - // size of that subsequence: - int size, seq; - for(size = 1, seq = 0; size < x + 1; seq++, size = 2 * size + 1); - - while(size - 1 != x) { - size = (size - 1) >> 1; - seq--; - x = x % size; - } - - return pow(y, seq); -} - - -// NOTE: assumptions passed in member-variable 'assumptions'. - -lbool Solver::solve_(bool do_simp, bool turn_off_simp) // Parameters are useless in core but useful for SimpSolver.... 
-{ - - if(incremental && certifiedUNSAT) { - printf("Can not use incremental and certified unsat in the same time\n"); - exit(-1); - } - - model.clear(); - conflict.clear(); - if(!ok) return l_False; - double curTime = cpuTime(); - - solves++; - - - lbool status = l_Undef; - if(!incremental && verbosity >= 1) { - printf("c ========================================[ MAGIC CONSTANTS ]==============================================\n"); - printf("c | Constants are supposed to work well together :-) |\n"); - printf("c | however, if you find better choices, please let us known... |\n"); - printf("c |-------------------------------------------------------------------------------------------------------|\n"); - if(adaptStrategies) { - printf("c | Adapt dynamically the solver after 100000 conflicts (restarts, reduction strategies...) |\n"); - printf("c |-------------------------------------------------------------------------------------------------------|\n"); - } - printf("c | | | |\n"); - printf("c | - Restarts: | - Reduce Clause DB: | - Minimize Asserting: |\n"); - if(chanseokStrategy) { - printf("c | * LBD Queue : %6d | chanseok Strategy | * size < %3d |\n", lbdQueue.maxSize(), - lbSizeMinimizingClause); - printf("c | * Trail Queue : %6d | * learnts size : %6d | * lbd < %3d |\n", trailQueue.maxSize(), - firstReduceDB, lbLBDMinimizingClause); - printf("c | * K : %6.2f | * Bound LBD : %6d | |\n", K, coLBDBound); - printf("c | * R : %6.2f | * Protected : (lbd)< %2d | |\n", R, lbLBDFrozenClause); - } else { - printf("c | * LBD Queue : %6d | * First : %6d | * size < %3d |\n", lbdQueue.maxSize(), - nbclausesbeforereduce, lbSizeMinimizingClause); - printf("c | * Trail Queue : %6d | * Inc : %6d | * lbd < %3d |\n", trailQueue.maxSize(), incReduceDB, - lbLBDMinimizingClause); - printf("c | * K : %6.2f | * Special : %6d | |\n", K, specialIncReduceDB); - printf("c | * R : %6.2f | * Protected : (lbd)< %2d | |\n", R, lbLBDFrozenClause); - } - printf("c | | | |\n"); - printf("c 
==================================[ Search Statistics (every %6d conflicts) ]=========================\n", verbEveryConflicts); - printf("c | |\n"); - - printf("c | RESTARTS | ORIGINAL | LEARNT | Progress |\n"); - printf("c | NB Blocked Avg Cfc | Vars Clauses Literals | Red Learnts LBD2 Removed | |\n"); - printf("c =========================================================================================================\n"); - } - - // Search: - int curr_restarts = 0; - while(status == l_Undef) { - status = search( - luby_restart ? luby(restart_inc, curr_restarts) * luby_restart_factor : 0); // the parameter is useless in glucose, kept to allow modifications - - if(!withinBudget()) break; - curr_restarts++; - } - - if(!incremental && verbosity >= 1) - printf("c =========================================================================================================\n"); - - if(certifiedUNSAT) { // Want certified output - if(status == l_False) { - if(vbyte) { - write_char('a'); - write_lit(0); - } - else { - fprintf(certifiedOutput, "0\n"); - } - } - fclose(certifiedOutput); - } - - - if(status == l_True) { - // Extend & copy model: - model.growTo(nVars()); - for(int i = 0; i < nVars(); i++) model[i] = value(i); - } else if(status == l_False && conflict.size() == 0) - ok = false; - - - cancelUntil(0); - - - double finalTime = cpuTime(); - if(status == l_True) { - nbSatCalls++; - totalTime4Sat += (finalTime - curTime); - } - if(status == l_False) { - nbUnsatCalls++; - totalTime4Unsat += (finalTime - curTime); - } - - - return status; - -} - - - - - -//================================================================================================= -// Writing CNF to DIMACS: -// -// FIXME: this needs to be rewritten completely. 
- -static Var mapVar(Var x, vec &map, Var &max) { - if(map.size() <= x || map[x] == -1) { - map.growTo(x + 1, -1); - map[x] = max++; - } - return map[x]; -} - - -void Solver::toDimacs(FILE *f, Clause &c, vec &map, Var &max) { - if(satisfied(c)) return; - - for(int i = 0; i < c.size(); i++) - if(value(c[i]) != l_False) - fprintf(f, "%s%d ", sign(c[i]) ? "-" : "", mapVar(var(c[i]), map, max) + 1); - fprintf(f, "0\n"); -} - - -void Solver::toDimacs(const char *file, const vec &assumps) { - FILE *f = fopen(file, "wr"); - if(f == NULL) - fprintf(stderr, "could not open file %s\n", file), exit(1); - toDimacs(f, assumps); - fclose(f); -} - - -void Solver::toDimacs(FILE *f, const vec &assumps) { - // Handle case when solver is in contradictory state: - if(!ok) { - fprintf(f, "p cnf 1 2\n1 0\n-1 0\n"); - return; - } - - vec map; - Var max = 0; - - // Cannot use removeClauses here because it is not safe - // to deallocate them at this point. Could be improved. - int cnt = 0; - for(int i = 0; i < clauses.size(); i++) - if(!satisfied(ca[clauses[i]])) - cnt++; - - for(int i = 0; i < clauses.size(); i++) - if(!satisfied(ca[clauses[i]])) { - Clause &c = ca[clauses[i]]; - for(int j = 0; j < c.size(); j++) - if(value(c[j]) != l_False) - mapVar(var(c[j]), map, max); - } - - // Assumptions are added as unit clauses: - cnt += assumptions.size(); - - fprintf(f, "p cnf %d %d\n", max, cnt); - - for(int i = 0; i < assumptions.size(); i++) { - assert(value(assumptions[i]) != l_False); - fprintf(f, "%s%d 0\n", sign(assumptions[i]) ? 
"-" : "", mapVar(var(assumptions[i]), map, max) + 1); - } - - for(int i = 0; i < clauses.size(); i++) - toDimacs(f, ca[clauses[i]], map, max); - - if(verbosity > 0) - printf("Wrote %d clauses with %d variables.\n", cnt, max); -} - - -//================================================================================================= -// Garbage Collection methods: - -void Solver::relocAll(ClauseAllocator &to) { - // All watchers: - // for (int i = 0; i < watches.size(); i++) - watches.cleanAll(); - watchesBin.cleanAll(); - unaryWatches.cleanAll(); - for(int v = 0; v < nVars(); v++) - for(int s = 0; s < 2; s++) { - Lit p = mkLit(v, s); - // printf(" >>> RELOCING: %s%d\n", sign(p)?"-":"", var(p)+1); - vec &ws = watches[p]; - for(int j = 0; j < ws.size(); j++) - ca.reloc(ws[j].cref, to); - vec &ws2 = watchesBin[p]; - for(int j = 0; j < ws2.size(); j++) - ca.reloc(ws2[j].cref, to); - vec &ws3 = unaryWatches[p]; - for(int j = 0; j < ws3.size(); j++) - ca.reloc(ws3[j].cref, to); - } - - // All reasons: - // - for(int i = 0; i < trail.size(); i++) { - Var v = var(trail[i]); - - if(reason(v) != CRef_Undef && (ca[reason(v)].reloced() || locked(ca[reason(v)]))) - ca.reloc(vardata[v].reason, to); - } - - // All learnt: - // - for(int i = 0; i < learnts.size(); i++) - ca.reloc(learnts[i], to); - - for(int i = 0; i < permanentLearnts.size(); i++) - ca.reloc(permanentLearnts[i], to); - - // All original: - // - for(int i = 0; i < clauses.size(); i++) - ca.reloc(clauses[i], to); - - for(int i = 0; i < unaryWatchedClauses.size(); i++) - ca.reloc(unaryWatchedClauses[i], to); -} - - -void Solver::garbageCollect() { - // Initialize the next region to a size corresponding to the estimated utilization degree. 
This - // is not precise but should avoid some unnecessary reallocations for the new region: - ClauseAllocator to(ca.size() - ca.wasted()); - relocAll(to); - if(verbosity >= 2) - printf("| Garbage collection: %12d bytes => %12d bytes |\n", - ca.size() * ClauseAllocator::Unit_Size, to.size() * ClauseAllocator::Unit_Size); - to.moveTo(ca); -} - -//-------------------------------------------------------------- -// Functions related to MultiThread. -// Useless in case of single core solver (aka original glucose) -// Keep them empty if you just use core solver -//-------------------------------------------------------------- - -bool Solver::panicModeIsEnabled() { - return false; -} - - -void Solver::parallelImportUnaryClauses() { -} - - -bool Solver::parallelImportClauses() { - return false; -} - - -void Solver::parallelExportUnaryClause(Lit p) { -} - - -void Solver::parallelExportClauseDuringSearch(Clause &c) { -} - - -bool Solver::parallelJobIsFinished() { - // Parallel: another job has finished let's quit - return false; -} - - -void Solver::parallelImportClauseDuringConflictAnalysis(Clause &c, CRef confl) { -} diff --git a/libs/mugen/glucose-syrup-4.1/core/Solver.h b/libs/mugen/glucose-syrup-4.1/core/Solver.h deleted file mode 100644 index 54335ffd22..0000000000 --- a/libs/mugen/glucose-syrup-4.1/core/Solver.h +++ /dev/null @@ -1,650 +0,0 @@ -/***************************************************************************************[Solver.h] - Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - LRI - Univ. Paris Sud, France (2009-2013) - Labri - Univ. Bordeaux, France - - Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - Labri - Univ. Bordeaux, France - -Glucose sources are based on MiniSat (see below MiniSat copyrights). 
Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it -is based on. (see below). - -Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel -version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software -without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -- The above and below copyrights notices and this permission notice shall be included in all -copies or substantial portions of the Software; -- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of -the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event -using Glucose Parallel as an embedded SAT engine (single core or not). - - ---------------- Original Minisat Copyrights - -Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson -Copyright (c) 2007-2010, Niklas Sorensson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and -associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT -NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT -OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - **************************************************************************************************/ - -#ifndef Glucose_Solver_h -#define Glucose_Solver_h - -#include "mtl/Heap.h" -#include "mtl/Alg.h" -#include "utils/Options.h" -#include "core/SolverTypes.h" -#include "core/BoundedQueue.h" -#include "core/Constants.h" -#include "mtl/Clone.h" -#include "core/SolverStats.h" - - -namespace Glucose { -// Core stats - -enum CoreStats { - sumResSeen, - sumRes, - sumTrail, - nbPromoted, - originalClausesSeen, - sumDecisionLevels, - nbPermanentLearnts, - nbRemovedClauses, - nbRemovedUnaryWatchedClauses, - nbReducedClauses, - nbDL2, - nbBin, - nbUn, - nbReduceDB, - rnd_decisions, - nbstopsrestarts, - nbstopsrestartssame, - lastblockatrestart, - dec_vars, - clauses_literals, - learnts_literals, - max_literals, - tot_literals, - noDecisionConflict -} ; - -#define coreStatsSize 24 -//================================================================================================= -// Solver -- the main class: - -class Solver : public Clone { - - friend class SolverConfiguration; - -public: - - // Constructor/Destructor: - // - Solver(); - Solver(const Solver &s); - - virtual ~Solver(); - - /** - * Clone function - */ - virtual Clone* clone() const { - return new Solver(*this); - } - - // Problem specification: - // - virtual Var newVar (bool polarity = true, bool dvar = true); // Add a new variable with parameters specifying variable mode. - bool addClause (const vec& ps); // Add a clause to the solver. 
- bool addEmptyClause(); // Add the empty clause, making the solver contradictory. - bool addClause (Lit p); // Add a unit clause to the solver. - bool addClause (Lit p, Lit q); // Add a binary clause to the solver. - bool addClause (Lit p, Lit q, Lit r); // Add a ternary clause to the solver. - virtual bool addClause_( vec& ps); // Add a clause to the solver without making superflous internal copy. Will - // change the passed vector 'ps'. - // Solving: - // - bool simplify (); // Removes already satisfied clauses. - bool solve (const vec& assumps); // Search for a model that respects a given set of assumptions. - lbool solveLimited (const vec& assumps); // Search for a model that respects a given set of assumptions (With resource constraints). - bool solve (); // Search without assumptions. - bool solve (Lit p); // Search for a model that respects a single assumption. - bool solve (Lit p, Lit q); // Search for a model that respects two assumptions. - bool solve (Lit p, Lit q, Lit r); // Search for a model that respects three assumptions. - bool okay () const; // FALSE means solver is in a conflicting state - - // Convenience versions of 'toDimacs()': - void toDimacs (FILE* f, const vec& assumps); // Write CNF to file in DIMACS-format. - void toDimacs (const char *file, const vec& assumps); - void toDimacs (FILE* f, Clause& c, vec& map, Var& max); - void toDimacs (const char* file); - void toDimacs (const char* file, Lit p); - void toDimacs (const char* file, Lit p, Lit q); - void toDimacs (const char* file, Lit p, Lit q, Lit r); - - // Display clauses and literals - void printLit(Lit l); - void printClause(CRef c); - void printInitialClause(CRef c); - - // Variable mode: - // - void setPolarity (Var v, bool b); // Declare which polarity the decision heuristic should use for a variable. Requires mode 'polarity_user'. - void setDecisionVar (Var v, bool b); // Declare if a variable should be eligible for selection in the decision heuristic. 
- - // Read state: - // - lbool value (Var x) const; // The current value of a variable. - lbool value (Lit p) const; // The current value of a literal. - lbool modelValue (Var x) const; // The value of a variable in the last model. The last call to solve must have been satisfiable. - lbool modelValue (Lit p) const; // The value of a literal in the last model. The last call to solve must have been satisfiable. - int nAssigns () const; // The current number of assigned literals. - int nClauses () const; // The current number of original clauses. - int nLearnts () const; // The current number of learnt clauses. - int nVars () const; // The current number of variables. - int nFreeVars () ; - - inline char valuePhase(Var v) {return polarity[v];} - - // Incremental mode - void setIncrementalMode(); - void initNbInitialVars(int nb); - void printIncrementalStats(); - bool isIncremental(); - // Resource contraints: - // - void setConfBudget(int64_t x); - void setPropBudget(int64_t x); - void budgetOff(); - void interrupt(); // Trigger a (potentially asynchronous) interruption of the solver. - void clearInterrupt(); // Clear interrupt indicator flag. - - // Memory managment: - // - virtual void garbageCollect(); - void checkGarbage(double gf); - void checkGarbage(); - - // Extra results: (read-only member variable) - // - vec model; // If problem is satisfiable, this vector contains the model (if any). - vec conflict; // If problem is unsatisfiable (possibly under assumptions), - // this vector represent the final conflict clause expressed in the assumptions. 
- - // Mode of operation: - // - int verbosity; - int verbEveryConflicts; - int showModel; - - // Constants For restarts - double K; - double R; - double sizeLBDQueue; - double sizeTrailQueue; - - // Constants for reduce DB - int firstReduceDB; - int incReduceDB; - int specialIncReduceDB; - unsigned int lbLBDFrozenClause; - bool chanseokStrategy; - int coLBDBound; // Keep all learnts with lbd<=coLBDBound - // Constant for reducing clause - int lbSizeMinimizingClause; - unsigned int lbLBDMinimizingClause; - - // Constant for heuristic - double var_decay; - double max_var_decay; - double clause_decay; - double random_var_freq; - double random_seed; - int ccmin_mode; // Controls conflict clause minimization (0=none, 1=basic, 2=deep). - int phase_saving; // Controls the level of phase saving (0=none, 1=limited, 2=full). - bool rnd_pol; // Use random polarities for branching heuristics. - bool rnd_init_act; // Initialize variable activities with a small random value. - bool randomizeFirstDescent; // the first decisions (until first cnflict) are made randomly - // Useful for syrup! - - // Constant for Memory managment - double garbage_frac; // The fraction of wasted memory allowed before a garbage collection is triggered. - - // Certified UNSAT ( Thanks to Marijn Heule - // New in 2016 : proof in DRAT format, possibility to use binary output - FILE* certifiedOutput; - bool certifiedUNSAT; - bool vbyte; - - void write_char (unsigned char c); - void write_lit (int n); - - - // Panic mode. 
- // Save memory - uint32_t panicModeLastRemoved, panicModeLastRemovedShared; - - bool useUnaryWatched; // Enable unary watched literals - bool promoteOneWatchedClause; // One watched clauses are promotted to two watched clauses if found empty - - // Functions useful for multithread solving - // Useless in the sequential case - // Overide in ParallelSolver - virtual void parallelImportClauseDuringConflictAnalysis(Clause &c,CRef confl); - virtual bool parallelImportClauses(); // true if the empty clause was received - virtual void parallelImportUnaryClauses(); - virtual void parallelExportUnaryClause(Lit p); - virtual void parallelExportClauseDuringSearch(Clause &c); - virtual bool parallelJobIsFinished(); - virtual bool panicModeIsEnabled(); - - - double luby(double y, int x); - - // Statistics - vec stats; - - // Important stats completely related to search. Keep here - uint64_t solves,starts,decisions,propagations,conflicts,conflictsRestarts; - -protected: - - long curRestart; - - // Alpha variables - bool glureduce; - uint32_t restart_inc; - bool luby_restart; - bool adaptStrategies; - uint32_t luby_restart_factor; - bool randomize_on_restarts, fixed_randomize_on_restarts, newDescent; - uint32_t randomDescentAssignments; - bool forceUnsatOnNewDescent; - // Helper structures: - // - struct VarData { CRef reason; int level; }; - static inline VarData mkVarData(CRef cr, int l){ VarData d = {cr, l}; return d; } - - struct Watcher { - CRef cref; - Lit blocker; - Watcher(CRef cr, Lit p) : cref(cr), blocker(p) {} - bool operator==(const Watcher& w) const { return cref == w.cref; } - bool operator!=(const Watcher& w) const { return cref != w.cref; } -/* Watcher &operator=(Watcher w) { - this->cref = w.cref; - this->blocker = w.blocker; - return *this; - } -*/ - }; - - struct WatcherDeleted - { - const ClauseAllocator& ca; - WatcherDeleted(const ClauseAllocator& _ca) : ca(_ca) {} - bool operator()(const Watcher& w) const { return ca[w.cref].mark() == 1; } - }; - - struct 
VarOrderLt { - const vec& activity; - bool operator () (Var x, Var y) const { return activity[x] > activity[y]; } - VarOrderLt(const vec& act) : activity(act) { } - }; - - - // Solver state: - // - int lastIndexRed; - bool ok; // If FALSE, the constraints are already unsatisfiable. No part of the solver state may be used! - double cla_inc; // Amount to bump next clause with. - vec activity; // A heuristic measurement of the activity of a variable. - double var_inc; // Amount to bump next variable with. - OccLists, WatcherDeleted> - watches; // 'watches[lit]' is a list of constraints watching 'lit' (will go there if literal becomes true). - OccLists, WatcherDeleted> - watchesBin; // 'watches[lit]' is a list of constraints watching 'lit' (will go there if literal becomes true). - OccLists, WatcherDeleted> - unaryWatches; // Unary watch scheme (clauses are seen when they become empty - vec clauses; // List of problem clauses. - vec learnts; // List of learnt clauses. - vec permanentLearnts; // The list of learnts clauses kept permanently - vec unaryWatchedClauses; // List of imported clauses (after the purgatory) // TODO put inside ParallelSolver - - vec assigns; // The current assignments. - vec polarity; // The preferred polarity of each variable. - vec forceUNSAT; - void bumpForceUNSAT(Lit q); // Handles the forces - - vec decision; // Declares if a variable is eligible for selection in the decision heuristic. - vec trail; // Assignment stack; stores all assigments made in the order they were made. - vec nbpos; - vec trail_lim; // Separator indices for different decision levels in 'trail'. - vec vardata; // Stores reason and level for each variable. - int qhead; // Head of queue (as index into the trail -- no more explicit propagation queue in MiniSat). - int simpDB_assigns; // Number of top-level assignments since last execution of 'simplify()'. - int64_t simpDB_props; // Remaining number of propagations that must be made before next execution of 'simplify()'. 
- vec assumptions; // Current set of assumptions provided to solve by the user. - Heap order_heap; // A priority queue of variables ordered with respect to the variable activity. - double progress_estimate;// Set by 'search()'. - bool remove_satisfied; // Indicates whether possibly inefficient linear scan for satisfied clauses should be performed in 'simplify'. - vec permDiff; // permDiff[var] contains the current conflict number... Used to count the number of LBD - - - // UPDATEVARACTIVITY trick (see competition'09 companion paper) - vec lastDecisionLevel; - - ClauseAllocator ca; - - int nbclausesbeforereduce; // To know when it is time to reduce clause database - - // Used for restart strategies - bqueue trailQueue,lbdQueue; // Bounded queues for restarts. - float sumLBD; // used to compute the global average of LBD. Restarts... - int sumAssumptions; - CRef lastLearntClause; - - - // Temporaries (to reduce allocation overhead). Each variable is prefixed by the method in which it is - // used, exept 'seen' wich is used in several places. - // - vec seen; - vec analyze_stack; - vec analyze_toclear; - vec add_tmp; - unsigned int MYFLAG; - - // Initial reduceDB strategy - double max_learnts; - double learntsize_adjust_confl; - int learntsize_adjust_cnt; - - // Resource contraints: - // - int64_t conflict_budget; // -1 means no budget. - int64_t propagation_budget; // -1 means no budget. - bool asynch_interrupt; - - // Variables added for incremental mode - int incremental; // Use incremental SAT Solver - int nbVarsInitialFormula; // nb VAR in formula without assumptions (incremental SAT) - double totalTime4Sat,totalTime4Unsat; - int nbSatCalls,nbUnsatCalls; - vec assumptionPositions,initialPositions; - - - // Main internal methods: - // - void insertVarOrder (Var x); // Insert a variable in the decision order priority queue. - Lit pickBranchLit (); // Return the next decision variable. - void newDecisionLevel (); // Begins a new decision level. 
- void uncheckedEnqueue (Lit p, CRef from = CRef_Undef); // Enqueue a literal. Assumes value of literal is undefined. - bool enqueue (Lit p, CRef from = CRef_Undef); // Test if fact 'p' contradicts current state, enqueue otherwise. - CRef propagate (); // Perform unit propagation. Returns possibly conflicting clause. - CRef propagateUnaryWatches(Lit p); // Perform propagation on unary watches of p, can find only conflicts - void cancelUntil (int level); // Backtrack until a certain level. - void analyze (CRef confl, vec& out_learnt, vec & selectors, int& out_btlevel,unsigned int &nblevels,unsigned int &szWithoutSelectors); // (bt = backtrack) - void analyzeFinal (Lit p, vec& out_conflict); // COULD THIS BE IMPLEMENTED BY THE ORDINARIY "analyze" BY SOME REASONABLE GENERALIZATION? - bool litRedundant (Lit p, uint32_t abstract_levels); // (helper method for 'analyze()') - lbool search (int nof_conflicts); // Search for a given number of conflicts. - virtual lbool solve_ (bool do_simp = true, bool turn_off_simp = false); // Main solve method (assumptions given in 'assumptions'). - virtual void reduceDB (); // Reduce the set of learnt clauses. - void removeSatisfied (vec& cs); // Shrink 'cs' to contain only non-satisfied clauses. - void rebuildOrderHeap (); - - void adaptSolver(); // Adapt solver strategies - - // Maintaining Variable/Clause activity: - // - void varDecayActivity (); // Decay all variables with the specified factor. Implemented by increasing the 'bump' value instead. - void varBumpActivity (Var v, double inc); // Increase a variable with the current 'bump' value. - void varBumpActivity (Var v); // Increase a variable with the current 'bump' value. - void claDecayActivity (); // Decay all clauses with the specified factor. Implemented by increasing the 'bump' value instead. - void claBumpActivity (Clause& c); // Increase a clause with the current 'bump' value. 
- - // Operations on clauses: - // - void attachClause (CRef cr); // Attach a clause to watcher lists. - void detachClause (CRef cr, bool strict = false); // Detach a clause to watcher lists. - void detachClausePurgatory(CRef cr, bool strict = false); - void attachClausePurgatory(CRef cr); - void removeClause (CRef cr, bool inPurgatory = false); // Detach and free a clause. - bool locked (const Clause& c) const; // Returns TRUE if a clause is a reason for some implication in the current state. - bool satisfied (const Clause& c) const; // Returns TRUE if a clause is satisfied in the current state. - - template unsigned int computeLBD(const T & lits,int end=-1); - void minimisationWithBinaryResolution(vec &out_learnt); - - virtual void relocAll (ClauseAllocator& to); - - // Misc: - // - int decisionLevel () const; // Gives the current decisionlevel. - uint32_t abstractLevel (Var x) const; // Used to represent an abstraction of sets of decision levels. - CRef reason (Var x) const; - int level (Var x) const; - double progressEstimate () const; // DELETE THIS ?? IT'S NOT VERY USEFUL ... - bool withinBudget () const; - inline bool isSelector(Var v) {return (incremental && v>nbVarsInitialFormula);} - - // Static helpers: - // - - // Returns a random float 0 <= x < 1. Seed must never be 0. - static inline double drand(double& seed) { - seed *= 1389796; - int q = (int)(seed / 2147483647); - seed -= (double)q * 2147483647; - return seed / 2147483647; } - - // Returns a random integer 0 <= x < size. Seed must never be 0. 
- static inline int irand(double& seed, int size) { - return (int)(drand(seed) * size); } -}; - - -//================================================================================================= -// Implementation of inline methods: - -inline CRef Solver::reason(Var x) const { return vardata[x].reason; } -inline int Solver::level (Var x) const { return vardata[x].level; } - -inline void Solver::insertVarOrder(Var x) { - if (!order_heap.inHeap(x) && decision[x]) order_heap.insert(x); } - -inline void Solver::varDecayActivity() { var_inc *= (1 / var_decay); } -inline void Solver::varBumpActivity(Var v) { varBumpActivity(v, var_inc); } -inline void Solver::varBumpActivity(Var v, double inc) { - if ( (activity[v] += inc) > 1e100 ) { - // Rescale: - for (int i = 0; i < nVars(); i++) - activity[i] *= 1e-100; - var_inc *= 1e-100; } - - // Update order_heap with respect to new activity: - if (order_heap.inHeap(v)) - order_heap.decrease(v); } - -inline void Solver::claDecayActivity() { cla_inc *= (1 / clause_decay); } -inline void Solver::claBumpActivity (Clause& c) { - if ( (c.activity() += cla_inc) > 1e20 ) { - // Rescale: - for (int i = 0; i < learnts.size(); i++) - ca[learnts[i]].activity() *= 1e-20; - cla_inc *= 1e-20; } } - -inline void Solver::checkGarbage(void){ return checkGarbage(garbage_frac); } -inline void Solver::checkGarbage(double gf){ - if (ca.wasted() > ca.size() * gf) - garbageCollect(); } - -// NOTE: enqueue does not set the ok flag! (only public methods do) -inline bool Solver::enqueue (Lit p, CRef from) { return value(p) != l_Undef ? 
value(p) != l_False : (uncheckedEnqueue(p, from), true); } -inline bool Solver::addClause (const vec& ps) { ps.copyTo(add_tmp); return addClause_(add_tmp); } -inline bool Solver::addEmptyClause () { add_tmp.clear(); return addClause_(add_tmp); } -inline bool Solver::addClause (Lit p) { add_tmp.clear(); add_tmp.push(p); return addClause_(add_tmp); } -inline bool Solver::addClause (Lit p, Lit q) { add_tmp.clear(); add_tmp.push(p); add_tmp.push(q); return addClause_(add_tmp); } -inline bool Solver::addClause (Lit p, Lit q, Lit r) { add_tmp.clear(); add_tmp.push(p); add_tmp.push(q); add_tmp.push(r); return addClause_(add_tmp); } - inline bool Solver::locked (const Clause& c) const { - if(c.size()>2) - return value(c[0]) == l_True && reason(var(c[0])) != CRef_Undef && ca.lea(reason(var(c[0]))) == &c; - return - (value(c[0]) == l_True && reason(var(c[0])) != CRef_Undef && ca.lea(reason(var(c[0]))) == &c) - || - (value(c[1]) == l_True && reason(var(c[1])) != CRef_Undef && ca.lea(reason(var(c[1]))) == &c); - } -inline void Solver::newDecisionLevel() { trail_lim.push(trail.size()); } - -inline int Solver::decisionLevel () const { return trail_lim.size(); } -inline uint32_t Solver::abstractLevel (Var x) const { return 1 << (level(x) & 31); } -inline lbool Solver::value (Var x) const { return assigns[x]; } -inline lbool Solver::value (Lit p) const { return assigns[var(p)] ^ sign(p); } -inline lbool Solver::modelValue (Var x) const { return model[x]; } -inline lbool Solver::modelValue (Lit p) const { return model[var(p)] ^ sign(p); } -inline int Solver::nAssigns () const { return trail.size(); } -inline int Solver::nClauses () const { return clauses.size(); } -inline int Solver::nLearnts () const { return learnts.size(); } -inline int Solver::nVars () const { return vardata.size(); } -inline int Solver::nFreeVars () { - int a = stats[dec_vars]; - return (int)(a) - (trail_lim.size() == 0 ? 
trail.size() : trail_lim[0]); } -inline void Solver::setPolarity (Var v, bool b) { polarity[v] = b; } -inline void Solver::setDecisionVar(Var v, bool b) -{ - if ( b && !decision[v]) stats[dec_vars]++; - else if (!b && decision[v]) stats[dec_vars]--; - - decision[v] = b; - insertVarOrder(v); -} -inline void Solver::setConfBudget(int64_t x){ conflict_budget = conflicts + x; } -inline void Solver::setPropBudget(int64_t x){ propagation_budget = propagations + x; } -inline void Solver::interrupt(){ asynch_interrupt = true; } -inline void Solver::clearInterrupt(){ asynch_interrupt = false; } -inline void Solver::budgetOff(){ conflict_budget = propagation_budget = -1; } -inline bool Solver::withinBudget() const { - return !asynch_interrupt && - (conflict_budget < 0 || conflicts < (uint64_t)conflict_budget) && - (propagation_budget < 0 || propagations < (uint64_t)propagation_budget); } - -// FIXME: after the introduction of asynchronous interrruptions the solve-versions that return a -// pure bool do not give a safe interface. Either interrupts must be possible to turn off here, or -// all calls to solve must return an 'lbool'. I'm not yet sure which I prefer. 
-inline bool Solver::solve () { budgetOff(); assumptions.clear(); return solve_() == l_True; } -inline bool Solver::solve (Lit p) { budgetOff(); assumptions.clear(); assumptions.push(p); return solve_() == l_True; } -inline bool Solver::solve (Lit p, Lit q) { budgetOff(); assumptions.clear(); assumptions.push(p); assumptions.push(q); return solve_() == l_True; } -inline bool Solver::solve (Lit p, Lit q, Lit r) { budgetOff(); assumptions.clear(); assumptions.push(p); assumptions.push(q); assumptions.push(r); return solve_() == l_True; } -inline bool Solver::solve (const vec& assumps){ budgetOff(); assumps.copyTo(assumptions); return solve_() == l_True; } -inline lbool Solver::solveLimited (const vec& assumps){ assumps.copyTo(assumptions); return solve_(); } -inline bool Solver::okay () const { return ok; } - -inline void Solver::toDimacs (const char* file){ vec as; toDimacs(file, as); } -inline void Solver::toDimacs (const char* file, Lit p){ vec as; as.push(p); toDimacs(file, as); } -inline void Solver::toDimacs (const char* file, Lit p, Lit q){ vec as; as.push(p); as.push(q); toDimacs(file, as); } -inline void Solver::toDimacs (const char* file, Lit p, Lit q, Lit r){ vec as; as.push(p); as.push(q); as.push(r); toDimacs(file, as); } - - - -//================================================================================================= -// Debug etc: - - -inline void Solver::printLit(Lit l) -{ - printf("%s%d:%c", sign(l) ? "-" : "", var(l)+1, value(l) == l_True ? '1' : (value(l) == l_False ? 
'0' : 'X')); -} - - -inline void Solver::printClause(CRef cr) -{ - Clause &c = ca[cr]; - for (int i = 0; i < c.size(); i++){ - printLit(c[i]); - printf(" "); - } -} - -inline void Solver::printInitialClause(CRef cr) -{ - Clause &c = ca[cr]; - for (int i = 0; i < c.size(); i++){ - if(!isSelector(var(c[i]))) { - printLit(c[i]); - printf(" "); - } - } -} - -//================================================================================================= -struct reduceDBAct_lt { - ClauseAllocator& ca; - - reduceDBAct_lt(ClauseAllocator& ca_) : ca(ca_) { - } - - bool operator()(CRef x, CRef y) { - - // Main criteria... Like in MiniSat we keep all binary clauses - if (ca[x].size() > 2 && ca[y].size() == 2) return 1; - - if (ca[y].size() > 2 && ca[x].size() == 2) return 0; - if (ca[x].size() == 2 && ca[y].size() == 2) return 0; - - return ca[x].activity() < ca[y].activity(); - } -}; - -struct reduceDB_lt { - ClauseAllocator& ca; - - reduceDB_lt(ClauseAllocator& ca_) : ca(ca_) { - } - - bool operator()(CRef x, CRef y) { - - // Main criteria... 
Like in MiniSat we keep all binary clauses - if (ca[x].size() > 2 && ca[y].size() == 2) return 1; - - if (ca[y].size() > 2 && ca[x].size() == 2) return 0; - if (ca[x].size() == 2 && ca[y].size() == 2) return 0; - - // Second one based on literal block distance - if (ca[x].lbd() > ca[y].lbd()) return 1; - if (ca[x].lbd() < ca[y].lbd()) return 0; - - - // Finally we can use old activity or size, we choose the last one - return ca[x].activity() < ca[y].activity(); - //return x->size() < y->size(); - - //return ca[x].size() > 2 && (ca[y].size() == 2 || ca[x].activity() < ca[y].activity()); } - } -}; - - -} - - -#endif diff --git a/libs/mugen/glucose-syrup-4.1/core/SolverTypes.h b/libs/mugen/glucose-syrup-4.1/core/SolverTypes.h deleted file mode 100644 index 422ec78bec..0000000000 --- a/libs/mugen/glucose-syrup-4.1/core/SolverTypes.h +++ /dev/null @@ -1,519 +0,0 @@ -/***************************************************************************************[SolverTypes.h] - Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - LRI - Univ. Paris Sud, France (2009-2013) - Labri - Univ. Bordeaux, France - - Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - Labri - Univ. Bordeaux, France - -Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it -is based on. (see below). - -Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel -version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software -without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -- The above and below copyrights notices and this permission notice shall be included in all -copies or substantial portions of the Software; -- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of -the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event -using Glucose Parallel as an embedded SAT engine (single core or not). - - ---------------- Original Minisat Copyrights - -Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson -Copyright (c) 2007-2010, Niklas Sorensson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and -associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT -NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT -OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - **************************************************************************************************/ - - -#ifndef Glucose_SolverTypes_h -#define Glucose_SolverTypes_h - -#include -#include -#include - -#include "mtl/IntTypes.h" -#include "mtl/Alg.h" -#include "mtl/Vec.h" -#include "mtl/Map.h" -#include "mtl/Alloc.h" - - -namespace Glucose { - -//================================================================================================= -// Variables, literals, lifted booleans, clauses: - - -// NOTE! Variables are just integers. No abstraction here. They should be chosen from 0..N, -// so that they can be used as array indices. - -typedef int Var; -#define var_Undef (-1) - - -struct Lit { - int x; - - // Use this as a constructor: - friend Lit mkLit(Var var, bool sign); - - bool operator == (Lit p) const { return x == p.x; } - bool operator != (Lit p) const { return x != p.x; } - bool operator < (Lit p) const { return x < p.x; } // '<' makes p, ~p adjacent in the ordering. -}; - - -inline Lit mkLit (Var var, bool sign = false) { Lit p; p.x = var + var + (int)sign; return p; } -inline Lit operator ~(Lit p) { Lit q; q.x = p.x ^ 1; return q; } -inline Lit operator ^(Lit p, bool b) { Lit q; q.x = p.x ^ (unsigned int)b; return q; } -inline bool sign (Lit p) { return p.x & 1; } -inline int var (Lit p) { return p.x >> 1; } - -// Mapping Literals to and from compact integers suitable for array indexing: -inline int toInt (Var v) { return v; } -inline int toInt (Lit p) { return p.x; } -inline Lit toLit (int i) { Lit p; p.x = i; return p; } - -//const Lit lit_Undef = mkLit(var_Undef, false); // }- Useful special constants. 
-//const Lit lit_Error = mkLit(var_Undef, true ); // } - -const Lit lit_Undef = { -2 }; // }- Useful special constants. -const Lit lit_Error = { -1 }; // } - - -//================================================================================================= -// Lifted booleans: -// -// NOTE: this implementation is optimized for the case when comparisons between values are mostly -// between one variable and one constant. Some care had to be taken to make sure that gcc -// does enough constant propagation to produce sensible code, and this appears to be somewhat -// fragile unfortunately. - -#define l_True (Glucose::lbool((uint8_t)0)) // gcc does not do constant propagation if these are real constants. -#define l_False (Glucose::lbool((uint8_t)1)) -#define l_Undef (Glucose::lbool((uint8_t)2)) - -class lbool { - uint8_t value; - -public: - explicit lbool(uint8_t v) : value(v) { } - - lbool() : value(0) { } - explicit lbool(bool x) : value(!x) { } - - bool operator == (lbool b) const { return ((b.value&2) & (value&2)) | (!(b.value&2)&(value == b.value)); } - bool operator != (lbool b) const { return !(*this == b); } - lbool operator ^ (bool b) const { return lbool((uint8_t)(value^(uint8_t)b)); } - - lbool operator && (lbool b) const { - uint8_t sel = (this->value << 1) | (b.value << 3); - uint8_t v = (0xF7F755F4 >> sel) & 3; - return lbool(v); } - - lbool operator || (lbool b) const { - uint8_t sel = (this->value << 1) | (b.value << 3); - uint8_t v = (0xFCFCF400 >> sel) & 3; - return lbool(v); } - - friend int toInt (lbool l); - friend lbool toLbool(int v); -}; -inline int toInt (lbool l) { return l.value; } -inline lbool toLbool(int v) { return lbool((uint8_t)v); } - -//================================================================================================= -// Clause -- a simple class for representing a clause: - -class Clause; -typedef RegionAllocator::Ref CRef; - -#define BITS_LBD 20 -#ifdef INCREMENTAL - #define BITS_SIZEWITHOUTSEL 19 -#endif -#define 
BITS_REALSIZE 32 -class Clause { - struct { - unsigned mark : 2; - unsigned learnt : 1; - unsigned canbedel : 1; - unsigned extra_size : 2; // extra size (end of 32bits) 0..3 - unsigned seen : 1; - unsigned reloced : 1; - unsigned exported : 2; // Values to keep track of the clause status for exportations - unsigned oneWatched : 1; - unsigned lbd : BITS_LBD; - - unsigned size : BITS_REALSIZE; - -#ifdef INCREMENTAL - unsigned szWithoutSelectors : BITS_SIZEWITHOUTSEL; -#endif - } header; - - union { Lit lit; float act; uint32_t abs; CRef rel; } data[0]; - - friend class ClauseAllocator; - - // NOTE: This constructor cannot be used directly (doesn't allocate enough memory). - template - Clause(const V& ps, int _extra_size, bool learnt) { - assert(_extra_size < (1<<2)); - header.mark = 0; - header.learnt = learnt; - header.extra_size = _extra_size; - header.reloced = 0; - header.size = ps.size(); - header.lbd = 0; - header.canbedel = 1; - header.exported = 0; - header.oneWatched = 0; - header.seen = 0; - for (int i = 0; i < ps.size(); i++) - data[i].lit = ps[i]; - - if (header.extra_size > 0){ - if (header.learnt) - data[header.size].act = 0; - else - calcAbstraction(); - if (header.extra_size > 1) { - data[header.size+1].abs = 0; // learntFrom - } - } - } - -public: - void calcAbstraction() { - assert(header.extra_size > 0); - uint32_t abstraction = 0; - for (int i = 0; i < size(); i++) - abstraction |= 1 << (var(data[i].lit) & 31); - data[header.size].abs = abstraction; } - - int size () const { return header.size; } - void shrink (int i) { assert(i <= size()); - if (header.extra_size > 0) { - data[header.size-i] = data[header.size]; - if (header.extra_size > 1) { // Special case for imported clauses - data[header.size-i-1] = data[header.size-1]; - } - } - header.size -= i; } - void pop () { shrink(1); } - bool learnt () const { return header.learnt; } - void nolearnt () { header.learnt = false;} - bool has_extra () const { return header.extra_size > 0; } - uint32_t 
mark () const { return header.mark; } - void mark (uint32_t m) { header.mark = m; } - const Lit& last () const { return data[header.size-1].lit; } - - bool reloced () const { return header.reloced; } - CRef relocation () const { return data[0].rel; } - void relocate (CRef c) { header.reloced = 1; data[0].rel = c; } - - // NOTE: somewhat unsafe to change the clause in-place! Must manually call 'calcAbstraction' afterwards for - // subsumption operations to behave correctly. - Lit& operator [] (int i) { return data[i].lit; } - Lit operator [] (int i) const { return data[i].lit; } - operator const Lit* (void) const { return (Lit*)data; } - - float& activity () { assert(header.extra_size > 0); return data[header.size].act; } - uint32_t abstraction () const { assert(header.extra_size > 0); return data[header.size].abs; } - - // Handle imported clauses lazy sharing - bool wasImported() const {return header.extra_size > 1;} - uint32_t importedFrom () const { assert(header.extra_size > 1); return data[header.size + 1].abs;} - void setImportedFrom(uint32_t ifrom) {assert(header.extra_size > 1); data[header.size+1].abs = ifrom;} - - Lit subsumes (const Clause& other) const; - void strengthen (Lit p); - void setLBD(int i) {header.lbd=i; /*if (i < (1<<(BITS_LBD-1))) header.lbd = i; else header.lbd = (1<<(BITS_LBD-1));*/} - // unsigned int& lbd () { return header.lbd; } - unsigned int lbd () const { return header.lbd; } - void setCanBeDel(bool b) {header.canbedel = b;} - bool canBeDel() {return header.canbedel;} - void setSeen(bool b) {header.seen = b;} - bool getSeen() {return header.seen;} - void setExported(unsigned int b) {header.exported = b;} - unsigned int getExported() {return header.exported;} - void setOneWatched(bool b) {header.oneWatched = b;} - bool getOneWatched() {return header.oneWatched;} -#ifdef INCREMNENTAL - void setSizeWithoutSelectors (unsigned int n) {header.szWithoutSelectors = n; } - unsigned int sizeWithoutSelectors () const { return 
header.szWithoutSelectors; } -#endif - -}; - - -//================================================================================================= -// ClauseAllocator -- a simple class for allocating memory for clauses: - - - const CRef CRef_Undef = RegionAllocator::Ref_Undef; - class ClauseAllocator : public RegionAllocator - { - static int clauseWord32Size(int size, int extra_size){ - return (sizeof(Clause) + (sizeof(Lit) * (size + extra_size))) / sizeof(uint32_t); } - public: - bool extra_clause_field; - - ClauseAllocator(uint32_t start_cap) : RegionAllocator(start_cap), extra_clause_field(false){} - ClauseAllocator() : extra_clause_field(false){} - - void moveTo(ClauseAllocator& to){ - to.extra_clause_field = extra_clause_field; - RegionAllocator::moveTo(to); } - - template - CRef alloc(const Lits& ps, bool learnt = false, bool imported = false) - { - assert(sizeof(Lit) == sizeof(uint32_t)); - assert(sizeof(float) == sizeof(uint32_t)); - - bool use_extra = learnt | extra_clause_field; - int extra_size = imported?3:(use_extra?1:0); - CRef cid = RegionAllocator::alloc(clauseWord32Size(ps.size(), extra_size)); - new (lea(cid)) Clause(ps, extra_size, learnt); - - return cid; - } - - // Deref, Load Effective Address (LEA), Inverse of LEA (AEL): - Clause& operator[](Ref r) { return (Clause&)RegionAllocator::operator[](r); } - const Clause& operator[](Ref r) const { return (Clause&)RegionAllocator::operator[](r); } - Clause* lea (Ref r) { return (Clause*)RegionAllocator::lea(r); } - const Clause* lea (Ref r) const { return (Clause*)RegionAllocator::lea(r); } - Ref ael (const Clause* t){ return RegionAllocator::ael((uint32_t*)t); } - - void free(CRef cid) - { - Clause& c = operator[](cid); - RegionAllocator::free(clauseWord32Size(c.size(), c.has_extra())); - } - - void reloc(CRef& cr, ClauseAllocator& to) - { - Clause& c = operator[](cr); - - if (c.reloced()) { cr = c.relocation(); return; } - - cr = to.alloc(c, c.learnt(), c.wasImported()); - c.relocate(cr); - - // 
Copy extra data-fields: - // (This could be cleaned-up. Generalize Clause-constructor to be applicable here instead?) - to[cr].mark(c.mark()); - if (to[cr].learnt()) { - to[cr].activity() = c.activity(); - to[cr].setLBD(c.lbd()); - to[cr].setExported(c.getExported()); - to[cr].setOneWatched(c.getOneWatched()); -#ifdef INCREMENTAL - to[cr].setSizeWithoutSelectors(c.sizeWithoutSelectors()); -#endif - to[cr].setCanBeDel(c.canBeDel()); - if (c.wasImported()) { - to[cr].setImportedFrom(c.importedFrom()); - } - } - else { - to[cr].setSeen(c.getSeen()); - if (to[cr].has_extra()) to[cr].calcAbstraction(); - } - } - }; - - -//================================================================================================= -// OccLists -- a class for maintaining occurence lists with lazy deletion: - -template -class OccLists -{ - vec occs; - vec dirty; - vec dirties; - Deleted deleted; - - public: - OccLists(const Deleted& d) : deleted(d) {} - - void init (const Idx& idx){ occs.growTo(toInt(idx)+1); dirty.growTo(toInt(idx)+1, 0); } - // Vec& operator[](const Idx& idx){ return occs[toInt(idx)]; } - Vec& operator[](const Idx& idx){ return occs[toInt(idx)]; } - Vec& lookup (const Idx& idx){ if (dirty[toInt(idx)]) clean(idx); return occs[toInt(idx)]; } - - void cleanAll (); - void copyTo(OccLists ©) const { - - copy.occs.growTo(occs.size()); - for(int i = 0;i -void OccLists::cleanAll() -{ - for (int i = 0; i < dirties.size(); i++) - // Dirties may contain duplicates so check here if a variable is already cleaned: - if (dirty[toInt(dirties[i])]) - clean(dirties[i]); - dirties.clear(); -} - - -template -void OccLists::clean(const Idx& idx) -{ - Vec& vec = occs[toInt(idx)]; - int i, j; - for (i = j = 0; i < vec.size(); i++) - if (!deleted(vec[i])) - vec[j++] = vec[i]; - vec.shrink(i - j); - dirty[toInt(idx)] = 0; -} - - -//================================================================================================= -// CMap -- a class for mapping clauses to values: - - 
-template -class CMap -{ - struct CRefHash { - uint32_t operator()(CRef cr) const { return (uint32_t)cr; } }; - - typedef Map HashTable; - HashTable map; - - public: - // Size-operations: - void clear () { map.clear(); } - int size () const { return map.elems(); } - - - // Insert/Remove/Test mapping: - void insert (CRef cr, const T& t){ map.insert(cr, t); } - void growTo (CRef cr, const T& t){ map.insert(cr, t); } // NOTE: for compatibility - void remove (CRef cr) { map.remove(cr); } - bool has (CRef cr, T& t) { return map.peek(cr, t); } - - // Vector interface (the clause 'c' must already exist): - const T& operator [] (CRef cr) const { return map[cr]; } - T& operator [] (CRef cr) { return map[cr]; } - - // Iteration (not transparent at all at the moment): - int bucket_count() const { return map.bucket_count(); } - const vec& bucket(int i) const { return map.bucket(i); } - - // Move contents to other map: - void moveTo(CMap& other){ map.moveTo(other.map); } - - // TMP debug: - void debug(){ - printf(" --- size = %d, bucket_count = %d\n", size(), map.bucket_count()); } -}; - - -/*_________________________________________________________________________________________________ -| -| subsumes : (other : const Clause&) -> Lit -| -| Description: -| Checks if clause subsumes 'other', and at the same time, if it can be used to simplify 'other' -| by subsumption resolution. 
-| -| Result: -| lit_Error - No subsumption or simplification -| lit_Undef - Clause subsumes 'other' -| p - The literal p can be deleted from 'other' -|________________________________________________________________________________________________@*/ -inline Lit Clause::subsumes(const Clause& other) const -{ - //if (other.size() < size() || (extra.abst & ~other.extra.abst) != 0) - //if (other.size() < size() || (!learnt() && !other.learnt() && (extra.abst & ~other.extra.abst) != 0)) - assert(!header.learnt); assert(!other.header.learnt); - assert(header.extra_size > 0); assert(other.header.extra_size > 0); - if (other.header.size < header.size || (data[header.size].abs & ~other.data[other.header.size].abs) != 0) - return lit_Error; - - Lit ret = lit_Undef; - const Lit* c = (const Lit*)(*this); - const Lit* d = (const Lit*)other; - - for (unsigned i = 0; i < header.size; i++) { - // search for c[i] or ~c[i] - for (unsigned j = 0; j < other.header.size; j++) - if (c[i] == d[j]) - goto ok; - else if (ret == lit_Undef && c[i] == ~d[j]){ - ret = c[i]; - goto ok; - } - - // did not find it - return lit_Error; - ok:; - } - - return ret; -} - -inline void Clause::strengthen(Lit p) -{ - remove(*this, p); - calcAbstraction(); -} - -//================================================================================================= -} - - -#endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/Clone.h b/libs/mugen/glucose-syrup-4.1/mtl/Clone.h deleted file mode 100644 index c0ec225ced..0000000000 --- a/libs/mugen/glucose-syrup-4.1/mtl/Clone.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef Glucose_Clone_h -#define Glucose_Clone_h - - -namespace Glucose { - - class Clone { - public: - virtual Clone* clone() const = 0; - }; -}; - -#endif \ No newline at end of file diff --git a/libs/mugen/glucose-syrup-4.1/mtl/Vec.h b/libs/mugen/glucose-syrup-4.1/mtl/Vec.h deleted file mode 100644 index 77841c7ea7..0000000000 --- a/libs/mugen/glucose-syrup-4.1/mtl/Vec.h +++ /dev/null @@ -1,152 +0,0 @@ 
-/*******************************************************************************************[Vec.h] -Copyright (c) 2003-2007, Niklas Een, Niklas Sorensson -Copyright (c) 2007-2010, Niklas Sorensson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and -associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT -NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT -OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -**************************************************************************************************/ - -#ifndef Glucose_Vec_h -#define Glucose_Vec_h - -#include -#include - -#include "mtl/IntTypes.h" -#include "mtl/XAlloc.h" -#include - -namespace Glucose { - -//================================================================================================= -// Automatically resizable arrays -// -// NOTE! 
Don't use this vector on datatypes that cannot be re-located in memory (with realloc) - -template -class vec { - T* data; - int sz; - int cap; - - // Don't allow copying (error prone): - vec& operator = (vec& other) { assert(0); return *this; } - vec (vec& other) { assert(0); } - - // Helpers for calculating next capacity: - static inline int imax (int x, int y) { int mask = (y-x) >> (sizeof(int)*8-1); return (x&mask) + (y&(~mask)); } - //static inline void nextCap(int& cap){ cap += ((cap >> 1) + 2) & ~1; } - static inline void nextCap(int& cap){ cap += ((cap >> 1) + 2) & ~1; } - -public: - // Constructors: - vec() : data(NULL) , sz(0) , cap(0) { } - explicit vec(int size) : data(NULL) , sz(0) , cap(0) { growTo(size); } - vec(int size, const T& pad) : data(NULL) , sz(0) , cap(0) { growTo(size, pad); } - ~vec() { clear(true); } - - // Pointer to first element: - operator T* (void) { return data; } - - // Size operations: - int size (void) const { return sz; } - void shrink (int nelems) { assert(nelems <= sz); for (int i = 0; i < nelems; i++) sz--, data[sz].~T(); } - void shrink_ (int nelems) { assert(nelems <= sz); sz -= nelems; } - int capacity (void) const { return cap; } - void capacity (int min_cap); - void growTo (int size); - void growTo (int size, const T& pad); - void clear (bool dealloc = false); - - // Stack interface: - void push (void) { if (sz == cap) capacity(sz+1); new (&data[sz]) T(); sz++; } - void push (const T& elem) { if (sz == cap) capacity(sz+1); data[sz++] = elem; } - void push_ (const T& elem) { assert(sz < cap); data[sz++] = elem; } - void pop (void) { assert(sz > 0); sz--, data[sz].~T(); } - - void remove(const T &elem) { - int tmp; - for(tmp = 0;tmp& copy) const { copy.clear(); copy.growTo(sz); for (int i = 0; i < sz; i++) copy[i] = data[i]; } - void moveTo(vec& dest) { dest.clear(true); dest.data = data; dest.sz = sz; dest.cap = cap; data = NULL; sz = 0; cap = 0; } - void memCopyTo(vec& copy) const{ - copy.capacity(cap); - copy.sz = sz; - 
memcpy(copy.data,data,sizeof(T)*cap); - } - -}; - - -template -void vec::capacity(int min_cap) { - if (cap >= min_cap) return; - int add = imax((min_cap - cap + 1) & ~1, ((cap >> 1) + 2) & ~1); // NOTE: grow by approximately 3/2 - if (add > INT_MAX - cap || ((data = (T*)::realloc(data, (cap += add) * sizeof(T))) == NULL) && errno == ENOMEM) - throw OutOfMemoryException(); - } - - -template -void vec::growTo(int size, const T& pad) { - if (sz >= size) return; - capacity(size); - for (int i = sz; i < size; i++) data[i] = pad; - sz = size; } - - -template -void vec::growTo(int size) { - if (sz >= size) return; - capacity(size); - for (int i = sz; i < size; i++) new (&data[i]) T(); - sz = size; } - - -template -void vec::clear(bool dealloc) { - if (data != NULL){ - for (int i = 0; i < sz; i++) data[i].~T(); - sz = 0; - if (dealloc) free(data), data = NULL, cap = 0; } } - -//================================================================================================= -} - -#endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/VecThreads.h b/libs/mugen/glucose-syrup-4.1/mtl/VecThreads.h deleted file mode 100644 index e3951958ee..0000000000 --- a/libs/mugen/glucose-syrup-4.1/mtl/VecThreads.h +++ /dev/null @@ -1,216 +0,0 @@ -/*******************************************************************************************[VecThreads.h] - * Threads safe version used in Glucose-Syrup, 2015, Gilles Audemard, Laurent Simon -Copyright (c) 2003-2007, Niklas Een, Niklas Sorensson -Copyright (c) 2007-2010, Niklas Sorensson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and -associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice 
and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT -NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT -OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -**************************************************************************************************/ - -#ifndef Glucose_VecThreads_h -#define Glucose_VecThreads_h - -#include -#include - -#include "mtl/IntTypes.h" -#include "mtl/XAlloc.h" -#include - -namespace Glucose { - -//================================================================================================= -// Automatically resizable arrays -// -// NOTE! Don't use this vector on datatypes that cannot be re-located in memory (with realloc) - -template -class vecThreads { - T* data; - int sz; - int cap; - bool lock; - int nbusers; - - // Don't allow copying (error prone): - vecThreads& operator = (vecThreads& other) { assert(0); return *this; } - vecThreads (vecThreads& other) { assert(0); } - - // Helpers for calculating next capacity: - static inline int imax (int x, int y) { int mask = (y-x) >> (sizeof(int)*8-1); return (x&mask) + (y&(~mask)); } - //static inline void nextCap(int& cap){ cap += ((cap >> 1) + 2) & ~1; } - static inline void nextCap(int& cap){ cap += ((cap >> 1) + 2) & ~1; } - -public: - // Constructors: - vecThreads() : data(NULL) , sz(0) , cap(0), lock(false), nbusers(0) { } - explicit vecThreads(int size) : data(NULL) , sz(0) , cap(0), lock(false), nbusers(0) { growTo(size); } - vecThreads(int size, const T& pad) : data(NULL) , sz(0) , cap(0), lock(false), nbusers(0) { growTo(size, pad); } - ~vecThreads() { clear(true); } - 
- // Pointer to first element: - operator T* (void) { return data; } - - // Size operations: - int size (void) const { return sz; } - void shrink (int nelems) { assert(nelems <= sz); for (int i = 0; i < nelems; i++) sz--, data[sz].~T(); } - void shrink_ (int nelems) { assert(nelems <= sz); sz -= nelems; } - int capacity (void) const { return cap; } - void capacity (int min_cap); - void capacityProtected (int min_cap); - void growTo (int size); - void growTo (int size, const T& pad); - void clear (bool dealloc = false); - - // Stack interface: - void push (void) { if (sz == cap) capacity(sz+1); new (&data[sz]) T(); sz++; } - void push (const T& elem) { if (sz == cap) capacity(sz+1); data[sz++] = elem; } - void push_ (const T& elem) { assert(sz < cap); data[sz++] = elem; } - void pop (void) { assert(sz > 0); sz--, data[sz].~T(); } - - void startMaintenance(); - void endMaintenance(); - void startLoop(); - void endLoop(); - - void remove(const T &elem) { - int tmp; - for(tmp = 0;tmp& copy) const { copy.clear(); copy.growTo(sz); - startLoop();for (int i = 0; i < sz; i++) copy[i] = data[i]; endLoop();} - void moveTo(vecThreads& dest) { - assert(false); // This cannot be made thread safe from here. 
- dest.clear(true); - startMaintenance(); - dest.data = data; dest.sz = sz; dest.cap = cap; data = NULL; sz = 0; cap = 0; - endMaintenance(); } - void memCopyTo(vecThreads& copy) const{ - copy.capacity(cap); - copy.sz = sz; - memcpy(copy.data,data,sizeof(T)*cap); - } - -}; - -template -void vecThreads::startLoop() { - bool retry = true; - while (retry) { - while(!__sync_bool_compare_and_swap(&lock,false, true)); - if (nbusers >= 0) {nbusers++; retry=false;} - lock = false; - } -} - -template -void vecThreads::endLoop() { - while(!__sync_bool_compare_and_swap(&lock,false, true)); - nbusers--; - lock = false; -} - -template -inline void vecThreads::startMaintenance() { - bool retry = true; - while (retry) { - while(!__sync_bool_compare_and_swap(&lock,false, true)); - if (nbusers == 0) {nbusers--; retry=false;} - lock = false; - } -} - -template -inline void vecThreads::endMaintenance() { - while(!__sync_bool_compare_and_swap(&lock,false, true)); - nbusers++; - lock = false; -} -template -inline void vecThreads::capacityProtected(int min_cap) { - startMaintenance(); - capacity(min_cap); - endMaintenance(); -} - -template -void vecThreads::capacity(int min_cap) { - if (cap >= min_cap) return; - - int add = imax((min_cap - cap + 1) & ~1, ((cap >> 1) + 2) & ~1); // NOTE: grow by approximately 3/2 - if (add > INT_MAX - cap || ((data = (T*)::realloc(data, (cap += add) * sizeof(T))) == NULL) && errno == ENOMEM) - throw OutOfMemoryException(); - - } - - -template -void vecThreads::growTo(int size, const T& pad) { - if (sz >= size) return; - startMaintenance(); - capacity(size); - for (int i = sz; i < size; i++) data[i] = pad; - sz = size; - endMaintenance(); -} - - -template -void vecThreads::growTo(int size) { - if (sz >= size) return; - startMaintenance(); - capacity(size); - for (int i = sz; i < size; i++) new (&data[i]) T(); - sz = size; - endMaintenance(); -} - - -template -void vecThreads::clear(bool dealloc) { - if (data != NULL){ - startMaintenance(); - for (int i = 
0; i < sz; i++) data[i].~T(); - sz = 0; - if (dealloc) free(data), data = NULL, cap = 0; - endMaintenance();} } - -//================================================================================================= -} - -#endif diff --git a/libs/mugen/glucose-syrup-4.1/parallel/MultiSolvers.cc b/libs/mugen/glucose-syrup-4.1/parallel/MultiSolvers.cc deleted file mode 100644 index c55a374660..0000000000 --- a/libs/mugen/glucose-syrup-4.1/parallel/MultiSolvers.cc +++ /dev/null @@ -1,699 +0,0 @@ -/***************************************************************************************[MultiSolvers.cc] - Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - LRI - Univ. Paris Sud, France (2009-2013) - Labri - Univ. Bordeaux, France - - Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - Labri - Univ. Bordeaux, France - -Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it -is based on. (see below). - -Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel -version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software -without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -- The above and below copyrights notices and this permission notice shall be included in all -copies or substantial portions of the Software; -- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of -the authors (Gilles Audemard / Laurent Simon). 
This is also the case for any competitive event -using Glucose Parallel as an embedded SAT engine (single core or not). - - ---------------- Original Minisat Copyrights - -Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson -Copyright (c) 2007-2010, Niklas Sorensson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and -associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT -NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT -OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- **************************************************************************************************/ - -#include -#include "parallel/MultiSolvers.h" -#include "mtl/Sort.h" -#include "utils/System.h" -#include "simp/SimpSolver.h" -#include -#include -#include "parallel/SolverConfiguration.h" - -using namespace Glucose; - -extern const char *_parallel; -extern const char *_cunstable; -// Options at the parallel solver level -static IntOption opt_nbsolversmultithreads(_parallel, "nthreads", "Number of core threads for syrup (0 for automatic)", 0); -static IntOption opt_maxnbsolvers(_parallel, "maxnbthreads", "Maximum number of core threads to ask for (when nbthreads=0)", 4); -static IntOption opt_maxmemory(_parallel, "maxmemory", "Maximum memory to use (in Mb, 0 for no software limit)", 20000); -static IntOption opt_statsInterval(_parallel, "statsinterval", "Seconds (real time) between two stats reports", 5); -// -// Shared with ClausesBuffer.cc -BoolOption opt_whenFullRemoveOlder(_parallel, "removeolder", "When the FIFO for exchanging clauses between threads is full, remove older clauses", false); -IntOption opt_fifoSizeByCore(_parallel, "fifosize", "Size of the FIFO structure for exchanging clauses between threads, by threads", 100000); -// -// Shared options with Solver.cc -BoolOption opt_dontExportDirectReusedClauses(_cunstable, "reusedClauses", "Don't export directly reused clauses", false); -BoolOption opt_plingeling(_cunstable, "plingeling", "plingeling strategy for sharing clauses (exploratory feature)", false); - -#include -#include -#include - - -static inline double cpuTime(void) { - struct rusage ru; - getrusage(RUSAGE_SELF, &ru); - return (double) ru.ru_utime.tv_sec + (double) ru.ru_utime.tv_usec / 1000000; -} - - -void MultiSolvers::informEnd(lbool res) { - result = res; - pthread_cond_broadcast(&cfinished); -} - - -MultiSolvers::MultiSolvers(ParallelSolver *s) : - use_simplification(true), ok(true), maxnbthreads(4), 
nbthreads(opt_nbsolversmultithreads), nbsolvers(opt_nbsolversmultithreads), nbcompanions(4), nbcompbysolver(2), - allClonesAreBuilt(0), showModel(false), winner(-1), var_decay(1 / 0.95), clause_decay(1 / 0.999), cla_inc(1), var_inc(1), random_var_freq(0.02), restart_first(100), - restart_inc(1.5), learntsize_factor((double) 1 / (double) 3), learntsize_inc(1.1), expensive_ccmin(true), polarity_mode(polarity_false), maxmemory(opt_maxmemory), - maxnbsolvers(opt_maxnbsolvers), verb(0), verbEveryConflicts(10000), numvar(0), numclauses(0) { - result = l_Undef; - SharedCompanion *sc = new SharedCompanion(); - this->sharedcomp = sc; - - // Generate only solver 0. - // It loads the formula - // All others solvers are clone of this one - solvers.push(s); - s->verbosity = 0; // No reportf in solvers... All is done in MultiSolver - s->setThreadNumber(0); - //s->belongsto = this; - s->sharedcomp = sc; - sc->addSolver(s); - assert(solvers[0]->threadNumber() == 0); - - pthread_mutex_init(&m, NULL); //PTHREAD_MUTEX_INITIALIZER; - pthread_mutex_init(&mfinished, NULL); //PTHREAD_MUTEX_INITIALIZER; - pthread_cond_init(&cfinished, NULL); - - if(nbsolvers > 0) - fprintf(stdout, "c %d solvers engines and 1 companion as a blackboard created.\n", nbsolvers); -} - - -MultiSolvers::MultiSolvers() : MultiSolvers(new ParallelSolver(-1)) { - -} - - -MultiSolvers::~MultiSolvers() { } - - -/** - * Generate All solvers - */ - -void MultiSolvers::generateAllSolvers() { - assert(solvers[0] != NULL); - assert(allClonesAreBuilt == 0); - - for(int i = 1; i < nbsolvers; i++) { - ParallelSolver *s = (ParallelSolver *) solvers[0]->clone(); - solvers.push(s); - s->verbosity = 0; // No reportf in solvers... 
All is done in MultiSolver - s->setThreadNumber(i); - s->sharedcomp = this->sharedcomp; - this->sharedcomp->addSolver(s); - assert(solvers[i]->threadNumber() == i); - } - - adjustParameters(); - - allClonesAreBuilt = 1; -} - - -/** - * Choose solver for threads i (if no given in command line see above) - */ - - -ParallelSolver *MultiSolvers::retrieveSolver(int i) { - return new ParallelSolver(i); -} - - -Var MultiSolvers::newVar(bool sign, bool dvar) { - assert(solvers[0] != NULL); - numvar++; - int v; - sharedcomp->newVar(sign); - if(!allClonesAreBuilt) { // At the beginning we want to generate only solvers 0 - v = solvers[0]->newVar(sign, dvar); - assert(numvar == v + 1); // Just a useless check - } else { - for(int i = 0; i < nbsolvers; i++) { - v = solvers[i]->newVar(sign, dvar); - } - } - return numvar; -} - - -bool MultiSolvers::addClause_(vec &ps) { - assert(solvers[0] != NULL); // There is at least one solver. - // Check if clause is satisfied and remove false/duplicate literals: - if(!okay()) return false; - - sort(ps); - Lit p; - int i, j; - for(i = j = 0, p = lit_Undef; i < ps.size(); i++) - if(solvers[0]->value(ps[i]) == l_True || ps[i] == ~p) - return true; - else if(solvers[0]->value(ps[i]) != l_False && ps[i] != p) - ps[j++] = p = ps[i]; - ps.shrink(i - j); - - - if(ps.size() == 0) { - return ok = false; - } - else if(ps.size() == 1) { - assert(solvers[0]->value(ps[0]) == l_Undef); // TODO : Passes values to all threads - solvers[0]->uncheckedEnqueue(ps[0]); - if(!allClonesAreBuilt) { - return ok = ((solvers[0]->propagate()) == CRef_Undef); // checks only main solver here for propagation constradiction - } - - // Here, all clones are built. 
- // Gives the unit clause to everybody - for(int i = 0; i < nbsolvers; i++) - solvers[i]->uncheckedEnqueue(ps[0]); - return ok = ((solvers[0]->propagate()) == CRef_Undef); // checks only main solver here for propagation constradiction - } else { - // printf("Adding clause %0xd for solver %d.\n",(void*)c, thn); - // At the beginning only solver 0 load the formula - solvers[0]->addClause(ps); - - if(!allClonesAreBuilt) { - numclauses++; - return true; - } - // Clones are built, need to pass the clause to all the threads - for(int i = 1; i < nbsolvers; i++) { - solvers[i]->addClause(ps); - } - numclauses++; - } - return true; -} - - -bool MultiSolvers::simplify() { - assert(solvers[0] != NULL); // There is at least one solver. - - if(!okay()) return false; - return ok = solvers[0]->simplify(); -} - - -bool MultiSolvers::eliminate() { - - // TODO allow variable elimination when all threads are built! - assert(allClonesAreBuilt == false); - - SimpSolver *s = (SimpSolver *) getPrimarySolver(); - s->use_simplification = use_simplification; - if(!use_simplification) return true; - - return s->eliminate(true); -} - - -// TODO: Use a template here -void *localLaunch(void *arg) { - ParallelSolver *s = (ParallelSolver *) arg; - - (void) s->solve(); - - pthread_exit(NULL); -} - - -#define MAXIMUM_SLEEP_DURATION 5 - - -void MultiSolvers::printStats() { - static int nbprinted = 1; - double cpu_time = cpuTime(); - printf("c\n"); - - printf("c |-------------------------------------------------------------------------------------------------------|\n"); - printf("c | id | starts | decisions | confls | Init T | learnts | exported | imported | promoted | %% | \n"); - printf("c |-------------------------------------------------------------------------------------------------------|\n"); - - //printf("%.0fs | ",cpu_time); - for(int i = 0; i < solvers.size(); i++) { - solvers[i]->reportProgress(); - //printf(" %2d: %12ld confl. 
|", i, (long int) solvers[i]->conflicts); - } - long long int totalconf = 0; - long long int totalprop = 0; - for(int i = 0; i < solvers.size(); i++) { - totalconf += (long int) solvers[i]->conflicts; - totalprop += solvers[i]->propagations; - } - printf("c \n"); - - printf("c synthesis %11lld conflicts %11lld propagations %8.0f conflicts/sec %8.0f propagations/sec\n", - totalconf, totalprop, (double) totalconf / cpu_time, (double) totalprop / cpu_time); - - - nbprinted++; -} - - -// Still a ugly function... To be rewritten with some statistics class some day -void MultiSolvers::printFinalStats() { - sharedcomp->printStats(); - printf("c\nc\n"); - printf("c\n"); - printf("c |---------------------------------------- FINAL STATS --------------------------------------------------|\n"); - printf("c\n"); - - printf("c |---------------|-----------------"); - for(int i = 0; i < solvers.size(); i++) - printf("|------------"); - printf("|\n"); - - printf("c | Threads | Total "); - for(int i = 0; i < solvers.size(); i++) { - printf("| %10d ", i); - } - printf("|\n"); - - printf("c |---------------|-----------------"); - for(int i = 0; i < solvers.size(); i++) - printf("|------------"); - printf("|\n"); - - -//-- - printf("c | Conflicts "); - long long int totalconf = 0; - for(int i = 0; i < solvers.size(); i++) - totalconf += solvers[i]->conflicts; - printf("| %15lld ", totalconf); - - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->conflicts); - printf("|\n"); - - //-- - printf("c | Decisions "); - long long int totaldecs = 0; - for(int i = 0; i < solvers.size(); i++) - totaldecs += solvers[i]->decisions; - printf("| %15lld ", totaldecs); - - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->decisions); - printf("|\n"); - - //-- - printf("c | Propagations "); - long long int totalprops = 0; - for(int i = 0; i < solvers.size(); i++) - totalprops += solvers[i]->propagations; - printf("| %15lld ", totalprops); - - 
for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->propagations); - printf("|\n"); - - - printf("c | Avg_Trail "); - printf("| "); - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->conflicts==0 ? 0 : solvers[i]->stats[sumTrail] / solvers[i]->conflicts); - printf("|\n"); - - //-- - printf("c | Avg_DL "); - printf("| "); - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->conflicts==0 ? 0 : solvers[i]->stats[sumDecisionLevels] / solvers[i]->conflicts); - printf("|\n"); - - //-- - printf("c | Avg_Res "); - printf("| "); - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->conflicts==0 ? 0 : solvers[i]->stats[sumRes] / solvers[i]->conflicts); - printf("|\n"); - - //-- - printf("c | Avg_Res_Seen "); - printf("| "); - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->conflicts==0 ? 0 : solvers[i]->stats[sumResSeen] / solvers[i]->conflicts); - printf("|\n"); - - //-- - - printf("c |---------------|-----------------"); - for(int i = 0; i < solvers.size(); i++) - printf("|------------"); - printf("|\n"); - - printf("c | Exported "); - uint64_t exported = 0; - for(int i = 0; i < solvers.size(); i++) - exported += solvers[i]->stats[nbexported]; - printf("| %15" PRIu64" ", exported); - - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->stats[nbexported]); - printf("|\n"); -//-- - printf("c | Imported "); - uint64_t imported = 0; - for(int i = 0; i < solvers.size(); i++) - imported += solvers[i]->stats[nbimported]; - printf("| %15" PRIu64" ", imported); - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->stats[nbimported]); - printf("|\n"); -//-- - - printf("c | Good "); - uint64_t importedGood = 0; - for(int i = 0; i < solvers.size(); i++) - importedGood += solvers[i]->stats[nbImportedGoodClauses]; - printf("| %15" PRIu64" ", importedGood); - for(int i = 0; i < 
solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->stats[nbImportedGoodClauses]); - printf("|\n"); -//-- - - printf("c | Purge "); - uint64_t importedPurg = 0; - for(int i = 0; i < solvers.size(); i++) - importedPurg += solvers[i]->stats[nbimportedInPurgatory]; - printf("| %15" PRIu64" ", importedPurg); - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->stats[nbimportedInPurgatory]); - printf("|\n"); -//-- - - printf("c | Promoted "); - uint64_t promoted = 0; - for(int i = 0; i < solvers.size(); i++) - promoted += solvers[i]->stats[nbPromoted]; - printf("| %15" PRIu64" ", promoted); - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->stats[nbPromoted]); - printf("|\n"); -//-- - - printf("c | Remove_Imp "); - uint64_t removedimported = 0; - for(int i = 0; i < solvers.size(); i++) - removedimported += solvers[i]->stats[nbRemovedUnaryWatchedClauses]; - printf("| %15" PRIu64" ", removedimported); - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->stats[nbRemovedUnaryWatchedClauses]); - printf("|\n"); -//-- - - printf("c | Blocked_Reuse "); - uint64_t blockedreused = 0; - for(int i = 0; i < solvers.size(); i++) - blockedreused += solvers[i]->nbNotExportedBecauseDirectlyReused; - printf("| %15" PRIu64" ", blockedreused); - for(int i = 0; i < solvers.size(); i++) - printf("| %10" PRIu64" ", solvers[i]->nbNotExportedBecauseDirectlyReused); - printf("|\n"); -//-- - printf("c |---------------|-----------------"); - for(int i = 0; i < solvers.size(); i++) - printf("|------------"); - printf("|\n"); - - printf("c | Unaries "); - printf("| "); - for(int i = 0; i < solvers.size(); i++) { - printf("| %10" PRIu64" ", solvers[i]->stats[nbUn]); - } - printf("|\n"); -//-- - - printf("c | Binaries "); - printf("| "); - for(int i = 0; i < solvers.size(); i++) { - printf("| %10" PRIu64" ", solvers[i]->stats[nbBin]); - } - printf("|\n"); -//-- - - - printf("c | Glues "); - printf("| "); 
- for(int i = 0; i < solvers.size(); i++) { - printf("| %10" PRIu64" ", solvers[i]->stats[nbDL2]); - } - printf("|\n"); -//-- - - printf("c |---------------|-----------------"); - for(int i = 0; i < solvers.size(); i++) - printf("|------------"); - printf("|\n"); - - printf("c | Orig_Seen "); - uint64_t origseen = 0; - - for(int i = 0; i < solvers.size(); i++) { - origseen += solvers[i]->stats[originalClausesSeen]; - } - printf("| %13" PRIu64" %% ", origseen * 100 / nClauses() / solvers.size()); - - for(int i = 0; i < solvers.size(); i++) { - printf("| %10" PRIu64" ", solvers[i]->stats[originalClausesSeen]); - } - - printf("|\n"); - - - int winner = -1; - for(int i = 0; i < solvers.size(); i++) { - if(sharedcomp->winner() == solvers[i]) - winner = i; - } - -//-- - if(winner != -1) { - printf("c | Diff Orig seen"); - printf("| "); - - for(int i = 0; i < solvers.size(); i++) { - if(i == winner) { - printf("| X "); - continue; - } - if(solvers[i]->stats[originalClausesSeen] > solvers[winner]->stats[originalClausesSeen]) - printf("| %10" PRIu64" ", solvers[i]->stats[originalClausesSeen] - solvers[winner]->stats[originalClausesSeen]); - else - printf("| -%9" PRIu64" ", solvers[winner]->stats[originalClausesSeen] - solvers[i]->stats[originalClausesSeen]); - - } - - printf("|\n"); - } - - -//-- - - if(winner != -1) { - int sum = 0; - printf("c | Hamming "); - for(int i = 0; i < solvers.size(); i++) { - if(i == winner) - continue; - int nb = 0; - for(int j = 0; j < nVars(); j++) { - if(solvers[i]->valuePhase(j) != solvers[winner]->valuePhase(j)) nb++; - } - sum += nb; - - } - sum = sum / (solvers.size() > 1 ? 
solvers.size() - 1 : 1); - - printf("| %13d %% ", sum * 100 / nVars()); - - for(int i = 0; i < solvers.size(); i++) { - if(i == winner) { - printf("| X "); - continue; - } - int nb = 0; - for(int j = 0; j < nVars(); j++) { - if(solvers[i]->valuePhase(j) != solvers[winner]->valuePhase(j)) nb++; - } - printf("| %10d ", nb); - sum += nb; - - } - printf("|\n"); - } - - printf("c |---------------|-----------------"); - for(int i = 0; i < solvers.size(); i++) - printf("|------------"); - printf("|\n"); - - -} - - -// Well, all those parameteres are just naive guesses... No experimental evidences for this. -void MultiSolvers::adjustParameters() { - SolverConfiguration::configure(this, nbsolvers); -} - - -void MultiSolvers::adjustNumberOfCores() { - float mem = memUsed(); - if(nbthreads == 0) { // Automatic configuration - if(verb >= 1) - printf("c | Automatic Adjustement of the number of solvers. MaxMemory=%5d, MaxCores=%3d. |\n", maxmemory, maxnbsolvers); - unsigned int tmpnbsolvers = maxmemory * 4 / 10 / mem; - if(tmpnbsolvers > maxnbsolvers) tmpnbsolvers = maxnbsolvers; - if(tmpnbsolvers < 1) tmpnbsolvers = 1; - if(verb >= 1) - printf("c | One Solver is taking %.2fMb... Let's take %d solvers for this run (max 40%% of the maxmemory). |\n", mem, tmpnbsolvers); - nbsolvers = tmpnbsolvers; - nbthreads = nbsolvers; - } else { - assert(nbthreads == nbsolvers); - } -} - - -lbool MultiSolvers::solve() { - pthread_attr_t thAttr; - int i; - - adjustNumberOfCores(); - sharedcomp->setNbThreads(nbsolvers); - if(verb >= 1) - printf("c | Generating clones |\n"); - generateAllSolvers(); - if(verb >= 1) { - printf("c |  all clones generated. Memory = %6.2fMb. 
|\n", memUsed()); - printf("c ========================================================================================================|\n"); - } - - - model.clear(); - - /* Initialize and set thread detached attribute */ - pthread_attr_init(&thAttr); - pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - - - - // Launching all solvers - for(i = 0; i < nbsolvers; i++) { - pthread_t *pt = (pthread_t *) malloc(sizeof(pthread_t)); - threads.push(pt); - solvers[i]->pmfinished = &mfinished; - solvers[i]->pcfinished = &cfinished; - pthread_create(threads[i], &thAttr, &localLaunch, (void *) solvers[i]); - } - - bool done = false; - bool adjustedlimitonce = false; - - (void) pthread_mutex_lock(&m); - while(!done) { - struct timespec timeout; - time(&timeout.tv_sec); - timeout.tv_sec += MAXIMUM_SLEEP_DURATION; - timeout.tv_nsec = 0; - if(pthread_cond_timedwait(&cfinished, &mfinished, &timeout) != ETIMEDOUT) - done = true; - else - printStats(); - - float mem = memUsed(); - if(verb >= 1) printf("c Total Memory so far : %.2fMb\n", mem); - if((maxmemory > 0) && (mem > maxmemory) && !sharedcomp->panicMode) - printf("c ** reduceDB switching to Panic Mode due to memory limitations !\n"), sharedcomp->panicMode = true; - - if(!done && !adjustedlimitonce) { - uint64_t sumconf = 0; - uint64_t sumimported = 0; - for(int i = 0; i < nbsolvers; i++) { - sumconf += solvers[i]->conflicts; - sumimported += solvers[i]->stats[nbimported]; - } - if(sumconf > 10000000 && sumimported > 4 * sumconf) { // too many many imported clauses (after a while) - for(int i = 0; i < nbsolvers; i++) { // we have like 32 threads, so we need to export just very good clauses - solvers[i]->goodlimitlbd -= 2; - solvers[i]->goodlimitsize -= 4; - } - adjustedlimitonce = true; - printf("c adjusting (once) the limits to send fewer clauses.\n"); - } - } - } - - (void) pthread_mutex_unlock(&m); - - for(i = 0; i < nbsolvers; i++) { // Wait for all threads to finish - pthread_join(*threads[i], NULL); - } - - 
assert(sharedcomp != NULL); - result = sharedcomp->jobStatus; - if(result == l_True) { - sharedcomp->jobFinishedBy->extendModel(); - int n = sharedcomp->jobFinishedBy->nVars(); - model.growTo(n); - for(int i = 0; i < n; i++) { - model[i] = sharedcomp->jobFinishedBy->model[i]; - assert(model[i] != l_Undef); - } - } - - - return result; - /* - for(int i=0;i& ps); // Add a clause to the solver. NOTE! 'ps' may be shrunk by this method! - bool addClause_( vec& ps); - - bool simplify (); // Removes already satisfied clauses. - - int nVars () const; // The current number of variables. - int nClauses () const; // The current number of variables. - ParallelSolver *getPrimarySolver(); - - void generateAllSolvers(); - - // Solving: - // - lbool solve (); // Search without assumptions. - bool eliminate(); // Perform variable elimination - void adjustParameters(); - void adjustNumberOfCores(); - void interrupt() {} - vec model; // If problem is satisfiable, this vector contains the model (if any). - inline bool okay() { - if(!ok) return ok; - for(int i = 0;iokay()) { - ok = false; - return false; - } - } - return true; - - } - - bool use_simplification; - - - protected: - friend class ParallelSolver; - friend class SolverCompanion; - -struct Stats { - uint64_t min, max, avg, std, med; - Stats(uint64_t _min = 0,uint64_t _max = 0,uint64_t _avg = 0,uint64_t _std = 0,uint64_t _med = 0) : - min(_min), max(_max), avg(_avg), std(_std), med(_med) {} -}; - - void printStats(); - int ok; - lbool result; - int maxnbthreads; // Maximal number of threads - int nbthreads; // Current number of threads - int nbsolvers; // Number of CDCL solvers - int nbcompanions; // Number of companions - int nbcompbysolver; // Number of companions by solvers - bool immediateSharingGlue ; - int allClonesAreBuilt; - bool showModel; // show model on/off - - int winner; - - vec add_tmp; - - double var_decay; // Inverse of the variable activity decay factor. 
(default 1 / 0.95) - double clause_decay; // Inverse of the clause activity decay factor. (1 / 0.999) - double cla_inc; // Amount to bump next clause with. - double var_inc; // Amount to bump next variable with. - double random_var_freq; // The frequency with which the decision heuristic tries to choose a random variable. (default 0.02) - int restart_first; // The initial restart limit. (default 100) - double restart_inc; // The factor with which the restart limit is multiplied in each restart. (default 1.5) - double learntsize_factor; // The intitial limit for learnt clauses is a factor of the original clauses. (default 1 / 3) - double learntsize_inc; // The limit for learnt clauses is multiplied with this factor each restart. (default 1.1) - bool expensive_ccmin; // Controls conflict clause minimization. (default TRUE) - int polarity_mode; // Controls which polarity the decision heuristic chooses. See enum below for allowed modes. (default polarity_false) - unsigned int maxmemory; - unsigned int maxnbsolvers; - int verb; - int verbEveryConflicts; - int numvar; // Number of variables - int numclauses; // Number of clauses - - enum { polarity_true = 0, polarity_false = 1, polarity_user = 2, polarity_rnd = 3 }; - - //ClauseAllocator ca; - SharedCompanion * sharedcomp; - - void informEnd(lbool res); - ParallelSolver* retrieveSolver(int i); - - pthread_mutex_t m; // mutex for any high level sync between all threads (like reportf) - pthread_mutex_t mfinished; // mutex on which main process may wait for... 
As soon as one process finishes it release the mutex - pthread_cond_t cfinished; // condition variable that says that a thread has finished - - vec solvers; // set of plain solvers - vec solvercompanions; // set of companion solvers - vec threads; // all threads of this process - vec threadIndexOfSolver; // threadIndexOfSolver[solvers[i]] is the index in threads[] of the solver i - vec threadIndexOfSolverCompanion; // threadIndexOfSolverCompanion[solvercompanions[i]] is the index in threads[] of the solvercompanion i -}; - -inline bool MultiSolvers::addClause (const vec& ps) { ps.copyTo(add_tmp); return addClause_(add_tmp); } - -inline void MultiSolvers::setVerbosity(int i) {verb = i;} -inline void MultiSolvers::setVerbEveryConflicts(int i) {verbEveryConflicts=i;} -inline int MultiSolvers::nVars () const { return numvar; } -inline int MultiSolvers::nClauses () const { return numclauses; } -inline int MultiSolvers::verbosity() {return verb;} -inline ParallelSolver* MultiSolvers::getPrimarySolver() {return solvers[0];} - - -} -#endif - diff --git a/libs/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.cc b/libs/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.cc deleted file mode 100644 index bca361ca17..0000000000 --- a/libs/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.cc +++ /dev/null @@ -1,176 +0,0 @@ -/***************************************************************************************[SolverConfiguration.cc] - Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - LRI - Univ. Paris Sud, France (2009-2013) - Labri - Univ. Bordeaux, France - - Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - Labri - Univ. Bordeaux, France - -Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it -is based on. 
(see below). - -Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel -version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software -without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -- The above and below copyrights notices and this permission notice shall be included in all -copies or substantial portions of the Software; -- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of -the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event -using Glucose Parallel as an embedded SAT engine (single core or not). - - ---------------- Original Minisat Copyrights - -Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson -Copyright (c) 2007-2010, Niklas Sorensson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and -associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT -NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT -OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - **************************************************************************************************/ - -#include "parallel/MultiSolvers.h" -#include "core/Solver.h" -//#include "parallel/ParallelSolver.h" -#include "parallel/SolverConfiguration.h" - -using namespace Glucose; - - -void SolverConfiguration::configure(MultiSolvers *ms, int nbsolvers) { - for(int i = 1;isolvers[i]->randomizeFirstDescent = true; - ms->solvers[i]->adaptStrategies = (i%2==0); // Just half of the cores are in adaptive mode - ms->solvers[i]->forceUnsatOnNewDescent = (i%4==0); // Just half of adaptive cores have the unsat force - } - if (nbsolvers > 8) { // configuration for the second phase of the sat race 2015 - for(int i=0;isolvers[i]->goodlimitlbd = 5; - ms->solvers[i]->goodlimitsize = 15; - } - } - -} - - -void SolverConfiguration::configureSAT15Adapt(MultiSolvers *ms, int nbsolvers) { - for(int i = 1;isolvers[i]->randomizeFirstDescent = true; - ms->solvers[i]->adaptStrategies = (i%2==0); // Just half of the cores are in adaptive mode - } - if (nbsolvers > 8) { // configuration for the second phase of the sat race 2015 - for(int i=0;isolvers[i]->goodlimitlbd = 5; - ms->solvers[i]->goodlimitsize = 15; - } - } -} - - -void SolverConfiguration::configureSAT15Default(MultiSolvers *ms, int nbsolvers) { - for(int i = 1;isolvers[i]->randomizeFirstDescent = true; - - if (nbsolvers > 8) { // configuration for the second phase of the sat race 2015 - for(int i=0;isolvers[i]->goodlimitlbd = 5; - ms->solvers[i]->goodlimitsize = 15; - - } - } - -} - -void SolverConfiguration::configureSAT14(MultiSolvers *ms, int nbsolvers) { - - if (nbsolvers < 2 ) return; - - ms->solvers[1]->var_decay = 0.94; - ms->solvers[1]->max_var_decay = 0.96; - 
ms->solvers[1]->firstReduceDB=600; - - if (nbsolvers < 3 ) return; - - ms->solvers[2]->var_decay = 0.90; - ms->solvers[2]->max_var_decay = 0.97; - ms->solvers[2]->firstReduceDB=500; - - if (nbsolvers < 4 ) return; - - ms->solvers[3]->var_decay = 0.85; - ms->solvers[3]->max_var_decay = 0.93; - ms->solvers[3]->firstReduceDB=400; - - if (nbsolvers < 5 ) return; - - // Glucose 2.0 (+ blocked restarts) - ms->solvers[4]->var_decay = 0.95; - ms->solvers[4]->max_var_decay = 0.95; - ms->solvers[4]->firstReduceDB=4000; - ms->solvers[4]->lbdQueue.growTo(100); - ms->solvers[4]->sizeLBDQueue = 100; - ms->solvers[4]->K = 0.7; - ms->solvers[4]->incReduceDB = 500; - - if (nbsolvers < 6 ) return; - - ms->solvers[5]->var_decay = 0.93; - ms->solvers[5]->max_var_decay = 0.96; - ms->solvers[5]->firstReduceDB=100; - ms->solvers[5]->incReduceDB = 500; - - if (nbsolvers < 7 ) return; - - ms->solvers[6]->var_decay = 0.75; - ms->solvers[6]->max_var_decay = 0.94; - ms->solvers[6]->firstReduceDB=2000; - - if (nbsolvers < 8 ) return; - - ms->solvers[7]->var_decay = 0.94; - ms->solvers[7]->max_var_decay = 0.96; - ms->solvers[7]->firstReduceDB=800; - - if (nbsolvers < 9) return; - -// ms->solvers[8]->reduceOnSize = true; // NOT USED ANYMORE - - if (nbsolvers < 10 ) return; - -// ms->solvers[9]->reduceOnSize = true; // NOT USED ANYMORE -// ms->solvers[9]->reduceOnSizeSize = 14; - - if (nbsolvers < 11 ) return; - - double noisevar_decay = 0.005; - int noiseReduceDB = 50; - for (int i=10;isolvers[i]-> var_decay = ms->solvers[i%8]->var_decay; - ms->solvers[i]-> max_var_decay = ms->solvers[i%8]->max_var_decay; - ms->solvers[i]-> firstReduceDB= ms->solvers[i%8]->firstReduceDB; - ms->solvers[i]->var_decay += noisevar_decay; - ms->solvers[i]->firstReduceDB+=noiseReduceDB; - if ((i+1) % 8 == 0) { - noisevar_decay += 0.006; - noiseReduceDB += 25; - } - } - } diff --git a/libs/mugen/glucose-syrup-4.1/simp/Main.cc b/libs/mugen/glucose-syrup-4.1/simp/Main.cc deleted file mode 100644 index 
415d21f25f..0000000000 --- a/libs/mugen/glucose-syrup-4.1/simp/Main.cc +++ /dev/null @@ -1,299 +0,0 @@ -/***************************************************************************************[Main.cc] - Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - LRI - Univ. Paris Sud, France (2009-2013) - Labri - Univ. Bordeaux, France - - Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - Labri - Univ. Bordeaux, France - -Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it -is based on. (see below). - -Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel -version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software -without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -- The above and below copyrights notices and this permission notice shall be included in all -copies or substantial portions of the Software; -- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of -the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event -using Glucose Parallel as an embedded SAT engine (single core or not). 
- - ---------------- Original Minisat Copyrights - -Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson -Copyright (c) 2007-2010, Niklas Sorensson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and -associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT -NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT -OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- **************************************************************************************************/ - -#include - -#include -#include -#include - -#include "utils/System.h" -#include "utils/ParseUtils.h" -#include "utils/Options.h" -#include "core/Dimacs.h" -#include "simp/SimpSolver.h" - -using namespace Glucose; - -//================================================================================================= - -static const char* _certified = "CORE -- CERTIFIED UNSAT"; - -void printStats(Solver& solver) -{ - double cpu_time = cpuTime(); - double mem_used = 0;//memUsedPeak(); - printf("c restarts : %" PRIu64" (%" PRIu64" conflicts in avg)\n", solver.starts,(solver.starts>0 ?solver.conflicts/solver.starts : 0)); - printf("c blocked restarts : %" PRIu64" (multiple: %" PRIu64") \n", solver.stats[nbstopsrestarts],solver.stats[nbstopsrestartssame]); - printf("c last block at restart : %" PRIu64"\n",solver.stats[lastblockatrestart]); - printf("c nb ReduceDB : %" PRIu64"\n", solver.stats[nbReduceDB]); - printf("c nb removed Clauses : %" PRIu64"\n",solver.stats[nbRemovedClauses]); - printf("c nb learnts DL2 : %" PRIu64"\n", solver.stats[nbDL2]); - printf("c nb learnts size 2 : %" PRIu64"\n", solver.stats[nbBin]); - printf("c nb learnts size 1 : %" PRIu64"\n", solver.stats[nbUn]); - if(solver.chanseokStrategy) - printf("c nb permanent learnts : %" PRIu64"\n", solver.stats[nbPermanentLearnts]); - - printf("c conflicts : %-12" PRIu64" (%.0f /sec)\n", solver.conflicts , solver.conflicts /cpu_time); - printf("c decisions : %-12" PRIu64" (%4.2f %% random) (%.0f /sec)\n", solver.decisions, (float)solver.stats[rnd_decisions]*100 / (float)solver.decisions, solver.decisions /cpu_time); - printf("c propagations : %-12" PRIu64" (%.0f /sec)\n", solver.propagations, solver.propagations/cpu_time); - // printf("c conflict literals : %-12" PRIu64" (%4.2f %% deleted)\n", solver.stats[tot_literals], (solver.stats[max_literals] - solver.stats[tot_literals])*100 / 
(double)solver.stats[max_literals]); - // printf("c Average resolutions : %-12" PRIu64" (%.0f seen ones)\n",solver.stats[sumRes]/solver.conflicts,((double)solver.stats[sumResSeen])/solver.conflicts); - printf("c nb reduced Clauses : %" PRIu64"\n",solver.stats[nbReducedClauses]); - - if (mem_used != 0) printf("Memory used : %.2f MB\n", mem_used); - printf("c CPU time : %g s\n", cpu_time); -} - - - -static Solver* solver; -// Terminate by notifying the solver and back out gracefully. This is mainly to have a test-case -// for this feature of the Solver as it may take longer than an immediate call to '_exit()'. -static void SIGINT_interrupt(int signum) { solver->interrupt(); } - -// Note that '_exit()' rather than 'exit()' has to be used. The reason is that 'exit()' calls -// destructors and may cause deadlocks if a malloc/free function happens to be running (these -// functions are guarded by locks for multithreaded use). -static void SIGINT_exit(int signum) { - printf("\n"); printf("*** INTERRUPTED ***\n"); - if (solver->verbosity > 0){ - printStats(*solver); - printf("\n"); printf("*** INTERRUPTED ***\n"); } - _exit(1); } - - -//================================================================================================= -// Main: - -int main(int argc, char** argv) -{ - try { - printf("c\nc This is glucose 4.0 -- based on MiniSAT (Many thanks to MiniSAT team)\nc\n"); - - - setUsageHelp("c USAGE: %s [options] \n\n where input may be either in plain or gzipped DIMACS.\n"); - - // Extra options: - // - IntOption verb ("MAIN", "verb", "Verbosity level (0=silent, 1=some, 2=more).", 1, IntRange(0, 2)); - BoolOption mod ("MAIN", "model", "show model.", false); - IntOption vv ("MAIN", "vv", "Verbosity every vv conflicts", 10000, IntRange(1,INT32_MAX)); - BoolOption pre ("MAIN", "pre", "Completely turn on/off any preprocessing.", true); - StringOption dimacs ("MAIN", "dimacs", "If given, stop after preprocessing and write the result to this file."); - IntOption 
cpu_lim("MAIN", "cpu-lim","Limit on CPU time allowed in seconds.\n", INT32_MAX, IntRange(0, INT32_MAX)); - IntOption mem_lim("MAIN", "mem-lim","Limit on memory usage in megabytes.\n", INT32_MAX, IntRange(0, INT32_MAX)); - // BoolOption opt_incremental ("MAIN","incremental", "Use incremental SAT solving",false); - - BoolOption opt_certified (_certified, "certified", "Certified UNSAT using DRUP format", false); - StringOption opt_certified_file (_certified, "certified-output", "Certified UNSAT output file", "NULL"); - BoolOption opt_vbyte (_certified, "vbyte", "Emit proof in variable-byte encoding", false); - - parseOptions(argc, argv, true); - - SimpSolver S; - double initial_time = cpuTime(); - - S.parsing = 1; - S.use_simplification = pre; - - //if (!pre) S.eliminate(true); - - S.verbosity = verb; - S.verbEveryConflicts = vv; - S.showModel = mod; - - S.certifiedUNSAT = opt_certified; - S.vbyte = opt_vbyte; - if(S.certifiedUNSAT) { - if(!strcmp(opt_certified_file,"NULL")) { - S.vbyte = false; // Cannot write binary to stdout - S.certifiedOutput = fopen("/dev/stdout", "wb"); - if(S.verbosity >= 1) - printf("c\nc Write unsat proof on stdout using text format\nc\n"); - } else - S.certifiedOutput = fopen(opt_certified_file, "wb"); - const char *name = opt_certified_file; - if(S.verbosity >= 1) - printf("c\nc Write unsat proof on %s using %s format\nc\n",name,S.vbyte ? "binary" : "text"); - } - - solver = &S; - // Use signal handlers that forcibly quit until the solver will be able to respond to - // interrupts: - signal(SIGINT, SIGINT_exit); - signal(SIGXCPU,SIGINT_exit); - - - // Set limit on CPU-time: - if (cpu_lim != INT32_MAX){ - rlimit rl; - getrlimit(RLIMIT_CPU, &rl); - if (rl.rlim_max == RLIM_INFINITY || (rlim_t)cpu_lim < rl.rlim_max){ - rl.rlim_cur = cpu_lim; - if (setrlimit(RLIMIT_CPU, &rl) == -1) - printf("c WARNING! 
Could not set resource limit: CPU-time.\n"); - } } - - // Set limit on virtual memory: - if (mem_lim != INT32_MAX){ - rlim_t new_mem_lim = (rlim_t)mem_lim * 1024*1024; - rlimit rl; - getrlimit(RLIMIT_AS, &rl); - if (rl.rlim_max == RLIM_INFINITY || new_mem_lim < rl.rlim_max){ - rl.rlim_cur = new_mem_lim; - if (setrlimit(RLIMIT_AS, &rl) == -1) - printf("c WARNING! Could not set resource limit: Virtual memory.\n"); - } } - - if (argc == 1) - printf("c Reading from standard input... Use '--help' for help.\n"); - - gzFile in = (argc == 1) ? gzdopen(0, "rb") : gzopen(argv[1], "rb"); - if (in == NULL) - printf("ERROR! Could not open file: %s\n", argc == 1 ? "" : argv[1]), exit(1); - - if (S.verbosity > 0){ - printf("c ========================================[ Problem Statistics ]===========================================\n"); - printf("c | |\n"); } - - FILE* res = (argc >= 3) ? fopen(argv[argc-1], "wb") : NULL; - parse_DIMACS(in, S); - gzclose(in); - - if (S.verbosity > 0){ - printf("c | Number of variables: %12d |\n", S.nVars()); - printf("c | Number of clauses: %12d |\n", S.nClauses()); } - - double parsed_time = cpuTime(); - if (S.verbosity > 0){ - printf("c | Parse time: %12.2f s |\n", parsed_time - initial_time); - printf("c | |\n"); } - - // Change to signal-handlers that will only notify the solver and allow it to terminate - // voluntarily: - signal(SIGINT, SIGINT_interrupt); - signal(SIGXCPU,SIGINT_interrupt); - - S.parsing = 0; - if(pre/* && !S.isIncremental()*/) { - printf("c | Preprocesing is fully done\n"); - S.eliminate(true); - double simplified_time = cpuTime(); - if (S.verbosity > 0){ - printf("c | Simplification time: %12.2f s |\n", simplified_time - parsed_time); - } - } - printf("c | |\n"); - if (!S.okay()){ - if (S.certifiedUNSAT) fprintf(S.certifiedOutput, "0\n"), fclose(S.certifiedOutput); - if (res != NULL) fprintf(res, "UNSAT\n"), fclose(res); - if (S.verbosity > 0){ - printf("c 
=========================================================================================================\n"); - printf("Solved by simplification\n"); - printStats(S); - printf("\n"); } - printf("s UNSATISFIABLE\n"); - exit(20); - } - - if (dimacs){ - if (S.verbosity > 0) - printf("c =======================================[ Writing DIMACS ]===============================================\n"); - S.toDimacs((const char*)dimacs); - if (S.verbosity > 0) - printStats(S); - exit(0); - } - - vec dummy; - lbool ret = S.solveLimited(dummy); - - if (S.verbosity > 0){ - printStats(S); - printf("\n"); } - printf(ret == l_True ? "s SATISFIABLE\n" : ret == l_False ? "s UNSATISFIABLE\n" : "s INDETERMINATE\n"); - - if (res != NULL){ - if (ret == l_True){ - printf("SAT\n"); - for (int i = 0; i < S.nVars(); i++) - if (S.model[i] != l_Undef) - fprintf(res, "%s%s%d", (i==0)?"":" ", (S.model[i]==l_True)?"":"-", i+1); - fprintf(res, " 0\n"); - } else { - if (ret == l_False){ - fprintf(res, "UNSAT\n"); - } - } - fclose(res); - } else { - if(S.showModel && ret==l_True) { - printf("v "); - for (int i = 0; i < S.nVars(); i++) - if (S.model[i] != l_Undef) - printf("%s%s%d", (i==0)?"":" ", (S.model[i]==l_True)?"":"-", i+1); - printf(" 0\n"); - } - - - } - - -#ifdef NDEBUG - exit(ret == l_True ? 10 : ret == l_False ? 20 : 0); // (faster than "return", which will invoke the destructor for 'Solver') -#else - return (ret == l_True ? 10 : ret == l_False ? 
20 : 0); -#endif - } catch (OutOfMemoryException&){ - printf("c =========================================================================================================\n"); - printf("INDETERMINATE\n"); - exit(0); - } -} diff --git a/libs/mugen/glucose-syrup-4.1/simp/SimpSolver.h b/libs/mugen/glucose-syrup-4.1/simp/SimpSolver.h deleted file mode 100644 index 42f68e89c8..0000000000 --- a/libs/mugen/glucose-syrup-4.1/simp/SimpSolver.h +++ /dev/null @@ -1,237 +0,0 @@ -/***************************************************************************************[SimpSolver.h] - Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - LRI - Univ. Paris Sud, France (2009-2013) - Labri - Univ. Bordeaux, France - - Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon - CRIL - Univ. Artois, France - Labri - Univ. Bordeaux, France - -Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it -is based on. (see below). - -Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel -version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software -without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -- The above and below copyrights notices and this permission notice shall be included in all -copies or substantial portions of the Software; -- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of -the authors (Gilles Audemard / Laurent Simon). 
This is also the case for any competitive event -using Glucose Parallel as an embedded SAT engine (single core or not). - - ---------------- Original Minisat Copyrights - -Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson -Copyright (c) 2007-2010, Niklas Sorensson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and -associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT -NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT -OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- **************************************************************************************************/ - -#ifndef Glucose_SimpSolver_h -#define Glucose_SimpSolver_h - -#include "mtl/Queue.h" -#include "core/Solver.h" -#include "mtl/Clone.h" - -namespace Glucose { - -//================================================================================================= - - -class SimpSolver : public Solver { - public: - // Constructor/Destructor: - // - SimpSolver(); - ~SimpSolver(); - - SimpSolver(const SimpSolver &s); - - - /** - * Clone function - */ - virtual Clone* clone() const { - return new SimpSolver(*this); - } - - - // Problem specification: - // - virtual Var newVar (bool polarity = true, bool dvar = true); // Add a new variable with parameters specifying variable mode. - bool addClause (const vec& ps); - bool addEmptyClause(); // Add the empty clause to the solver. - bool addClause (Lit p); // Add a unit clause to the solver. - bool addClause (Lit p, Lit q); // Add a binary clause to the solver. - bool addClause (Lit p, Lit q, Lit r); // Add a ternary clause to the solver. - virtual bool addClause_( vec& ps); - bool substitute(Var v, Lit x); // Replace all occurences of v with x (may cause a contradiction). - - // Variable mode: - // - void setFrozen (Var v, bool b); // If a variable is frozen it will not be eliminated. - bool isEliminated(Var v) const; - - // Solving: - // - bool solve (const vec& assumps, bool do_simp = true, bool turn_off_simp = false); - lbool solveLimited(const vec& assumps, bool do_simp = true, bool turn_off_simp = false); - bool solve ( bool do_simp = true, bool turn_off_simp = false); - bool solve (Lit p , bool do_simp = true, bool turn_off_simp = false); - bool solve (Lit p, Lit q, bool do_simp = true, bool turn_off_simp = false); - bool solve (Lit p, Lit q, Lit r, bool do_simp = true, bool turn_off_simp = false); - bool eliminate (bool turn_off_elim = false); // Perform variable elimination based simplification. 
- - // Memory managment: - // - virtual void garbageCollect(); - - - // Generate a (possibly simplified) DIMACS file: - // -#if 0 - void toDimacs (const char* file, const vec& assumps); - void toDimacs (const char* file); - void toDimacs (const char* file, Lit p); - void toDimacs (const char* file, Lit p, Lit q); - void toDimacs (const char* file, Lit p, Lit q, Lit r); -#endif - - // Mode of operation: - // - int parsing; - int grow; // Allow a variable elimination step to grow by a number of clauses (default to zero). - int clause_lim; // Variables are not eliminated if it produces a resolvent with a length above this limit. - // -1 means no limit. - int subsumption_lim; // Do not check if subsumption against a clause larger than this. -1 means no limit. - double simp_garbage_frac; // A different limit for when to issue a GC during simplification (Also see 'garbage_frac'). - - bool use_asymm; // Shrink clauses by asymmetric branching. - bool use_rcheck; // Check if a clause is already implied. Prett costly, and subsumes subsumptions :) - bool use_elim; // Perform variable elimination. - // Statistics: - // - int merges; - int asymm_lits; - int eliminated_vars; - bool use_simplification; - - protected: - - // Helper structures: - // - struct ElimLt { - const vec& n_occ; - explicit ElimLt(const vec& no) : n_occ(no) {} - - // TODO: are 64-bit operations here noticably bad on 32-bit platforms? Could use a saturating - // 32-bit implementation instead then, but this will have to do for now. - uint64_t cost (Var x) const { return (uint64_t)n_occ[toInt(mkLit(x))] * (uint64_t)n_occ[toInt(~mkLit(x))]; } - bool operator()(Var x, Var y) const { return cost(x) < cost(y); } - - // TODO: investigate this order alternative more. 
- // bool operator()(Var x, Var y) const { - // int c_x = cost(x); - // int c_y = cost(y); - // return c_x < c_y || c_x == c_y && x < y; } - }; - - struct ClauseDeleted { - const ClauseAllocator& ca; - explicit ClauseDeleted(const ClauseAllocator& _ca) : ca(_ca) {} - bool operator()(const CRef& cr) const { return ca[cr].mark() == 1; } }; - - // Solver state: - // - int elimorder; - vec elimclauses; - vec touched; - OccLists, ClauseDeleted> - occurs; - vec n_occ; - Heap elim_heap; - Queue subsumption_queue; - vec frozen; - vec eliminated; - int bwdsub_assigns; - int n_touched; - - // Temporaries: - // - CRef bwdsub_tmpunit; - - // Main internal methods: - // - virtual lbool solve_ (bool do_simp = true, bool turn_off_simp = false); - bool asymm (Var v, CRef cr); - bool asymmVar (Var v); - void updateElimHeap (Var v); - void gatherTouchedClauses (); - bool merge (const Clause& _ps, const Clause& _qs, Var v, vec& out_clause); - bool merge (const Clause& _ps, const Clause& _qs, Var v, int& size); - bool backwardSubsumptionCheck (bool verbose = false); - bool eliminateVar (Var v); - void extendModel (); - - void removeClause (CRef cr,bool inPurgatory=false); - bool strengthenClause (CRef cr, Lit l); - void cleanUpClauses (); - bool implied (const vec& c); - virtual void relocAll (ClauseAllocator& to); -}; - - -//================================================================================================= -// Implementation of inline methods: - - -inline bool SimpSolver::isEliminated (Var v) const { return eliminated[v]; } -inline void SimpSolver::updateElimHeap(Var v) { - assert(use_simplification); - // if (!frozen[v] && !isEliminated(v) && value(v) == l_Undef) - if (elim_heap.inHeap(v) || (!frozen[v] && !isEliminated(v) && value(v) == l_Undef)) - elim_heap.update(v); } - - -inline bool SimpSolver::addClause (const vec& ps) { ps.copyTo(add_tmp); return addClause_(add_tmp); } -inline bool SimpSolver::addEmptyClause() { add_tmp.clear(); return addClause_(add_tmp); } 
-inline bool SimpSolver::addClause (Lit p) { add_tmp.clear(); add_tmp.push(p); return addClause_(add_tmp); } -inline bool SimpSolver::addClause (Lit p, Lit q) { add_tmp.clear(); add_tmp.push(p); add_tmp.push(q); return addClause_(add_tmp); } -inline bool SimpSolver::addClause (Lit p, Lit q, Lit r) { add_tmp.clear(); add_tmp.push(p); add_tmp.push(q); add_tmp.push(r); return addClause_(add_tmp); } -inline void SimpSolver::setFrozen (Var v, bool b) { frozen[v] = (char)b; if (use_simplification && !b) { updateElimHeap(v); } } - -inline bool SimpSolver::solve ( bool do_simp, bool turn_off_simp) { budgetOff(); assumptions.clear(); return solve_(do_simp, turn_off_simp) == l_True; } -inline bool SimpSolver::solve (Lit p , bool do_simp, bool turn_off_simp) { budgetOff(); assumptions.clear(); assumptions.push(p); return solve_(do_simp, turn_off_simp) == l_True; } -inline bool SimpSolver::solve (Lit p, Lit q, bool do_simp, bool turn_off_simp) { budgetOff(); assumptions.clear(); assumptions.push(p); assumptions.push(q); return solve_(do_simp, turn_off_simp) == l_True; } -inline bool SimpSolver::solve (Lit p, Lit q, Lit r, bool do_simp, bool turn_off_simp) { budgetOff(); assumptions.clear(); assumptions.push(p); assumptions.push(q); assumptions.push(r); return solve_(do_simp, turn_off_simp) == l_True; } -inline bool SimpSolver::solve (const vec& assumps, bool do_simp, bool turn_off_simp){ - budgetOff(); assumps.copyTo(assumptions); return solve_(do_simp, turn_off_simp) == l_True; } - -inline lbool SimpSolver::solveLimited (const vec& assumps, bool do_simp, bool turn_off_simp){ - assumps.copyTo(assumptions); return solve_(do_simp, turn_off_simp); } - -//================================================================================================= -} - -#endif diff --git a/libs/parallel-hashmap b/libs/parallel-hashmap deleted file mode 160000 index 8442f1c82c..0000000000 --- a/libs/parallel-hashmap +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 
8442f1c82cad04c026e3db4959c6b7a5396f982a diff --git a/libs/pybind11 b/libs/pybind11 deleted file mode 160000 index 1dc76208d5..0000000000 --- a/libs/pybind11 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1dc76208d5822e78fc8129552b4d622c78b7ce64 diff --git a/libs/tinyxml2 b/libs/tinyxml2 deleted file mode 160000 index 3324d04d58..0000000000 --- a/libs/tinyxml2 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 3324d04d58de9d5db09327db6442f075e519f11b diff --git a/libs/undirected_graph/source/graph_search_iterator.h b/libs/undirected_graph/source/graph_search_iterator.h deleted file mode 100644 index eadaf5d9a6..0000000000 --- a/libs/undirected_graph/source/graph_search_iterator.h +++ /dev/null @@ -1,182 +0,0 @@ -/* - Header file for graph_search_iterator for the undirected_graph container - Copyright (C) 2015 Fabian Löschner - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -*/ - -#ifndef GRAPHSEARCHITERATOR -#define GRAPHSEARCHITERATOR - -#include -#include -#include -#include - -/** - * @brief Base class for search iterators in a graph - * - * This class provides the base for search iterators on graphs with a container of - * currently waiting vertices (/vertex iterators) and a list of already visited - * vertices. Subclasses have to reimplement the next() function to increment the 'iterator'. 
- * @tparam graph Type of the graph that should be traversed. Required to get the necessary iterator and data types. - * @tparam waiting_container Type of the waiting container that should be used. (stack/queue) - */ -template -class graph_search_iterator -{ -public: - /** - * @brief Constructs a new graph search iterator - * - * Constructs a graph search iterator object, initializing it with the supplied values. - * @param g A reference to the graph that should be traversed. - * @param start Vertex iterator to the node that should be used as a starting point. - */ - graph_search_iterator(const graph& g, typename graph::graph_vertex_const_iterator start) : m_graph(&g) - { - if(!g.empty()) { - m_waiting.push(start); - m_discovered.emplace(start->first); - } - } - - /** - * @brief Check whether search has finished - * - * Returns whether the search alghorithms iterated through all accessible vertices. - * @return true if the search finished. - */ - bool end() const - { - return m_waiting.empty(); - } - - /** - * @brief Returns number of discovered vertices - * - * Returns the number of vertices the search algorithm already traveresed. - * @return Number of visited vertices. - */ - size_t discovered() const - { - return m_discovered.size(); - } - - /** - * @brief Returns iterator to next vertex in the search order - * - * This method should be reimplemented in subclasses to increment the search iterator - * to the next the element depending on the search algorithm - * @return Iterator to the next element - */ - virtual typename graph::graph_vertex_const_iterator next() = 0; - -protected: - //! Queue for waiting vertex iterators - waiting_container m_waiting; - //! The vertices that were already visited - std::unordered_set m_discovered; - //! The graph this iterator traverses - const graph* m_graph; -}; - -/** - * @brief Breadth first iterator - * - * This iterator traverses an undirected_graph using the breadth first algortihm. 
- * @tparam graph Type of the graph that should be traversed. - */ -template -class breadth_first_iterator : public graph_search_iterator> -{ -public: - /** - * @brief Constructs a new BFS iterator - * - * Constructs a breadth first search iterator object, initializing it with the supplied values. - * @param g A reference to the graph that should be traversed. - * @param start Vertex iterator to the node that should be used as a starting point. - */ - breadth_first_iterator(const graph& g, typename graph::graph_vertex_const_iterator start) - : graph_search_iterator>(g, start) {} - - /** - * @brief Returns iterator to the next vertex - * - * This method increments the iterator using the BFS algorithm and returns - * an iterator to the next element. - * @return Iterator to the next element - */ - typename graph::graph_vertex_const_iterator next() - { - auto next = this->m_waiting.front(); - this->m_waiting.pop(); - - for(auto it = this->m_graph->begin_adjacent(next->first); it != this->m_graph->end_adjacent(next->first); ++it) { - if(this->m_discovered.count(*it) == 0) { - this->m_waiting.push(this->m_graph->find_vertex(*it)); - this->m_discovered.emplace(*it); - } - } - - return next; - } -}; - -/** - * @brief Depth first iterator - * - * This iterator traverses an undirected_graph using the depth first algortihm. - * @tparam graph Type of the graph that should be traversed. - */ -template -class depth_first_iterator : public graph_search_iterator> -{ -public: - /** - * @brief Constructs a new DFS iterator - * - * Constructs a depth first search iterator object, initializing it with the supplied values. - * @param g A reference to the graph that should be traversed. - * @param start Vertex iterator to the node that should be used as a starting point. 
- */ - depth_first_iterator(const graph& g, typename graph::graph_vertex_const_iterator start) - : graph_search_iterator>(g, start) {} - - /** - * @brief Returns iterator to the next vertex - * - * This method increments the iterator using the DFS algorithm and returns - * an iterator to the next element. - * @return Iterator to the next element - */ - typename graph::graph_vertex_const_iterator next() - { - auto next = this->m_waiting.top(); - this->m_waiting.pop(); - - for(auto it = this->m_graph->begin_adjacent(next->first); it != this->m_graph->end_adjacent(next->first); ++it) { - if(this->m_discovered.count(*it) == 0) { - this->m_waiting.push(this->m_graph->find_vertex(*it)); - this->m_discovered.emplace(*it); - } - } - - return next; - } -}; - -#endif // GRAPHSEARCHITERATOR diff --git a/libs/undirected_graph/source/undirected_graph.h b/libs/undirected_graph/source/undirected_graph.h deleted file mode 100644 index 5e7119c3e5..0000000000 --- a/libs/undirected_graph/source/undirected_graph.h +++ /dev/null @@ -1,428 +0,0 @@ -/* - Header file for undirected_graph container - Copyright (C) 2015 Fabian Löschner - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-*/ - -#ifndef UNDIRECTEDGRAPH -#define UNDIRECTEDGRAPH - -#include -#include -#include -#include - -/** - * @brief Undirected graph with vertices and edges - * - * A basic implementation of undirected graphs containing vertices connected by unique edges. - * There may be multiple vertices with the same value but only one edge between two vertices. - * The container stores data elements for the vertices and edges. The vertices and edges are identified - * and sorted by ids which have to be unique in the container. - * @tparam Key_vertex Type for the ids of vertices. Has to support the default comparison operators. - * @tparam T_vertex The type that should be used for the data elements at the vertices. - * @tparam Key_edge The type for the ids of edges. Has to provide a constructor taking two vertex ids as well as - * @tparam T_edge The type that should be used for the data elments at the edges - * support of the default comparison operators where the order of the two vertices is not important. - * Furthermore it has to provide access to the two vertex ids with a public a and b member variable. - */ -template -class undirected_graph -{ -public: - typedef T_vertex vertex_data_type; - typedef Key_vertex vertex_id_type; - typedef T_edge edge_data_type; - typedef Key_edge edge_id_type; - -private: - // TODO: Replace forward_list with map because of slow erasing. Use unique_ptr to store them. - - //! Type of the current graph - typedef undirected_graph graph_type; - //! Type for the container of vertex data in the graph - typedef std::unordered_map vertex_container; - //! Type for the container of edge data in the graph - typedef std::unordered_map edge_container; - //! Type for the adjacency lists of the graph - typedef std::forward_list adjacency_list; - //! 
Type for the container of the adjacency lists in the graph - typedef std::unordered_map adjacency_container; - - vertex_container vertices; /*!< Container for the vertex data */ - edge_container edges; /*!< Container for the edge data */ - adjacency_container adjacency; /*!< Container for adjacency lists */ - -public: - // TODO: Erase by iterator - - //! Iterator for vertices in the graph. It behaves like a std::unordered_map iterator with it->first being the vertex id and it->second being the mapped vertex data. - typedef typename vertex_container::iterator graph_vertex_iterator; - //! Const iterator for vertices in the graph. It behaves like a std::unordered_map iterator with it->first being the vertex id and it->second being the mapped vertex data. - typedef typename vertex_container::const_iterator graph_vertex_const_iterator; - //! Iterator for edges in the graph. It behaves like a std::unordered_map iterator with it->first being the edge id and it->second being the mapped edge data. - typedef typename edge_container::iterator graph_edge_iterator; - //! Const iterator for edges in the graph. It behaves like a std::unordered_map iterator with it->first being the edge id and it->second being the mapped edge data. - typedef typename edge_container::const_iterator graph_edge_const_iterator; - //! Iterator for adjacencent vertices in the graph. It behaves like a std::forward_list iterator dereferencing to the id of the adjacent vertex. - typedef typename adjacency_list::iterator graph_adjacency_iterator; - //! Const iterator for adjacencent vertices in the graph. It behaves like a std::forward_list iterator dereferencing to the id of the adjacent vertex. - typedef typename adjacency_list::const_iterator graph_adjacency_const_iterator; - - /** - * @brief Make edge id - * - * Creates the edge id for the edge between the two specified vertex ids. The two vertex ids - * don't have to be present in the graph. - * @param a First vertex id. - * @param b Second vertex id. 
- * @return Edge id for the edge between the two vertex ids. - */ - static edge_id_type make_edge_id(const vertex_id_type& a, const vertex_id_type &b) - { - return edge_id_type(a,b); - } - - /** - * @brief Test whether graph is empty - * - * Returns whether the graph is empty (i.e. whether there are no vertices). - * This function does not modify the graph in any way. To clear the content - * of a graph container, see undirected_graph::clear. - * @return true if the there no vertices, false otherwise. - */ - bool empty() const - { - return vertices.empty(); - } - - /** - * @brief Return vertex container size - * - * Returns the number of vertices in the undirected_graph container. - * @return The number of vertices in the graph. - */ - size_t size_vertices() const - { - return vertices.size(); - } - - /** - * @brief Return edge container size - * - * Returns the number of edges in the undirected_graph container. - * @return The number of edges in the graph. - */ - size_t size_edges() const - { - return edges.size(); - } - - /** - * @brief Access vertex - * - * Returns a reference to the mapped data of the vertex identified with the specified id. - * If it does not match the id of any vertex in the container, the function throws an out_of_range exception. - * @param id The id of the vertex whose mapped data is accessed. - * @return A reference to the mapped data of the vertex. - */ - vertex_data_type& at_vertex(const vertex_id_type& id) - { - return vertices.at(id); - } - - const vertex_data_type& at_vertex(const vertex_id_type& id) const { return vertices.at(id); } - - /** - * @brief Access edge - * - * Returns a reference to the mapped data of the edge identified with the specified id. - * If it does not match the id of any edge in the container, the function throws an out_of_range exception. - * @param id The id of the edge whose mapped data is accessed. - * @return A reference to the mapped data of the edge. 
- */ - edge_data_type& at_edge(const edge_id_type& id) - { - return edges.at(id); - } - - const edge_data_type& at_edge(const edge_id_type& id) const { return edges.at(id); } - - /** - * @brief Get iterator to vertex - * - * Searches the container for a vertex with an id equivalent to the one specified and returns an iterator - * to it if found, otherwise it returns an iterator to undirected_graph::end_vertices. - * Two ids are considered equivalent if the container's comparison object returns false reflexively - * (i.e., no matter the order in which the ids are passed as arguments). - * @param id Id to be searched for. - * @return An iterator to the vertex, if a vertex with specified id is found, or undirected_graph::end_vertices otherwise. - */ - graph_vertex_iterator find_vertex(const vertex_id_type& id) - { - return vertices.find(id); - } - - graph_vertex_const_iterator find_vertex(const vertex_id_type& id) const { return vertices.find(id); } - - /** - * @brief Get iterator to edge - * - * Searches the container for an edge with an id equivalent to the one specified and returns an iterator - * to it if found, otherwise it returns an iterator to undirected_graph::end_edges. - * Two ids are considered equivalent if the container's comparison object returns false reflexively - * (i.e., no matter the order in which the ids are passed as arguments). - * @param id Id to be searched for. - * @return An iterator to the edge, if an edge with specified id is found, or undirected_graph::end_edges otherwise. - */ - graph_edge_iterator find_edge(const edge_id_type& id) - { - return edges.find(id); - } - - graph_edge_const_iterator find_edge(const edge_id_type& id) const { return edges.find(id); } - - /** - * @brief Return iterator to beginning of vertices - * - * Returns an iterator referring to the first vertex in the graph container. - * If the container is empty, the returned iterator value shall not be dereferenced. 
- * @return An iterator to the first vertex in the container. - */ - graph_vertex_iterator begin_vertices() { return vertices.begin(); } - graph_vertex_const_iterator begin_vertices() const { return vertices.begin(); } - - /** - * @brief Return iterator to end of vertices - * - * Returns an iterator referring to the past-the-end vertex in the graph container. - * It does not point to any element, and thus shall not be dereferenced. - * If the container is empty, this function returns the same as undirected_graph::begin_vertices. - * @return An iterator to the past-the-end vertex in the container. - */ - graph_vertex_iterator end_vertices() { return vertices.end(); } - graph_vertex_const_iterator end_vertices() const { return vertices.end(); } - - /** - * @brief Return iterator to beginning of edges - * - * Returns an iterator referring to the first edge in the graph container. - * If the container is empty, the returned iterator value shall not be dereferenced. - * @return An iterator to the first edge in the container. - */ - graph_edge_iterator begin_edges() { return edges.begin(); } - graph_edge_const_iterator begin_edges() const { return edges.begin(); } - - /** - * @brief Return iterator to end of edges - * - * Returns an iterator referring to the past-the-end edges in the graph container. - * It does not point to any element, and thus shall not be dereferenced. - * If the container is empty, this function returns the same as undirected_graph::begin_edges. - * @return An iterator to the past-the-end edge in the container. - */ - graph_edge_iterator end_edges() { return edges.end(); } - graph_edge_const_iterator end_edges() const { return edges.end(); } - - /** - * @brief Return iterator to beginning of adjacent vertices - * - * Returns an iterator referring to the first adjacent vertex of the specified vertex. - * If the adjacency list is empty, the returned iterator value shall not be dereferenced. - * It behaves like a forward_list iterator. 
- * @return An iterator to the first adjacent vertex in the container. - */ - graph_adjacency_iterator begin_adjacent(const vertex_id_type& vertex) - { - return adjacency.at(vertex).begin(); - } - graph_adjacency_const_iterator begin_adjacent(const vertex_id_type& vertex) const { return adjacency.at(vertex).begin(); } - - /** - * @brief Return iterator to end of adjacent vertices - * - * Returns an iterator referring to the past-the-end adjacent vertex to the specified vertex. - * It does not point to any element, and thus shall not be dereferenced. - * If the adjacency list is empty, this function returns the same as undirected_graph::begin_adjacent. - * It behaves like a forward_list iterator. - * @return An iterator to the past-the-end adjacent vertex of the specified vertex. - */ - graph_adjacency_iterator end_adjacent(const vertex_id_type& vertex) - { - return adjacency.at(vertex).end(); - } - graph_adjacency_const_iterator end_adjacent(const vertex_id_type& vertex) const { return adjacency.at(vertex).end(); } - - /** - * @brief Clear content - * - * Removes all elements from the graph container (which are destroyed), leaving the container with a size of 0. - */ - void clear() - { - vertices.clear(); - edges.clear(); - adjacency.clear(); - } - - /** - * @brief Insert vertex - * - * Inserts a new vertex to the graph, effectively increasing the container size by one. Multiple vertices with the same - * value may exist in one graph but ids have to be unique. - * @param vertex_id The id of the vertex. - * @param vertex_data Value to be copied to the inserted vertex. - * @return Returns a pair with an iterator to the inserted vertex and a bool indicating whether the vertex was newly inserted or not. 
- */ - std::pair insert_vertex(const vertex_id_type& vertex_id, const vertex_data_type& vertex_data) - { - auto pair = vertices.emplace(vertex_id, vertex_data); - adjacency.emplace(vertex_id, adjacency_list()); - return pair; - } - - /** - * @brief Erase vertex - * - * Removes a single vertex from the graph container. This effectively reduces the vertex container - * size by one and the vertex data is destroyed. Also all edge data connected to this vertex - * is destroyed. WARNING: Linear in the number of adjacency entries of connected vertices. - * @param vertex_id Id of the vertex that should be removed. - * @return Returns whether the vertex was removed. - */ - bool erase_vertex(const vertex_id_type& vertex_id) - { - // Try to remove vertex - auto count = vertices.erase(vertex_id); - if(count == 0) return false; - - /* - * Check if there is a self-adjacency entry because this - * would invalidate iterators in the second loop - */ - if(edges.count(edge_id_type(vertex_id, vertex_id)) == 1) { - // Delete edge - edges.erase(edge_id_type(vertex_id, vertex_id)); - - // Find and delete adjacency entry - auto& adj_list = adjacency.at(vertex_id); - auto it_prev = adj_list.before_begin(); - for(auto it = adj_list.begin(); it != adj_list.end(); ++it) { - if(*it == vertex_id) { - adj_list.erase_after(it_prev); - break; - } - it_prev = it; - } - } - - // Loop through all adjacency entries - auto& adj_list = adjacency.at(vertex_id); - for(auto it = adj_list.begin(); it != adj_list.end(); ++it) { - const auto other_id = *it; - - // Remove edge data - edges.erase(edge_id_type(vertex_id, other_id)); - - // Delete reverse adjacency entries - auto& other_adj_list = adjacency.at(other_id); - auto it_prev = other_adj_list.before_begin(); - for(auto other_it = other_adj_list.begin(); other_it != other_adj_list.end(); ++other_it) { - if(*other_it == vertex_id) { - other_adj_list.erase_after(it_prev); - break; - } - it_prev = other_it; - } - } - - // Remove adjacency list of the vertex 
- adjacency.erase(vertex_id); - - return true; - } - - /** - * @brief Erase edge - * - * Removes a single edge from the graph container. This effectively reduces the edge container - * size by one and the edge data is destroyed. WARNING: Linear in the number of adjacent vertices - * of the two connected vertices. - * @param edge_id Id of the edge that should be removed. - * @return Returns whether the edge was removed. - */ - bool erase_edge(const edge_id_type& edge_id) - { - // Try to remove edge - auto count = edges.erase(edge_id); - if(count == 0) return false; - - // Remove adjacency of b from a - auto& adjacency_list_a = adjacency.at(edge_id.a); - auto it_prev_a = adjacency_list_a.before_begin(); - for(auto it = adjacency_list_a.begin(); it != adjacency_list_a.end(); ++it) { - if(*it == edge_id.b) { - adjacency_list_a.erase_after(it_prev_a); - break; - } - it_prev_a = it; - } - - // Remove adjacency of a from b - auto& adjacency_list_b = adjacency.at(edge_id.b); - auto it_prev_b = adjacency_list_b.before_begin(); - for(auto it = adjacency_list_b.begin(); it != adjacency_list_b.end(); ++it) { - if(*it == edge_id.a) { - adjacency_list_b.erase_after(it_prev_b); - break; - } - it_prev_b = it; - } - - return true; - } - - /** - * @brief Add edge between two vertices - * - * This method inserts a new edge data element to the graph connecting two vertices. - * If the edge already exists the edge will not be modified. - * @param vertex_a Id of the first vertex to connect. - * @param vertex_b Id of the second vertex to connect. - * @param edge_data The data element for the edge between the two vertices. - * @return Returns a pair with an iterator to the inserted edge and a bool indicating whether the edge was inserted or not. 
- */ - std::pair insert_edge(const vertex_id_type& vertex_a, const vertex_id_type& vertex_b, edge_data_type edge_data) { - // Check if vertices are in the graph - if(vertices.count(vertex_a) == 0 || vertices.count(vertex_b) == 0) { - return std::make_pair(edges.end(), false); - } - - // Try to place new edge - auto pair = edges.emplace(edge_id_type(vertex_a, vertex_b), edge_data); - - // Create adjacency entries if the edge was added - if(pair.second) { - adjacency.at(vertex_a).push_front(vertex_b); - if(vertex_a != vertex_b) adjacency.at(vertex_b).push_front(vertex_a); - } - - // Return iterator - return pair; - } -}; - -#endif // UNDIRECTEDGRAPH diff --git a/libs/undirected_graph/source/undirected_pair.h b/libs/undirected_graph/source/undirected_pair.h deleted file mode 100644 index 4628e9c765..0000000000 --- a/libs/undirected_graph/source/undirected_pair.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - Header file for undirected_pair container - Copyright (C) 2015 Fabian Löschner - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -*/ - -#ifndef UNDIRECTEDPAIR -#define UNDIRECTEDPAIR - -#include -#include - -/** - * @brief Undirected pair - * - * Pair whose comparison operators does not differentiate between pair(a,b) and pair(b,a). 
- * Suitable as the edge id type for an undirected graph - */ -template -class undirected_pair -{ -public: - T a; /*!< Element a of the undirected pair */ - T b; /*!< Element b of the undirected pair */ - - //! Constructs an empty undirected pair - undirected_pair() = default; - //! Constructs an undirected pair with the specified objects as data - undirected_pair(T a_in, T b_in) : a(a_in), b(b_in) {} - - //! Returns a reference to the other data element if compare is the same as one of the elements in the undirected pair - T& other_element(const T& compare) - { - if(compare == a) return b; - if(compare == b) return a; - throw std::invalid_argument(""); - } - //! Returns a const reference to the other data element if compare is the same as one of the elements in the undirected pair - const T& other_element(const T& compare) const { if(compare == a) return b; if(compare == b) return a; throw std::invalid_argument(""); } - - //! Returns a reference to the smaller of the two elements in the undirected pair or element a - T& smaller_element() - { - if(b < a) return b; - return a; - } - //! Returns a const reference to the smaller of the two elements in the undirected pair or element a - const T& smaller_element() const { if(b < a) return b; return a; } - - //! Returns a reference to the bigger of the two elements in the undirected pair or element b - T& bigger_element() - { - if(a > b) return a; - return b; - } - //! Returns a const reference to the bigger of the two elements in the undirected pair or element b - const T& bigger_element() const { if(a > b) return a; return b; } - - //! Returns whether the two undirected pairs contain the same data elements - inline bool operator==(const undirected_pair& rhs) const - { - return ((a == rhs.a) && (b == rhs.b)) - || ((a == rhs.b) && (b == rhs.a)); - } - //! Returns whether the two pairs contain different data elements - inline bool operator!=(const undirected_pair& rhs) const {return !(*this == rhs);} - - //! 
Strict weak ordering for undirected pairs by the smallest element in the pairs - inline bool operator<(const undirected_pair& rhs) const - { - if(smaller_element() < rhs.smaller_element()) return true; - if(rhs.smaller_element() < smaller_element()) return false; - if(bigger_element() < rhs.bigger_element()) return true; - return false; - } - inline bool operator> (const undirected_pair& rhs) const {return rhs < *this;} - inline bool operator<=(const undirected_pair& rhs) const {return !(*this > rhs);} - inline bool operator>=(const undirected_pair& rhs) const {return !(*this < rhs);} -}; - -namespace std { - - /** - * @brief Hash functor for undirected_pair - * - * This functor provides a specialization of the std::hash functor for an undirected_pair - * to store them in containers like an unordered_map. To use this functor, the type T has - * to provide a std::hash specialization itself. - */ - template - struct hash> - { - inline std::size_t operator()(const undirected_pair& obj) const - { - return (std::hash()(obj.smaller_element()) ^ (std::hash()(obj.bigger_element()) << 1)); - } - }; -} - -#endif //UNDIRECTEDPAIR diff --git a/noxfile.py b/noxfile.py old mode 100644 new mode 100755 index fc57d2fdc1..73f3e5c729 --- a/noxfile.py +++ b/noxfile.py @@ -1,35 +1,46 @@ +#!/usr/bin/env -S uv run --script --quiet +# /// script +# dependencies = ["nox"] +# /// + """Nox sessions.""" from __future__ import annotations +import contextlib import os import shutil -import sys +import tempfile from typing import TYPE_CHECKING import nox if TYPE_CHECKING: - from collections.abc import Sequence + from collections.abc import Generator, Sequence -nox.needs_version = ">=2024.3.2" -nox.options.default_venv_backend = "uv|virtualenv" +nox.needs_version = ">=2025.10.16" +nox.options.default_venv_backend = "uv" -nox.options.sessions = ["lint", "tests", "minimums"] +nox.options.sessions = ["lint", "tests"] PYTHON_ALL_VERSIONS = ["3.10", "3.11", "3.12", "3.13", "3.14"] -# The following 
lists all the build requirements for building the package. -# Note that this includes transitive build dependencies of package dependencies, -# since we use `--no-build-isolation` to install the package in editable mode -# and get better caching performance. This only concerns dependencies that are -# not available via wheels on PyPI (i.e., only as source distributions). -BUILD_REQUIREMENTS = ["scikit-build-core>=0.10.1", "setuptools_scm>=8.1"] if os.environ.get("CI", None): nox.options.error_on_missing_interpreters = True +@contextlib.contextmanager +def preserve_lockfile() -> Generator[None]: + """Preserve the lockfile by moving it to a temporary directory.""" + with tempfile.TemporaryDirectory() as temp_dir_name: + shutil.move("uv.lock", f"{temp_dir_name}/uv.lock") + try: + yield + finally: + shutil.move(f"{temp_dir_name}/uv.lock", "uv.lock") + + @nox.session(reuse_venv=True) def lint(session: nox.Session) -> None: """Run the linter.""" @@ -43,47 +54,57 @@ def _run_tests( session: nox.Session, *, install_args: Sequence[str] = (), - run_args: Sequence[str] = (), - extras: Sequence[str] = (), + extra_command: Sequence[str] = (), + pytest_run_args: Sequence[str] = (), ) -> None: - posargs = list(session.posargs) - env: dict[str, str] = {} - if os.environ.get("CI", None) and sys.platform == "win32": - env["SKBUILD_CMAKE_ARGS"] = "-T ClangCL" - + env = {"UV_PROJECT_ENVIRONMENT": session.virtualenv.location} if shutil.which("cmake") is None and shutil.which("cmake3") is None: session.install("cmake") if shutil.which("ninja") is None: session.install("ninja") - extras_ = ["test", *extras] - if "--cov" in posargs: - extras_.append("coverage") - posargs.append("--cov-config=pyproject.toml") - - session.install(*BUILD_REQUIREMENTS, *install_args, env=env) - install_arg = f"-ve.[{','.join(extras_)}]" - session.install("--no-build-isolation", install_arg, *install_args, env=env) - session.run("pytest", *run_args, *posargs, env=env) + # install build and test dependencies on 
top of the existing environment + session.run( + "uv", + "sync", + "--inexact", + "--only-group", + "build", + "--only-group", + "test", + *install_args, + env=env, + ) + session.run( + "uv", + "sync", + "--inexact", + "--no-dev", # do not auto-install dev dependencies + "--no-build-isolation-package", + "mnt-pyfiction", # build the project without isolation + *install_args, + env=env, + ) + if extra_command: + session.run(*extra_command, env=env) + session.run( + "uv", + "run", + "--no-sync", # do not sync as everything is already installed + *install_args, + "pytest", + *pytest_run_args, + *session.posargs, + env=env, + ) -@nox.session(reuse_venv=True, python=PYTHON_ALL_VERSIONS) +@nox.session(python=PYTHON_ALL_VERSIONS, reuse_venv=True, default=True) def tests(session: nox.Session) -> None: """Run the test suite.""" _run_tests(session) -@nox.session(reuse_venv=True, venv_backend="uv", python=PYTHON_ALL_VERSIONS) -def minimums(session: nox.Session) -> None: - """Test the minimum versions of dependencies.""" - _run_tests( - session, - install_args=["--resolution=lowest-direct"], - run_args=["-Wdefault"], - ) - session.run("uv", "pip", "list") - - # @nox.session(reuse_venv=True) # def docs(session: nox.Session) -> None: # """Build the docs. Use "--non-interactive" to avoid serving. 
Pass "-b linkcheck" to check links.""" diff --git a/pyproject.toml b/pyproject.toml index 437bdf3ab5..f6e389f663 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,9 +45,6 @@ dependencies = [ "z3-solver>=4.8.0" ] -[project.optional-dependencies] -test = ["pytest>=7.2", "mnt.pyfiction"] - [project.urls] Source = 'https://github.com/cda-tum/fiction' Tracker = 'https://github.com/cda-tum/fiction/issues' @@ -88,7 +85,6 @@ sdist.exclude = [ "**/experiments", "**/examples", "**/include", - "**/libs", "**/cmake-build-**", "**/plots", "**/test", @@ -112,8 +108,8 @@ FICTION_CLI = "OFF" FICTION_TEST = "OFF" FICTION_EXPERIMENTS = "OFF" FICTION_ENABLE_IPO = "ON" -FICTION_ENABLE_PCH = "ON" -FICTION_ENABLE_UNITY_BUILD = "ON" +FICTION_ENABLE_PCH = "OFF" +FICTION_ENABLE_UNITY_BUILD = "OFF" FICTION_Z3 = "ON" FICTION_ALGLIB = "ON" FICTION_PYTHON_BINDINGS = "ON" @@ -126,12 +122,15 @@ inherit.cmake.define = "append" cmake.define.DISABLE_GIL = "1" -[tool.pytest.ini_options] -minversion = "7.2" -testpaths = ["bindings/mnt/pyfiction/test/"] -addopts = ["-ra", "--strict-markers", "--strict-config", "--showlocals"] -log_cli_level = "INFO" -xfail_strict = true +[tool.pytest] +minversion = "9.0" +strict = true +addopts = [ + "-ra", + "--numprocesses=auto", # Automatically use all available CPU cores for parallel testing +] +log_level = "INFO" +testpaths = ["bindings/mnt/pyfiction/test"] [tool.cibuildwheel] @@ -144,7 +143,7 @@ build-frontend = "build[uv]" manylinux-x86_64-image = "manylinux_2_28" [tool.cibuildwheel.linux] -environment = { Z3_ROOT = "/opt/python/cp311-cp311/lib/python3.11/site-packages/z3", ALGLIB_DIR = "libs/alglib-cmake/src/cpp/src" } +environment = { Z3_ROOT = "/opt/python/cp311-cp311/lib/python3.11/site-packages/z3" } before-all = "/opt/python/cp311-cp311/bin/pip install z3-solver>=4.8.0" repair-wheel-command = [ "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/python/cp311-cp311/lib/python3.11/site-packages/z3/lib", @@ -152,7 +151,7 @@ repair-wheel-command = [ ] 
[tool.cibuildwheel.macos] -environment = { MACOSX_DEPLOYMENT_TARGET = "11.0" } +environment = { MACOSX_DEPLOYMENT_TARGET = "13.0" } [tool.cibuildwheel.windows] before-build = "pip install delvewheel>=1.7.3" @@ -161,13 +160,16 @@ environment = { CMAKE_GENERATOR = "Ninja" } [[tool.cibuildwheel.overrides]] select = "*-macosx_arm64" -environment = { MACOSX_DEPLOYMENT_TARGET = "11.0" } +environment = { MACOSX_DEPLOYMENT_TARGET = "13.0" } [tool.uv] required-version = ">=0.5.20" -reinstall-package = ["mnt.pyfiction"] - +cache-keys = [ + { file = "pyproject.toml" }, + { git = { commit = true, tags = true } }, + { file = "bindings/**/*.hpp" }, +] [tool.mypy] files = ["bindings/mnt/pyfiction", "noxfile.py"] @@ -240,10 +242,12 @@ docs = [ "sphinx-tabs==3.4.7", ] test = [ - "pytest>=8.3.3", + "pytest>=9.0.1", + "pytest-sugar>=1.1.1", + "pytest-xdist>=3.8.0", ] dev = [ { include-group = "build" }, - { include-group = "docs" }, { include-group = "test" }, + "nox>=2025.11.12", ] diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 1b3e136e4d..c9aed6923b 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -1,36 +1,37 @@ # Benchmarking (depends on Catch2) option( - FICTION_BENCHMARK - "Build fiction benchmarks, which can evaluate the performance of certain code fragments" - OFF) -if (FICTION_BENCHMARK) - message(STATUS "Building fiction benchmarks") - add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/benchmark) -endif () + FICTION_BENCHMARK + "Build fiction benchmarks, which can evaluate the performance of certain code fragments" + OFF) +if(FICTION_BENCHMARK) + message(STATUS "Building fiction benchmarks") + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/benchmark) +endif() # Define libfiction_test as an interface target add_library(libfiction_test INTERFACE) # Apply compile definitions to libfiction_test that point to the test folder target_compile_definitions( - libfiction_test - INTERFACE "TEST_PATH=\"${CMAKE_CURRENT_SOURCE_DIR}/\"") + libfiction_test INTERFACE 
"TEST_PATH=\"${CMAKE_CURRENT_SOURCE_DIR}/\"") include_directories(.) file(GLOB_RECURSE FILENAMES */*.cpp) list(FILTER FILENAMES EXCLUDE REGEX "benchmark/.*$") -foreach (FILE IN LISTS FILENAMES) - get_filename_component(NAME ${FILE} NAME_WE) - set(TEST_NAME test_${NAME}) - add_executable(${TEST_NAME} ${FILE}) +foreach(FILE IN LISTS FILENAMES) + get_filename_component(NAME ${FILE} NAME_WE) + set(TEST_NAME test_${NAME}) + add_executable(${TEST_NAME} ${FILE}) - # Link the test executable against libfiction and Catch2 - target_link_libraries(${TEST_NAME} PRIVATE libfiction_test libfiction Catch2::Catch2WithMain) + # Link the test executable against libfiction and Catch2 + target_link_libraries( + ${TEST_NAME} PRIVATE libfiction_test fiction::libfiction + Catch2::Catch2WithMain) - # Make Catch2 ignore SIGTERMs sent to applications when timeouts are reached - target_compile_definitions(${TEST_NAME} INTERFACE CATCH_CONFIG_NO_POSIX_SIGNALS) + # Make Catch2 ignore SIGTERMs sent to applications when timeouts are reached + target_compile_definitions(${TEST_NAME} PRIVATE CATCH_CONFIG_NO_POSIX_SIGNALS) - add_test(NAME ${NAME} COMMAND ${TEST_NAME}) # group tests by file -endforeach () + add_test(NAME ${NAME} COMMAND ${TEST_NAME}) # group tests by file +endforeach() diff --git a/test/algorithms/iter/aspect_ratio_iterator.cpp b/test/algorithms/iter/aspect_ratio_iterator.cpp index fb2140c96e..0368a39cdf 100644 --- a/test/algorithms/iter/aspect_ratio_iterator.cpp +++ b/test/algorithms/iter/aspect_ratio_iterator.cpp @@ -12,7 +12,7 @@ using namespace fiction; -TEST_CASE("Traits", "[bdl-input-iterator]") +TEST_CASE("Aspect Ratio Iterator Traits", "[bdl-input-iterator]") { CHECK(std::is_same_v>::iterator_category, std::forward_iterator_tag>); diff --git a/test/algorithms/iter/bdl_input_iterator.cpp b/test/algorithms/iter/bdl_input_iterator.cpp index 598630fe83..60c04794e5 100644 --- a/test/algorithms/iter/bdl_input_iterator.cpp +++ b/test/algorithms/iter/bdl_input_iterator.cpp @@ -19,7 
+19,7 @@ using namespace fiction; -TEST_CASE("Traits", "[bdl-input-iterator]") +TEST_CASE("BDL Input Iterator Traits", "[bdl-input-iterator]") { using layout = sidb_100_cell_clk_lyt_siqad; diff --git a/test/algorithms/physical_design/apply_gate_library.cpp b/test/algorithms/physical_design/apply_gate_library.cpp index a924fd34ad..9f68961949 100644 --- a/test/algorithms/physical_design/apply_gate_library.cpp +++ b/test/algorithms/physical_design/apply_gate_library.cpp @@ -2,7 +2,6 @@ // Created by Jan Drewniok on 08.01.25. // -#include #include #include "utils/blueprints/layout_blueprints.hpp" @@ -27,6 +26,8 @@ using namespace fiction; +namespace +{ template void check_equivalence(const Lyt& layout_designed, const std::string& path_layout_correct) { @@ -48,6 +49,7 @@ void check_equivalence(const Lyt& layout_designed, const std::string& path_layou } }); } +} // namespace using cell_lyt = sidb_100_cell_clk_lyt_cube; diff --git a/test/algorithms/physical_design/exact.cpp b/test/algorithms/physical_design/exact.cpp index 4cba550432..06a4fc0241 100644 --- a/test/algorithms/physical_design/exact.cpp +++ b/test/algorithms/physical_design/exact.cpp @@ -14,23 +14,31 @@ #include #include #include +#include #include #include #include +#include #include #include #include +#include #include #include +#include #include -#include -#include +#include +#include +#include #include using namespace fiction; +namespace +{ + exact_physical_design_params configuration() noexcept { return {}; @@ -177,13 +185,6 @@ surface_black_list&& blacklist_or(const tile& return std::move(sbl); } -exact_physical_design_params&& async(const std::size_t t, exact_physical_design_params&& ps) noexcept -{ - ps.num_threads = t; - - return std::move(ps); -} - exact_physical_design_params&& minimize_wires(exact_physical_design_params&& ps) noexcept { ps.minimize_wires = true; @@ -236,10 +237,10 @@ Lyt generate_layout(const Ntk& ntk, const exact_physical_design_params& ps) REQUIRE(layout.has_value()); - 
check_drvs(*layout); + check_drvs(layout.value()); // NOLINT(bugprone-unchecked-optional-access) check_stats(stats); - return *layout; + return layout.value(); // NOLINT(bugprone-unchecked-optional-access) } template Lyt generate_layout_with_black_list(const Ntk& ntk, const surface_black_list& black_list, @@ -251,10 +252,10 @@ Lyt generate_layout_with_black_list(const Ntk& ntk, const surface_black_list @@ -327,6 +328,8 @@ bool has_straight_inverters(const Lyt& lyt) noexcept return only_straight_inverters; } +} // namespace + TEST_CASE("Exact Cartesian physical design", "[exact]") { SECTION("Open clocking") @@ -690,7 +693,10 @@ TEST_CASE("Exact physical design with upper bounds", "[exact]") REQUIRE(layout.has_value()); - CHECK(layout->y() <= 3); + if (layout) + { + CHECK(layout->y() <= 3); + } upper_bound_config.upper_bound_x = 2u; // additionally, allow only 2 tiles in x direction; this will now fail @@ -721,16 +727,19 @@ TEST_CASE("Name conservation after exact physical design", "[exact]") REQUIRE(layout.has_value()); - // network name - CHECK(layout->get_layout_name() == "maj"); + if (layout) + { + // network name + CHECK(layout->get_layout_name() == "maj"); - // PI names - CHECK(layout->get_name(layout->pi_at(0)) == "a"); // first PI - CHECK(layout->get_name(layout->pi_at(1)) == "b"); // second PI - CHECK(layout->get_name(layout->pi_at(2)) == "c"); // third PI + // PI names + CHECK(layout->get_name(layout->pi_at(0)) == "a"); // first PI + CHECK(layout->get_name(layout->pi_at(1)) == "b"); // second PI + CHECK(layout->get_name(layout->pi_at(2)) == "c"); // third PI - // PO names - CHECK(layout->get_output_name(0) == "f"); + // PO names + CHECK(layout->get_output_name(0) == "f"); + } } #else // FICTION_Z3_SOLVER diff --git a/test/algorithms/physical_design/one_pass_synthesis.cpp b/test/algorithms/physical_design/one_pass_synthesis.cpp index 4b282ed108..82c8bfb460 100644 --- a/test/algorithms/physical_design/one_pass_synthesis.cpp +++ 
b/test/algorithms/physical_design/one_pass_synthesis.cpp @@ -20,13 +20,14 @@ #include #include -#include #include -#include #include using namespace fiction; +namespace +{ + one_pass_synthesis_params configuration() noexcept { one_pass_synthesis_params ps{}; @@ -75,14 +76,6 @@ one_pass_synthesis_params&& maj(one_pass_synthesis_params&& ps) noexcept return std::move(ps); } -one_pass_synthesis_params&& async(const std::size_t t, one_pass_synthesis_params&& ps) noexcept -{ -#if !defined(__APPLE__) - ps.num_threads = t; -#endif - return std::move(ps); -} - void check_stats(const one_pass_synthesis_stats& st) noexcept { CHECK(std::chrono::duration_cast(st.time_total).count() > 0); @@ -102,15 +95,15 @@ Lyt generate_layout(const Ntk& ntk, const one_pass_synthesis_params& ps) REQUIRE(layout.has_value()); check_stats(stats); - print_gate_level_layout(std::cout, *layout); + print_gate_level_layout(std::cout, layout.value()); // NOLINT(bugprone-unchecked-optional-access) - return *layout; + return layout.value(); // NOLINT(bugprone-unchecked-optional-access) } template void apply_gate_library(const Lyt& lyt) { - CHECK_NOTHROW(apply_gate_library(lyt)); + CHECK_NOTHROW((fiction::apply_gate_library(lyt))); } template @@ -122,6 +115,8 @@ void check(const Ntk& ntk, const one_pass_synthesis_params& ps) apply_gate_library(layout); } +} // namespace + TEST_CASE("One-pass synthesis", "[one-pass]") { SECTION("2DDWave clocking") @@ -188,6 +183,8 @@ TEST_CASE("Name conservation after one-pass synthesis", "[one-pass]") REQUIRE(layout.has_value()); + // NOLINTBEGIN(bugprone-unchecked-optional-access) + // network name CHECK(layout->get_layout_name() == "maj"); @@ -198,6 +195,8 @@ TEST_CASE("Name conservation after one-pass synthesis", "[one-pass]") // PO names CHECK(layout->get_output_name(0) == "f"); + + // NOLINTEND(bugprone-unchecked-optional-access) } #else // MUGEN diff --git a/test/algorithms/simulation/sidb/band_bending_resilience.cpp 
b/test/algorithms/simulation/sidb/band_bending_resilience.cpp index 7674e95d18..2d23074528 100644 --- a/test/algorithms/simulation/sidb/band_bending_resilience.cpp +++ b/test/algorithms/simulation/sidb/band_bending_resilience.cpp @@ -3,6 +3,7 @@ // #include +#include #include #include "utils/blueprints/layout_blueprints.hpp" @@ -18,42 +19,42 @@ using namespace fiction; -using layout = sidb_cell_clk_lyt_siqad; +using test_layout = sidb_cell_clk_lyt_siqad; TEST_CASE("Single SiDB", "[band-bending-resilience]") { - const auto lyt = blueprints::bestagon_and_gate(); + const auto lyt = blueprints::bestagon_and_gate(); - const auto params = + constexpr auto params = band_bending_resilience_params{physical_population_stability_params{sidb_simulation_parameters{2, -0.32}, 2}}; SECTION("Minimal potential required to conduct a charge change from neutral to negative") { - const auto min_potential = band_bending_resilience(lyt, std::vector{create_and_tt()}, params, - transition_type::NEUTRAL_TO_NEGATIVE); + const auto min_potential = + band_bending_resilience(lyt, std::vector{create_and_tt()}, params, transition_type::NEUTRAL_TO_NEGATIVE); CHECK_THAT(min_potential, Catch::Matchers::WithinAbs(0.020652, constants::ERROR_MARGIN)); } SECTION("Minimal potential required to conduct a charge change from negative to neutral") { - const auto min_potential = band_bending_resilience(lyt, std::vector{create_and_tt()}, params, - transition_type::NEGATIVE_TO_NEUTRAL); + const auto min_potential = + band_bending_resilience(lyt, std::vector{create_and_tt()}, params, transition_type::NEGATIVE_TO_NEUTRAL); CHECK_THAT(min_potential, Catch::Matchers::WithinAbs(0.087417, constants::ERROR_MARGIN)); } SECTION("Minimal potential required to conduct a charge change from positive to neutral") { - const auto min_potential = band_bending_resilience(lyt, std::vector{create_and_tt()}, params, - transition_type::NEUTRAL_TO_POSITIVE); + const auto min_potential = + band_bending_resilience(lyt, 
std::vector{create_and_tt()}, params, transition_type::NEUTRAL_TO_POSITIVE); CHECK_THAT(min_potential, Catch::Matchers::WithinAbs(0.413859, constants::ERROR_MARGIN)); } SECTION("Minimal potential required to conduct a charge change") { - const auto min_potential = band_bending_resilience(lyt, std::vector{create_and_tt()}, params); + const auto min_potential = band_bending_resilience(lyt, std::vector{create_and_tt()}, params); // the minimal potential for any charge change is the same as for neutral to negative CHECK_THAT(min_potential, Catch::Matchers::WithinAbs(0.020652, constants::ERROR_MARGIN)); diff --git a/test/algorithms/simulation/sidb/critical_temperature.cpp b/test/algorithms/simulation/sidb/critical_temperature.cpp index 9e49f4d3e5..c019f62b95 100644 --- a/test/algorithms/simulation/sidb/critical_temperature.cpp +++ b/test/algorithms/simulation/sidb/critical_temperature.cpp @@ -3,6 +3,8 @@ // #include +#include +#include #include #include "utils/blueprints/layout_blueprints.hpp" @@ -16,7 +18,6 @@ #include #include -#include #include using namespace fiction; @@ -46,7 +47,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.alpha = 0.0; const auto ct = - critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); CHECK(critical_stats.num_valid_lyt == 0); CHECK(ct == 0.0); @@ -61,8 +62,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.iteration_steps = 80; params.alpha = 0.7; - const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{tt{}}, params, &critical_stats); + const auto ct_qe = critical_temperature_gate_based(lyt, std::vector{tt{}}, params, &critical_stats); CHECK(critical_stats.num_valid_lyt == 0); CHECK(ct_qe == 0.0); @@ -71,8 +71,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] 
params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; - const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{tt{}}, params, &critical_stats); + const auto ct_cc = critical_temperature_gate_based(lyt, std::vector{tt{}}, params, &critical_stats); CHECK(critical_stats.num_valid_lyt == 0); CHECK(ct_cc == 0.0); @@ -110,7 +109,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.iteration_steps = 80; params.alpha = 0.7; - const auto ct = critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); + const auto ct = critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); CHECK(ct == 0.0); @@ -118,8 +117,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; - const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); + const auto ct_cc = critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); CHECK(ct_cc == 0.0); @@ -144,8 +142,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] const auto ct_qe = critical_temperature_non_gate_based(lyt, params, &critical_stats); CHECK(critical_stats.num_valid_lyt == 2); - CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.01)); + CHECK(std::isinf(critical_stats.energy_between_ground_state_and_first_erroneous)); CHECK(ct_qe == 350); #if (FICTION_ALGLIB_ENABLED) @@ -155,8 +152,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] const auto ct_cc = critical_temperature_non_gate_based(lyt, params, &critical_stats); CHECK(critical_stats.num_valid_lyt == 2); - 
CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.01)); + CHECK(std::isinf(critical_stats.energy_between_ground_state_and_first_erroneous)); CHECK(ct_cc == 350); #endif // FICTION_ALGLIB_ENABLED @@ -190,22 +186,18 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.iteration_steps = 80; params.alpha = 0.7; - const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); + const auto ct_qe = critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); - CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.01)); + CHECK(std::isinf(critical_stats.energy_between_ground_state_and_first_erroneous)); CHECK(ct_qe == 350); #if (FICTION_ALGLIB_ENABLED) params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; - const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); + const auto ct_cc = critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); - CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.01)); + CHECK(std::isinf(critical_stats.energy_between_ground_state_and_first_erroneous)); CHECK(ct_cc == 350); #endif // FICTION_ALGLIB_ENABLED @@ -224,10 +216,9 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.input_bdl_iterator_params.bdl_wire_params.threshold_bdl_interdistance = 1.5; const auto ct_qe = - critical_temperature_gate_based(lyt_or_gate, std::vector{create_or_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt_or_gate, std::vector{create_or_tt()}, params, 
&critical_stats); - CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.01)); + CHECK(std::isinf(critical_stats.energy_between_ground_state_and_first_erroneous)); CHECK(ct_qe == 400); #if (FICTION_ALGLIB_ENABLED) @@ -235,10 +226,9 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; const auto ct_cc = - critical_temperature_gate_based(lyt_or_gate, std::vector{create_or_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt_or_gate, std::vector{create_or_tt()}, params, &critical_stats); - CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.01)); + CHECK(std::isinf(critical_stats.energy_between_ground_state_and_first_erroneous)); CHECK(ct_cc == 400); #endif // FICTION_ALGLIB_ENABLED @@ -285,7 +275,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] SECTION("Kinks are allowed") { const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), Catch::Matchers::WithinAbs(26.02, 0.01)); @@ -296,7 +286,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), Catch::Matchers::WithinAbs(26.02, 0.01)); @@ 
-309,7 +299,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.op_condition = is_operational_params::operational_condition::REJECT_KINKS; params.operational_params.input_bdl_iterator_params.bdl_wire_params.threshold_bdl_interdistance = 2.5; const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), Catch::Matchers::WithinAbs(5.1153718076, 0.01)); @@ -320,7 +310,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), Catch::Matchers::WithinAbs(5.1153718076, 0.01)); @@ -368,8 +358,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.iteration_steps = 500; params.alpha = 0.6; - const auto ct_qs = - critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); + const auto ct_qs = critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); CHECK(ct_qs > 0); @@ -377,8 +366,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; - const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); + const auto ct_cc = critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); CHECK(ct_cc > 0); @@ 
-425,7 +413,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] SECTION("Kinks are allowed") { const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{create_fan_out_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_fan_out_tt()}, params, &critical_stats); CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous - 0.56), Catch::Matchers::WithinAbs(0.00, 0.01)); @@ -436,7 +424,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_fan_out_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_fan_out_tt()}, params, &critical_stats); CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous - 0.56), Catch::Matchers::WithinAbs(0.00, 0.01)); @@ -448,7 +436,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] { params.operational_params.op_condition = is_operational_params::operational_condition::REJECT_KINKS; const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{create_fan_out_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_fan_out_tt()}, params, &critical_stats); CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous - 0.56), Catch::Matchers::WithinAbs(0.00, 0.01)); @@ -459,7 +447,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_fan_out_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_fan_out_tt()}, params, &critical_stats); 
CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous - 0.56), Catch::Matchers::WithinAbs(0.00, 0.01)); @@ -479,8 +467,8 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.iteration_steps = 80; params.alpha = 0.7; - const auto ct_qe = critical_temperature_gate_based(crossing_lyt, std::vector{create_crossing_wire_tt()}, - params, &critical_stats); + const auto ct_qe = critical_temperature_gate_based(crossing_lyt, std::vector{create_crossing_wire_tt()}, params, + &critical_stats); CHECK_THAT(std::fabs(critical_stats.energy_between_ground_state_and_first_erroneous - 0.32), Catch::Matchers::WithinAbs(0.00, 0.01)); @@ -490,8 +478,8 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; - const auto ct_cc = critical_temperature_gate_based(crossing_lyt, std::vector{create_crossing_wire_tt()}, - params, &critical_stats); + const auto ct_cc = critical_temperature_gate_based(crossing_lyt, std::vector{create_crossing_wire_tt()}, params, + &critical_stats); CHECK_THAT(std::fabs(critical_stats.energy_between_ground_state_and_first_erroneous - 0.32), Catch::Matchers::WithinAbs(0.00, 0.01)); @@ -536,7 +524,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] SECTION("Kinks are allowed") { const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{create_or_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_or_tt()}, params, &critical_stats); CHECK(ct_qe < 350); @@ -545,7 +533,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_or_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_or_tt()}, 
params, &critical_stats); CHECK(ct_cc < 350); @@ -555,7 +543,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] { params.operational_params.op_condition = is_operational_params::operational_condition::REJECT_KINKS; const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{create_or_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_or_tt()}, params, &critical_stats); CHECK(ct_qe < 350); @@ -564,7 +552,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_or_tt()}, params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_or_tt()}, params, &critical_stats); CHECK(ct_cc < 350); @@ -600,8 +588,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.iteration_steps = 80; params.alpha = 0.7; - const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); + const auto ct_qe = critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); CHECK(critical_stats.algorithm_name == "QuickExact"); @@ -613,8 +600,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function", "[critical-temperature] params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; - const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); + const auto ct_cc = critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); CHECK(critical_stats.algorithm_name == "ClusterComplete"); @@ -690,7 +676,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function, using offset coordinates params.alpha = 0.0; const auto ct_qs = - critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, 
params, &critical_stats); + critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); CHECK(critical_stats.algorithm_name == "QuickSim"); @@ -707,7 +693,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function, using offset coordinates params.iteration_steps = 80; params.alpha = 0.7; - const auto ct = critical_temperature_gate_based(lyt, std::vector{tt{}}, params, &critical_stats); + const auto ct = critical_temperature_gate_based(lyt, std::vector{tt{}}, params, &critical_stats); CHECK(critical_stats.algorithm_name == "QuickExact"); @@ -745,8 +731,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function, using offset coordinates params.iteration_steps = 80; params.alpha = 0.7; - const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); + const auto ct_qe = critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); CHECK(ct_qe == 0.0); @@ -754,8 +739,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function, using offset coordinates params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; - const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); + const auto ct_cc = critical_temperature_gate_based(lyt, std::vector{create_id_tt()}, params, &critical_stats); CHECK(ct_cc == 0.0); @@ -782,8 +766,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function, using offset coordinates CHECK(critical_stats.algorithm_name == "QuickExact"); CHECK(critical_stats.num_valid_lyt == 2); - CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.01)); + CHECK(std::isinf(critical_stats.energy_between_ground_state_and_first_erroneous)); CHECK(ct_qe == 350); #if (FICTION_ALGLIB_ENABLED) @@ -795,8 +778,7 @@ TEMPLATE_TEST_CASE("Test critical_temperature function, using offset coordinates 
CHECK(critical_stats.algorithm_name == "ClusterComplete"); CHECK(critical_stats.num_valid_lyt == 2); - CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.01)); + CHECK(std::isinf(critical_stats.energy_between_ground_state_and_first_erroneous)); CHECK(ct_cc == 350); #endif // FICTION_ALGLIB_ENABLED @@ -829,26 +811,22 @@ TEMPLATE_TEST_CASE("Test critical_temperature function, using offset coordinates params.iteration_steps = 80; params.alpha = 0.7; - const auto ct_qe = - critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); + const auto ct_qe = critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); CHECK(critical_stats.algorithm_name == "QuickExact"); - CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.01)); + CHECK(std::isinf(critical_stats.energy_between_ground_state_and_first_erroneous)); CHECK(ct_qe == 350); #if (FICTION_ALGLIB_ENABLED) params.operational_params.sim_engine = sidb_simulation_engine::CLUSTERCOMPLETE; - const auto ct_cc = - critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); + const auto ct_cc = critical_temperature_gate_based(lyt, std::vector{create_and_tt()}, params, &critical_stats); CHECK(critical_stats.algorithm_name == "ClusterComplete"); - CHECK_THAT(std::abs(critical_stats.energy_between_ground_state_and_first_erroneous), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.01)); + CHECK(std::isinf(critical_stats.energy_between_ground_state_and_first_erroneous)); CHECK(ct_cc == 350); #endif // FICTION_ALGLIB_ENABLED diff --git a/test/algorithms/simulation/sidb/is_ground_state.cpp b/test/algorithms/simulation/sidb/is_ground_state.cpp index 566eb61c2f..2ffa2e63a1 100644 --- 
a/test/algorithms/simulation/sidb/is_ground_state.cpp +++ b/test/algorithms/simulation/sidb/is_ground_state.cpp @@ -3,13 +3,13 @@ // #include +#include #include #include #include #include #include -#include #include #include #include @@ -51,7 +51,7 @@ TEMPLATE_TEST_CASE("check if ground state is found", "[is-ground-state]", sidb_1 CHECK(!is_ground_state(simulation_result_heuristic, simulation_result_exhaustive)); } - SECTION("two idential ground states are stored in the simulation results") + SECTION("two identical ground states are stored in the simulation results") { simulation_result_exhaustive.charge_distributions = {{charge_layout_first, charge_layout_first}}; sidb_simulation_result simulation_result_heuristic{}; @@ -73,9 +73,9 @@ TEMPLATE_TEST_CASE("check if ground state is found", "[is-ground-state]", sidb_1 SECTION("layout with no SiDB placed") { - TestType lyt{}; - const charge_distribution_surface charge_layout{lyt}; - const sidb_simulation_parameters params{2, -0.32}; + TestType lyt{}; + const charge_distribution_surface charge_layout{lyt}; + constexpr sidb_simulation_parameters params{2, -0.32}; const auto simulation_results_exgs = exhaustive_ground_state_simulation(charge_layout, params); const quicksim_params quicksim_params{params}; const auto simulation_results_quicksim = quicksim(charge_layout, quicksim_params); @@ -97,10 +97,11 @@ TEMPLATE_TEST_CASE("check if ground state is found", "[is-ground-state]", sidb_1 lyt.assign_cell_type({6, 10, 0}, TestType::cell_type::NORMAL); lyt.assign_cell_type({7, 10, 0}, TestType::cell_type::NORMAL); - const charge_distribution_surface charge_layout{lyt}; - const sidb_simulation_parameters params{2, -0.32}; + const charge_distribution_surface charge_layout{lyt}; + constexpr sidb_simulation_parameters params{2, -0.32}; - auto simulation_results_exgs = exhaustive_ground_state_simulation(charge_layout, params); + sidb_simulation_result simulation_results_exgs = + exhaustive_ground_state_simulation(charge_layout, 
params); // assign different charge index on purpose to see if the algorithm still works as desired for (auto& cds : simulation_results_exgs.charge_distributions) @@ -117,18 +118,19 @@ TEMPLATE_TEST_CASE("check if ground state is found", "[is-ground-state]", sidb_1 auto simulation_results_quicksim = quicksim(charge_layout, quicksim_params); REQUIRE(simulation_results_quicksim.has_value()); + auto& quicksim_res = simulation_results_quicksim.value(); // NOLINT(bugprone-unchecked-optional-access) // assign different charge index on purpose to see if the algorithm still works as desired - for (auto& cds : simulation_results_quicksim.value().charge_distributions) + for (auto& cds : quicksim_res.charge_distributions) { cds.assign_charge_index(0, charge_distribution_mode::KEEP_CHARGE_DISTRIBUTION); } - for (auto& cds : simulation_results_quicksim.value().charge_distributions) + for (auto& cds : quicksim_res.charge_distributions) { CHECK(cds.get_charge_index_and_base().first == 0); } - CHECK(is_ground_state(simulation_results_exgs, simulation_results_quicksim.value())); + CHECK(is_ground_state(simulation_results_exgs, quicksim_res)); } } diff --git a/test/algorithms/simulation/sidb/minimum_energy.cpp b/test/algorithms/simulation/sidb/minimum_energy.cpp index 8ae00c8eef..a45821e4f8 100644 --- a/test/algorithms/simulation/sidb/minimum_energy.cpp +++ b/test/algorithms/simulation/sidb/minimum_energy.cpp @@ -2,23 +2,16 @@ // Created by Jan Drewniok on 18.01.23. 
// -#include +#include +#include #include -#include #include -#include -#include -#include -#include -#include #include #include -#include #include #include -#include #include using namespace fiction; @@ -34,8 +27,7 @@ TEST_CASE("Test minimum energy function", "[minimum-energy]") const charge_distribution_surface charge_layout{lyt}; std::vector> all_lyts{}; - CHECK_THAT(minimum_energy(all_lyts.begin(), all_lyts.end()), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.00001)); + CHECK(std::isinf(minimum_energy(all_lyts.begin(), all_lyts.end()))); all_lyts.push_back(charge_layout); @@ -49,8 +41,7 @@ TEST_CASE("Test minimum energy function", "[minimum-energy]") const charge_distribution_surface charge_layout{lyt}; std::vector> all_lyts{}; - CHECK_THAT(minimum_energy(all_lyts.cbegin(), all_lyts.cend()), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.00001)); + CHECK(std::isinf(minimum_energy(all_lyts.cbegin(), all_lyts.cend()))); all_lyts.push_back(charge_layout); @@ -66,8 +57,7 @@ TEST_CASE("Test minimum energy function", "[minimum-energy]") charge_distribution_surface charge_layout_first{lyt}; std::vector> all_lyts{}; - CHECK_THAT(minimum_energy(all_lyts.cbegin(), all_lyts.cend()), - Catch::Matchers::WithinAbs(std::numeric_limits::infinity(), 0.00001)); + CHECK(std::isinf(minimum_energy(all_lyts.cbegin(), all_lyts.cend()))); charge_layout_first.assign_charge_state({0, 0}, sidb_charge_state::NEUTRAL); diff --git a/test/algorithms/simulation/sidb/operational_domain.cpp b/test/algorithms/simulation/sidb/operational_domain.cpp index d29db42760..fa2cedd7de 100644 --- a/test/algorithms/simulation/sidb/operational_domain.cpp +++ b/test/algorithms/simulation/sidb/operational_domain.cpp @@ -3,6 +3,8 @@ // #include +#include +#include #include #include "utils/blueprints/layout_blueprints.hpp" @@ -37,7 +39,9 @@ static void check_op_domain_params_and_operational_status(const OpDomain& } op_domain.for_each( - [&op_domain, ¶ms, &status](const auto& 
coord, const auto& op_value [[maybe_unused]]) + [&op_domain, // NOLINT(clang-diagnostic-unused-lambda-capture) + ¶ms, &status](const auto& coord, + const auto& op_value) // NOLINT(misc-unused-parameters) { for (auto d = 0u; d < params.sweep_dimensions.size(); ++d) { @@ -177,7 +181,7 @@ TEST_CASE("Error handling of operational domain algorithms", "[operational-domai // flood fill operates on 2-dimensional and 3-dimensional parameter spaces for (const auto& params : {zero_dimensional_params, one_dimensional_params, four_dimensional_params}) { - CHECK_THROWS_AS(operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, params), + CHECK_THROWS_AS(operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, params), std::invalid_argument); } } @@ -187,7 +191,7 @@ TEST_CASE("Error handling of operational domain algorithms", "[operational-domai for (const auto& params : {zero_dimensional_params, one_dimensional_params, three_dimensional_params, four_dimensional_params}) { - CHECK_THROWS_AS(operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, params), + CHECK_THROWS_AS(operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, params), std::invalid_argument); } } @@ -225,23 +229,22 @@ TEST_CASE("Error handling of operational domain algorithms", "[operational-domai { SECTION("grid_search") { - CHECK_THROWS_AS(operational_domain_grid_search(lat, std::vector{create_id_tt()}, params), + CHECK_THROWS_AS(operational_domain_grid_search(lat, std::vector{create_id_tt()}, params), std::invalid_argument); } SECTION("random_sampling") { - CHECK_THROWS_AS( - operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, params), - std::invalid_argument); + CHECK_THROWS_AS(operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, params), + std::invalid_argument); } SECTION("flood_fill") { - CHECK_THROWS_AS(operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, params), + 
CHECK_THROWS_AS(operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, params), std::invalid_argument); } SECTION("contour_tracing") { - CHECK_THROWS_AS(operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, params), + CHECK_THROWS_AS(operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, params), std::invalid_argument); } } @@ -273,23 +276,22 @@ TEST_CASE("Error handling of operational domain algorithms", "[operational-domai { SECTION("grid_search") { - CHECK_THROWS_AS(operational_domain_grid_search(lat, std::vector{create_id_tt()}, params), + CHECK_THROWS_AS(operational_domain_grid_search(lat, std::vector{create_id_tt()}, params), std::invalid_argument); } SECTION("random_sampling") { - CHECK_THROWS_AS( - operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, params), - std::invalid_argument); + CHECK_THROWS_AS(operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, params), + std::invalid_argument); } SECTION("flood_fill") { - CHECK_THROWS_AS(operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, params), + CHECK_THROWS_AS(operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, params), std::invalid_argument); } SECTION("contour_tracing") { - CHECK_THROWS_AS(operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, params), + CHECK_THROWS_AS(operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, params), std::invalid_argument); } } @@ -314,11 +316,12 @@ TEST_CASE("SiQAD OR gate", "[operational-domain]") op_domain_params.operational_params.op_condition = is_operational_params::operational_condition::TOLERATE_KINKS; const auto op_domain = - operational_domain_grid_search(lyt, std::vector{create_or_tt()}, op_domain_params, &op_domain_stats); + operational_domain_grid_search(lyt, std::vector{create_or_tt()}, op_domain_params, &op_domain_stats); check_op_domain_params_and_operational_status(op_domain, op_domain_params, 
operational_status::OPERATIONAL); } +// NOLINTNEXTLINE(*-function-size) TEST_CASE("BDL wire operational domain computation", "[operational-domain]") { using layout = sidb_cell_clk_lyt_siqad; @@ -376,8 +379,8 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("grid_search") { - const auto op_domain = operational_domain_grid_search(lat, std::vector{create_id_tt()}, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_grid_search(lat, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size CHECK(op_domain.size() == 1); @@ -396,7 +399,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") op_domain_params.operational_params.op_condition = is_operational_params::operational_condition::REJECT_KINKS; - const auto op_domain_kinks = operational_domain_grid_search(lat, std::vector{create_id_tt()}, + const auto op_domain_kinks = operational_domain_grid_search(lat, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -415,11 +418,12 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("3-dimensional") { - const auto z_dimension = operational_domain_value_range{sweep_parameter::MU_MINUS, -0.32, -0.32, 0.01}; + constexpr auto z_dimension = + operational_domain_value_range{sweep_parameter::MU_MINUS, -0.32, -0.32, 0.01}; op_domain_params.sweep_dimensions.push_back(z_dimension); - const auto op_domain_3d = operational_domain_grid_search(lat, std::vector{create_id_tt()}, + const auto op_domain_3d = operational_domain_grid_search(lat, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -438,7 +442,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("random_sampling") { - const auto op_domain = 
operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, + const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -456,11 +460,12 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("3-dimensional") { - const auto z_dimension = operational_domain_value_range{sweep_parameter::MU_MINUS, -0.32, -0.32, 0.01}; + constexpr auto z_dimension = + operational_domain_value_range{sweep_parameter::MU_MINUS, -0.32, -0.32, 0.01}; op_domain_params.sweep_dimensions.push_back(z_dimension); - const auto op_domain_3d = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, + const auto op_domain_3d = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -480,8 +485,8 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("flood_fill") { - const auto op_domain = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size CHECK(op_domain.size() == 1); @@ -497,13 +502,14 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("3-dimensional") { - const auto z_dimension = operational_domain_value_range{sweep_parameter::MU_MINUS, -0.32, -0.32, 0.01}; + constexpr auto z_dimension = + operational_domain_value_range{sweep_parameter::MU_MINUS, -0.32, -0.32, 0.01}; op_domain_params.sweep_dimensions.push_back(z_dimension); SECTION("one random sample") { - const auto op_domain_3d = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, + const auto op_domain_3d = 
operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -523,7 +529,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("contour_tracing") { - const auto op_domain = operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, + const auto op_domain = operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -554,8 +560,8 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("grid_search") { - const auto op_domain = operational_domain_grid_search(lat, std::vector{create_id_tt()}, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_grid_search(lat, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size CHECK(op_domain.size() == 100); @@ -571,7 +577,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("random_sampling") { - const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, + const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -589,8 +595,8 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("flood_fill") { - const auto op_domain = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size CHECK(op_domain.size() == 100); @@ -606,7 +612,7 @@ TEST_CASE("BDL wire operational domain 
computation", "[operational-domain]") } SECTION("contour_tracing") { - const auto op_domain = operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, + const auto op_domain = operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -637,8 +643,8 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("grid_search") { - const auto op_domain = operational_domain_grid_search(lat, std::vector{create_id_tt()}, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_grid_search(lat, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size CHECK(op_domain.size() == 50); @@ -654,11 +660,12 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("3-dimensional") { - const auto z_dimension = operational_domain_value_range{sweep_parameter::MU_MINUS, -0.35, -0.29, 0.01}; + constexpr auto z_dimension = + operational_domain_value_range{sweep_parameter::MU_MINUS, -0.35, -0.29, 0.01}; op_domain_params.sweep_dimensions.push_back(z_dimension); - const auto op_domain_3d = operational_domain_grid_search(lat, std::vector{create_id_tt()}, + const auto op_domain_3d = operational_domain_grid_search(lat, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -677,7 +684,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("random_sampling") { - const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, + const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -695,11 +702,12 @@ TEST_CASE("BDL wire operational domain computation", 
"[operational-domain]") SECTION("3-dimensional") { - const auto z_dimension = operational_domain_value_range{sweep_parameter::MU_MINUS, -0.35, -0.29, 0.01}; + constexpr auto z_dimension = + operational_domain_value_range{sweep_parameter::MU_MINUS, -0.35, -0.29, 0.01}; op_domain_params.sweep_dimensions.push_back(z_dimension); - const auto op_domain_3d = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, + const auto op_domain_3d = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -719,8 +727,8 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("flood_fill") { - const auto op_domain = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size CHECK(op_domain.size() == 50); @@ -736,13 +744,14 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("3-dimensional") { - const auto z_dimension = operational_domain_value_range{sweep_parameter::MU_MINUS, -0.35, -0.29, 0.01}; + constexpr auto z_dimension = + operational_domain_value_range{sweep_parameter::MU_MINUS, -0.35, -0.29, 0.01}; op_domain_params.sweep_dimensions.push_back(z_dimension); SECTION("one random sample") { - const auto op_domain_3d = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 100, + const auto op_domain_3d = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 100, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -762,7 +771,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("contour_tracing") { - const auto op_domain = 
operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, + const auto op_domain = operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -793,8 +802,8 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("grid_search") { - const auto op_domain = operational_domain_grid_search(lat, std::vector{create_id_tt()}, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_grid_search(lat, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) CHECK(op_domain.size() == 100); @@ -811,11 +820,12 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("3-dimensional") { - const auto z_dimension = operational_domain_value_range{sweep_parameter::MU_MINUS, -0.14, -0.10, 0.01}; + constexpr auto z_dimension = + operational_domain_value_range{sweep_parameter::MU_MINUS, -0.14, -0.10, 0.01}; op_domain_params.sweep_dimensions.push_back(z_dimension); - const auto op_domain_3d = operational_domain_grid_search(lat, std::vector{create_id_tt()}, + const auto op_domain_3d = operational_domain_grid_search(lat, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -834,7 +844,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("random_sampling") { - const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 5000, + const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 5000, op_domain_params, &op_domain_stats); // check if the operational domain has the correct maximum size @@ -852,11 +862,12 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("3-dimensional") { - 
const auto z_dimension = operational_domain_value_range{sweep_parameter::MU_MINUS, -0.14, -0.10, 0.01}; + constexpr auto z_dimension = + operational_domain_value_range{sweep_parameter::MU_MINUS, -0.14, -0.10, 0.01}; op_domain_params.sweep_dimensions.push_back(z_dimension); - const auto op_domain_3d = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 5000, + const auto op_domain_3d = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 5000, op_domain_params, &op_domain_stats); // check if the operational domain has the correct maximum size @@ -875,8 +886,8 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("flood_fill") { - const auto op_domain = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 25, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 25, op_domain_params, &op_domain_stats); // check if the operational domain has the correct maximum size CHECK(op_domain.size() <= 100); @@ -893,11 +904,12 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("3-dimensional") { - const auto z_dimension = operational_domain_value_range{sweep_parameter::MU_MINUS, -0.14, -0.10, 0.01}; + constexpr auto z_dimension = + operational_domain_value_range{sweep_parameter::MU_MINUS, -0.14, -0.10, 0.01}; op_domain_params.sweep_dimensions.push_back(z_dimension); - const auto op_domain_3d = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 25, + const auto op_domain_3d = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 25, op_domain_params, &op_domain_stats); // check if the operational domain has the correct maximum size @@ -916,7 +928,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") } SECTION("contour_tracing") { - const auto op_domain = operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 25, + const auto 
op_domain = operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 25, op_domain_params, &op_domain_stats); // check if the operational domain has the correct maximum size @@ -947,7 +959,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("flood_fill") { - const auto op_domain = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 10000, + const auto op_domain = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 10000, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -971,8 +983,8 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("grid_search") { - const auto op_domain = operational_domain_grid_search(lat, std::vector{create_id_tt()}, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_grid_search(lat, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (16 steps in each dimension) CHECK(op_domain.size() == 256); @@ -989,7 +1001,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") SECTION("random_sampling") { - const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, + const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_id_tt()}, 100, op_domain_params, &op_domain_stats); // check if the operational domain has the correct maximum size @@ -1009,7 +1021,7 @@ TEST_CASE("BDL wire operational domain computation", "[operational-domain]") { SECTION("random sample to find operational parameter points") { - const auto op_domain = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 50, + const auto op_domain = operational_domain_flood_fill(lat, std::vector{create_id_tt()}, 50, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size @@ -1027,7 +1039,7 @@ TEST_CASE("BDL 
wire operational domain computation", "[operational-domain]") } SECTION("contour_tracing") { - const auto op_domain = operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 50, + const auto op_domain = operational_domain_contour_tracing(lat, std::vector{create_id_tt()}, 50, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (max 10 steps in each dimension) @@ -1084,7 +1096,7 @@ TEST_CASE("SiQAD's AND gate operational domain computation", "[operational-domai SECTION("grid_search") { const auto op_domain = - operational_domain_grid_search(lat, std::vector{create_and_tt()}, op_domain_params, &op_domain_stats); + operational_domain_grid_search(lat, std::vector{create_and_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) CHECK(op_domain.size() == 100); @@ -1100,7 +1112,7 @@ TEST_CASE("SiQAD's AND gate operational domain computation", "[operational-domai } SECTION("random_sampling") { - const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_and_tt()}, 100, + const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_and_tt()}, 100, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (max 10 steps in each dimension) @@ -1118,7 +1130,7 @@ TEST_CASE("SiQAD's AND gate operational domain computation", "[operational-domai SECTION("flood_fill") { const auto op_domain = - operational_domain_flood_fill(lat, std::vector{create_and_tt()}, 1, op_domain_params, &op_domain_stats); + operational_domain_flood_fill(lat, std::vector{create_and_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) CHECK(op_domain.size() == 100); @@ -1134,7 +1146,7 @@ TEST_CASE("SiQAD's AND gate operational domain computation", "[operational-domai } SECTION("contour_tracing") { - const auto op_domain = 
operational_domain_contour_tracing(lat, std::vector{create_and_tt()}, 1, + const auto op_domain = operational_domain_contour_tracing(lat, std::vector{create_and_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (max 10 steps in each dimension) @@ -1201,7 +1213,7 @@ TEST_CASE("SiQAD's AND gate operational domain computation, using cube coordinat SECTION("grid_search") { const auto op_domain = - operational_domain_grid_search(lat, std::vector{create_and_tt()}, op_domain_params, &op_domain_stats); + operational_domain_grid_search(lat, std::vector{create_and_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) CHECK(op_domain.size() == 100); @@ -1217,7 +1229,7 @@ TEST_CASE("SiQAD's AND gate operational domain computation, using cube coordinat } SECTION("random_sampling") { - const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_and_tt()}, 100, + const auto op_domain = operational_domain_random_sampling(lat, std::vector{create_and_tt()}, 100, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (max 10 steps in each dimension) @@ -1235,7 +1247,7 @@ TEST_CASE("SiQAD's AND gate operational domain computation, using cube coordinat SECTION("flood_fill") { const auto op_domain = - operational_domain_flood_fill(lat, std::vector{create_and_tt()}, 1, op_domain_params, &op_domain_stats); + operational_domain_flood_fill(lat, std::vector{create_and_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) CHECK(op_domain.size() == 100); @@ -1251,7 +1263,7 @@ TEST_CASE("SiQAD's AND gate operational domain computation, using cube coordinat } SECTION("contour_tracing") { - const auto op_domain = operational_domain_contour_tracing(lat, std::vector{create_and_tt()}, 1, + const auto op_domain = 
operational_domain_contour_tracing(lat, std::vector{create_and_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (max 10 steps in each dimension) @@ -1285,8 +1297,8 @@ TEMPLATE_TEST_CASE("AND gate on the H-Si(111)-1x1 surface", "[operational-domain SECTION("grid_search") { - const auto op_domain = operational_domain_grid_search(layout, std::vector{create_and_tt()}, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_grid_search(layout, std::vector{create_and_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) CHECK(op_domain.size() == 4); @@ -1302,7 +1314,7 @@ TEMPLATE_TEST_CASE("AND gate on the H-Si(111)-1x1 surface", "[operational-domain } SECTION("random_sampling") { - const auto op_domain = operational_domain_random_sampling(layout, std::vector{create_and_tt()}, 100, + const auto op_domain = operational_domain_random_sampling(layout, std::vector{create_and_tt()}, 100, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (max 10 steps in each dimension) @@ -1321,7 +1333,7 @@ TEMPLATE_TEST_CASE("AND gate on the H-Si(111)-1x1 surface", "[operational-domain { SECTION("one random sample") { - const auto op_domain = operational_domain_flood_fill(layout, std::vector{create_and_tt()}, 1, + const auto op_domain = operational_domain_flood_fill(layout, std::vector{create_and_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) @@ -1339,7 +1351,7 @@ TEMPLATE_TEST_CASE("AND gate on the H-Si(111)-1x1 surface", "[operational-domain } SECTION("contour_tracing") { - const auto op_domain = operational_domain_contour_tracing(layout, std::vector{create_and_tt()}, 1, + const auto op_domain = operational_domain_contour_tracing(layout, std::vector{create_and_tt()}, 1, op_domain_params, &op_domain_stats); // 
check if the operational domain has the correct size (max 10 steps in each dimension) @@ -1374,8 +1386,8 @@ TEMPLATE_TEST_CASE("AND gate with Bestagon shape and kink states at default phys SECTION("grid_search, allow kinks") { - const auto op_domain = operational_domain_grid_search(layout, std::vector{create_and_tt()}, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_grid_search(layout, std::vector{create_and_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) CHECK(op_domain.size() == 36); @@ -1389,8 +1401,8 @@ TEMPLATE_TEST_CASE("AND gate with Bestagon shape and kink states at default phys { op_domain_params.operational_params.op_condition = is_operational_params::operational_condition::REJECT_KINKS; - const auto op_domain = operational_domain_grid_search(layout, std::vector{create_and_tt()}, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_grid_search(layout, std::vector{create_and_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) CHECK(op_domain.size() == 36); @@ -1422,8 +1434,8 @@ TEMPLATE_TEST_CASE("Grid search to determine the operational domain. The operati SECTION("grid search, determine operational status with physical simulation") { - const auto op_domain = operational_domain_grid_search(layout, std::vector{create_and_tt()}, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_grid_search(layout, std::vector{create_and_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) CHECK(op_domain.size() == 36); @@ -1438,8 +1450,8 @@ TEMPLATE_TEST_CASE("Grid search to determine the operational domain. 
The operati op_domain_params.operational_params.strategy_to_analyze_operational_status = is_operational_params::operational_analysis_strategy::FILTER_ONLY; - const auto op_domain = operational_domain_grid_search(layout, std::vector{create_and_tt()}, - op_domain_params, &op_domain_stats); + const auto op_domain = + operational_domain_grid_search(layout, std::vector{create_and_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) CHECK(op_domain.size() == 36); @@ -1451,7 +1463,7 @@ TEMPLATE_TEST_CASE("Grid search to determine the operational domain. The operati // this test was created to cover a special case: Strange behavior was observed when no clone was used in the // `is_physical_validity_feasible` function. op_domain.for_each( - [](const auto& pp, const auto& status) + [](const auto& pp, [[maybe_unused]] const auto& status) { CHECK(pp.get_parameters()[0] >= 4.0); CHECK(pp.get_parameters()[1] >= 4.0); @@ -1504,7 +1516,7 @@ TEST_CASE("Bestagon AND gate operational domain and temperature computation, usi SECTION("grid_search") { - const auto op_domain = critical_temperature_domain_grid_search(lyt, std::vector{create_and_tt()}, + const auto op_domain = critical_temperature_domain_grid_search(lyt, std::vector{create_and_tt()}, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) @@ -1526,7 +1538,7 @@ TEST_CASE("Bestagon AND gate operational domain and temperature computation, usi op_domain_params.sweep_dimensions = {{sweep_parameter::EPSILON_R, 5.0, 5.2, 0.1}, {sweep_parameter::LAMBDA_TF, 4.9, 5.1, 0.1}}; - const auto op_domain = critical_temperature_domain_random_sampling(lyt, std::vector{create_and_tt()}, 10, + const auto op_domain = critical_temperature_domain_random_sampling(lyt, std::vector{create_and_tt()}, 10, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (max 10 steps in 
each dimension) @@ -1546,7 +1558,7 @@ TEST_CASE("Bestagon AND gate operational domain and temperature computation, usi op_domain_params.sweep_dimensions = {{sweep_parameter::EPSILON_R, 5.6, 5.8, 0.1}, {sweep_parameter::LAMBDA_TF, 4.9, 5.1, 0.1}}; - const auto op_domain = critical_temperature_domain_flood_fill(lyt, std::vector{create_and_tt()}, 1, + const auto op_domain = critical_temperature_domain_flood_fill(lyt, std::vector{create_and_tt()}, 1, op_domain_params, &op_domain_stats); // check if the operational domain has the correct size (10 steps in each dimension) @@ -1593,7 +1605,7 @@ TEST_CASE("Two BDL pair wire with degeneracy for input 1", "[operational-domain] bdl_input_iterator_params::input_bdl_configuration::PERTURBER_DISTANCE_ENCODED; const auto op_domain = - operational_domain_grid_search(lyt, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); + operational_domain_grid_search(lyt, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); check_op_domain_params_and_operational_status(op_domain, op_domain_params, operational_status::NON_OPERATIONAL); @@ -1603,7 +1615,7 @@ TEST_CASE("Two BDL pair wire with degeneracy for input 1", "[operational-domain] CHECK(op_domain_stats.num_operational_parameter_combinations == 0); CHECK(op_domain_stats.num_non_operational_parameter_combinations == 8281); } - SECTION("grid search, input is set via the absense of perturbers") + SECTION("grid search, input is set via the absence of perturbers") { operational_domain_stats op_domain_stats{}; @@ -1611,7 +1623,7 @@ TEST_CASE("Two BDL pair wire with degeneracy for input 1", "[operational-domain] bdl_input_iterator_params::input_bdl_configuration::PERTURBER_ABSENCE_ENCODED; const auto op_domain = - operational_domain_grid_search(lyt, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); + operational_domain_grid_search(lyt, std::vector{create_id_tt()}, op_domain_params, &op_domain_stats); 
check_op_domain_params_and_operational_status(op_domain, op_domain_params, operational_status::NON_OPERATIONAL); diff --git a/test/algorithms/simulation/sidb/physical_population_stability.cpp b/test/algorithms/simulation/sidb/physical_population_stability.cpp index c0554333f2..a24e89dcea 100644 --- a/test/algorithms/simulation/sidb/physical_population_stability.cpp +++ b/test/algorithms/simulation/sidb/physical_population_stability.cpp @@ -3,6 +3,7 @@ // #include +#include #include #include @@ -14,7 +15,7 @@ #include #include -#include +#include using namespace fiction; @@ -25,8 +26,8 @@ TEST_CASE("Single SiDB", "[assess-physical-population-stability]") SECTION("Precision of distance_corresponding_to_potential is two") { - const auto params = physical_population_stability_params{sidb_simulation_parameters{2, -0.29}, 2}; - const auto result = physical_population_stability(lyt, params); + constexpr auto params = physical_population_stability_params{sidb_simulation_parameters{2, -0.29}, 2}; + const auto result = physical_population_stability(lyt, params); REQUIRE(result.size() == 1); const auto& population_stability_detail = result[0]; CHECK(population_stability_detail.critical_cell == siqad::coord_t{1, 1, 0}); @@ -35,10 +36,10 @@ TEST_CASE("Single SiDB", "[assess-physical-population-stability]") siqad::coord_t{1, 1, 0}); CHECK(population_stability_detail.transition_potentials.at(transition_type::NEGATIVE_TO_NEUTRAL).second == 0.29); - CHECK(population_stability_detail.transition_potentials.at(transition_type::NEUTRAL_TO_POSITIVE).second == - std::numeric_limits::infinity()); - CHECK(population_stability_detail.transition_potentials.at(transition_type::POSITIVE_TO_NEUTRAL).second == - std::numeric_limits::infinity()); + CHECK(std::isinf( + population_stability_detail.transition_potentials.at(transition_type::NEUTRAL_TO_POSITIVE).second)); + CHECK(std::isinf( + population_stability_detail.transition_potentials.at(transition_type::POSITIVE_TO_NEUTRAL).second)); 
REQUIRE_THAT( population_stability_detail.distance_corresponding_to_potential.at(transition_type::NEGATIVE_TO_NEUTRAL), @@ -47,8 +48,8 @@ TEST_CASE("Single SiDB", "[assess-physical-population-stability]") SECTION("Precision of distance_corresponding_to_potential is three") { - const auto params = physical_population_stability_params{sidb_simulation_parameters{2, -0.29}, 3}; - const auto result = physical_population_stability(lyt, params); + constexpr auto params = physical_population_stability_params{sidb_simulation_parameters{2, -0.29}, 3}; + const auto result = physical_population_stability(lyt, params); REQUIRE(result.size() == 1); const auto& population_stability_detail = result[0]; REQUIRE_THAT( @@ -62,7 +63,7 @@ TEMPLATE_TEST_CASE("Three SiDBs with positive charge states", "[assess-physical- { TestType lyt{}; - const auto params = physical_population_stability_params{}; + constexpr auto params = physical_population_stability_params{}; lyt.assign_cell_type({1, 1, 0}, sidb_technology::cell_type::NORMAL); lyt.assign_cell_type({1, 1, 1}, sidb_technology::cell_type::NORMAL); lyt.assign_cell_type({2, 1, 0}, sidb_technology::cell_type::NORMAL); @@ -117,7 +118,7 @@ TEMPLATE_TEST_CASE("Bestagon AND gate", "[assess-physical-population-stability]" { TestType lyt{}; - const auto params = physical_population_stability_params{}; + constexpr auto params = physical_population_stability_params{}; lyt.assign_cell_type({36, 1, 0}, sidb_technology::cell_type::INPUT); lyt.assign_cell_type({2, 1, 0}, sidb_technology::cell_type::INPUT); @@ -226,7 +227,7 @@ TEMPLATE_TEST_CASE("Bestagon AND gate", "[assess-physical-population-stability]" TEST_CASE("Bestagon CX gate input 11", "[assess-physical-population-stability], [quality]") { - const auto population_stability_params = physical_population_stability_params{}; + constexpr auto population_stability_params = physical_population_stability_params{}; SECTION("using cube coordinates") { @@ -313,7 +314,7 @@ TEST_CASE("Bestagon CX gate 
input 11", "[assess-physical-population-stability], { sidb_100_cell_clk_lyt lyt{}; - const auto params = physical_population_stability_params{}; + constexpr auto params = physical_population_stability_params{}; lyt.assign_cell_type(siqad::to_fiction_coord(siqad::coord_t{36, 1, 0}), sidb_technology::cell_type::INPUT); lyt.assign_cell_type(siqad::to_fiction_coord(siqad::coord_t{2, 1, 0}), diff --git a/test/algorithms/simulation/sidb/potential_to_distance_conversion.cpp b/test/algorithms/simulation/sidb/potential_to_distance_conversion.cpp index 21a958b392..e3395a456a 100644 --- a/test/algorithms/simulation/sidb/potential_to_distance_conversion.cpp +++ b/test/algorithms/simulation/sidb/potential_to_distance_conversion.cpp @@ -2,7 +2,8 @@ // Created by Jan Drewniok on 10.11.23. // -#include +#include +#include #include #include @@ -19,60 +20,60 @@ TEST_CASE("Conversion of potential to distance", "[potential_to_distance_convers SECTION("Valid conversion with default parameters") { - uint64_t const precision = 1; - const double potential_value = 5.0; - const double expected_distance = 0.1; + constexpr uint64_t precision = 1; + constexpr double potential_value = 5.0; + constexpr double expected_distance = 0.1; REQUIRE_THAT(potential_to_distance_conversion(potential_value, params, precision), Catch::Matchers::WithinAbs(expected_distance, 1e-5)); } SECTION("Valid conversion with custom parameters, precision is 1") { - params.epsilon_r = 2.0; - params.lambda_tf = 1.0; - const uint64_t precision = 1; - const double potential_value = 0.01; - const double expected_distance = 3.2; + params.epsilon_r = 2.0; + params.lambda_tf = 1.0; + constexpr uint64_t precision = 1; + constexpr double potential_value = 0.01; + constexpr double expected_distance = 3.2; REQUIRE_THAT(potential_to_distance_conversion(potential_value, params, precision), Catch::Matchers::WithinAbs(expected_distance, 1e-5)); } SECTION("Valid conversion with custom parameters, precision is 2") { - params.epsilon_r = 
2.0; - params.lambda_tf = 1.0; - const uint64_t precision = 2; - const double potential_value = 0.01; - const double expected_distance = 3.14; + params.epsilon_r = 2.0; + params.lambda_tf = 1.0; + constexpr uint64_t precision = 2; + constexpr double potential_value = 0.01; + constexpr double expected_distance = 3.14; REQUIRE_THAT(potential_to_distance_conversion(potential_value, params, precision), Catch::Matchers::WithinAbs(expected_distance, 1e-5)); } SECTION("Valid conversion with custom parameters, precision is 3") { - params.epsilon_r = 2.0; - params.lambda_tf = 1.0; - const uint64_t precision = 3; - const double potential_value = 0.01; - const double expected_distance = 3.135; + params.epsilon_r = 2.0; + params.lambda_tf = 1.0; + constexpr uint64_t precision = 3; + constexpr double potential_value = 0.01; + constexpr double expected_distance = 3.135; REQUIRE_THAT(potential_to_distance_conversion(potential_value, params, precision), Catch::Matchers::WithinAbs(expected_distance, 1e-5)); } SECTION("Valid conversion with custom parameters, precision is 0") { - const uint64_t precision = 0; - const double potential_value = 0.03; - const double expected_distance = 4; + constexpr uint64_t precision = 0; + constexpr double potential_value = 0.03; + constexpr double expected_distance = 4; REQUIRE_THAT(potential_to_distance_conversion(potential_value, params, precision), Catch::Matchers::WithinAbs(expected_distance, 1e-5)); } SECTION("Conversion with infinite potential") { - const uint64_t precision = 3; - const double potential_value = std::numeric_limits::infinity(); - const double expected_distance = 0.001; + constexpr uint64_t precision = 3; + constexpr double potential_value = std::numeric_limits::infinity(); + constexpr double expected_distance = 0.001; REQUIRE_THAT(potential_to_distance_conversion(potential_value, params, precision), Catch::Matchers::WithinAbs(expected_distance, 1e-5)); } diff --git a/test/algorithms/simulation/sidb/time_to_solution.cpp 
b/test/algorithms/simulation/sidb/time_to_solution.cpp index 3ac6aed275..0ced4dc81a 100644 --- a/test/algorithms/simulation/sidb/time_to_solution.cpp +++ b/test/algorithms/simulation/sidb/time_to_solution.cpp @@ -3,6 +3,8 @@ // #include +#include +#include #include #include @@ -30,28 +32,26 @@ TEMPLATE_TEST_CASE("Basic time-to-solution test with varying layouts", "[time-to SECTION("layout with no SiDB placed") { - const sidb_simulation_parameters params{2, -0.30}; - const quicksim_params quicksim_params{params}; - time_to_solution_stats tts_stat_quickexact{}; - const time_to_solution_params tts_params_quickexact{exact_sidb_simulation_engine::QUICKEXACT}; + constexpr sidb_simulation_parameters params{2, -0.30}; + const quicksim_params quicksim_params{params}; + time_to_solution_stats tts_stat_quickexact{}; + constexpr time_to_solution_params tts_params_quickexact{exact_sidb_simulation_engine::QUICKEXACT}; time_to_solution(lyt, quicksim_params, tts_params_quickexact, &tts_stat_quickexact); CHECK(tts_stat_quickexact.algorithm == "QuickExact"); CHECK_THAT(tts_stat_quickexact.acc, Catch::Matchers::WithinAbs(0.0, 0.00001)); - CHECK_THAT(tts_stat_quickexact.time_to_solution, - Catch::Matchers::WithinAbs(std::numeric_limits::max(), 0.00001)); + CHECK(std::isinf(tts_stat_quickexact.time_to_solution)); CHECK_THAT(tts_stat_quickexact.mean_single_runtime, Catch::Matchers::WithinAbs(0.0, 0.00001)); #if (FICTION_ALGLIB_ENABLED) - time_to_solution_stats tts_stat_clustercomplete{}; - const time_to_solution_params tts_params_clustercomplete{exact_sidb_simulation_engine::CLUSTERCOMPLETE}; + time_to_solution_stats tts_stat_clustercomplete{}; + constexpr time_to_solution_params tts_params_clustercomplete{exact_sidb_simulation_engine::CLUSTERCOMPLETE}; time_to_solution(lyt, quicksim_params, tts_params_clustercomplete, &tts_stat_clustercomplete); CHECK(tts_stat_clustercomplete.algorithm == "ClusterComplete"); CHECK_THAT(tts_stat_clustercomplete.acc, Catch::Matchers::WithinAbs(0.0, 
0.00001)); - CHECK_THAT(tts_stat_clustercomplete.time_to_solution, - Catch::Matchers::WithinAbs(std::numeric_limits::max(), 0.00001)); + CHECK(std::isinf(tts_stat_clustercomplete.time_to_solution)); CHECK_THAT(tts_stat_clustercomplete.mean_single_runtime, Catch::Matchers::WithinAbs(0.0, 0.00001)); #endif // FICTION_ALGLIB_ENABLED @@ -62,8 +62,7 @@ TEMPLATE_TEST_CASE("Basic time-to-solution test with varying layouts", "[time-to CHECK(tts_stat_exgs.algorithm == "ExGS"); CHECK_THAT(tts_stat_exgs.acc, Catch::Matchers::WithinAbs(0.0, 0.00001)); - CHECK_THAT(tts_stat_exgs.time_to_solution, - Catch::Matchers::WithinAbs(std::numeric_limits::max(), 0.00001)); + CHECK(std::isinf(tts_stat_exgs.time_to_solution)); CHECK_THAT(tts_stat_exgs.mean_single_runtime, Catch::Matchers::WithinAbs(0.0, 0.00001)); } @@ -76,11 +75,11 @@ TEMPLATE_TEST_CASE("Basic time-to-solution test with varying layouts", "[time-to lyt.assign_cell_type({10, 3, 0}, TestType::cell_type::NORMAL); lyt.assign_cell_type({12, 3, 0}, TestType::cell_type::NORMAL); - const sidb_simulation_parameters params{2, -0.30}; - const quicksim_params quicksim_params{params}; + constexpr sidb_simulation_parameters params{2, -0.30}; + const quicksim_params quicksim_params{params}; - const time_to_solution_params tts_params_exgs{exact_sidb_simulation_engine::EXGS}; - time_to_solution_stats tts_stat_exgs{}; + constexpr time_to_solution_params tts_params_exgs{exact_sidb_simulation_engine::EXGS}; + time_to_solution_stats tts_stat_exgs{}; time_to_solution(lyt, quicksim_params, tts_params_exgs, &tts_stat_exgs); CHECK(tts_stat_exgs.acc == 100.0); @@ -106,8 +105,8 @@ TEMPLATE_TEST_CASE("Basic time-to-solution test with varying layouts", "[time-to #if (FICTION_ALGLIB_ENABLED) - time_to_solution_stats tts_stat_clustercomplete{}; - const time_to_solution_params tts_params_clustercomplete{exact_sidb_simulation_engine::CLUSTERCOMPLETE}; + time_to_solution_stats tts_stat_clustercomplete{}; + constexpr time_to_solution_params 
tts_params_clustercomplete{exact_sidb_simulation_engine::CLUSTERCOMPLETE}; time_to_solution(lyt, quicksim_params, tts_params_clustercomplete, &tts_stat_clustercomplete); REQUIRE(tts_stat_clustercomplete.acc == 100); @@ -147,26 +146,26 @@ TEMPLATE_TEST_CASE("time-to-solution test with offset coordinates", "[time-to-so lyt.assign_cell_type({20, 9, 0}, TestType::cell_type::NORMAL); lyt.assign_cell_type({3, 3, 0}, TestType::cell_type::NORMAL); - const sidb_simulation_parameters params{2, -0.32}; + constexpr sidb_simulation_parameters params{2, -0.32}; quicksim_params quicksim_params{params}; quicksim_params.iteration_steps = 10; - const time_to_solution_params tts_params_exgs{exact_sidb_simulation_engine::EXGS}; - time_to_solution_stats tts_stat_exgs{}; + constexpr time_to_solution_params tts_params_exgs{exact_sidb_simulation_engine::EXGS}; + time_to_solution_stats tts_stat_exgs{}; time_to_solution(lyt, quicksim_params, tts_params_exgs, &tts_stat_exgs); CHECK(tts_stat_exgs.time_to_solution > 0.0); CHECK(tts_stat_exgs.mean_single_runtime > 0.0); - time_to_solution_stats tts_stat_quickexact{}; - const time_to_solution_params tts_params{exact_sidb_simulation_engine::QUICKEXACT}; + time_to_solution_stats tts_stat_quickexact{}; + constexpr time_to_solution_params tts_params{exact_sidb_simulation_engine::QUICKEXACT}; time_to_solution(lyt, quicksim_params, tts_params, &tts_stat_quickexact); CHECK(tts_stat_quickexact.time_to_solution > 0.0); CHECK(tts_stat_quickexact.mean_single_runtime > 0.0); - auto tts_calculated = std::numeric_limits::max(); + auto tts_calculated = std::numeric_limits::infinity(); if (tts_stat_quickexact.acc == 100) { @@ -198,17 +197,16 @@ TEMPLATE_TEST_CASE("time-to-solution test with simulation results", "[time-to-so lyt.assign_cell_type({10, 6, 0}, TestType::cell_type::NORMAL); lyt.assign_cell_type({12, 6, 0}, TestType::cell_type::NORMAL); - const sidb_simulation_parameters params{3, -0.32}; - const quicksim_params quicksim_params{params}; + constexpr 
sidb_simulation_parameters params{3, -0.32}; + const quicksim_params quicksim_params{params}; - const std::size_t number_of_repetitions = 100; + constexpr std::size_t number_of_repetitions = 100; std::vector> simulation_results_quicksim{}; simulation_results_quicksim.reserve(number_of_repetitions); for (auto i = 0u; i < number_of_repetitions; i++) { - const auto simulation_result = quicksim(lyt, quicksim_params); - if (simulation_result.has_value()) + if (const auto simulation_result = quicksim(lyt, quicksim_params); simulation_result.has_value()) { simulation_results_quicksim.push_back(simulation_result.value()); } @@ -247,8 +245,8 @@ TEMPLATE_TEST_CASE("time-to-solution test with fewer negatively charged SiDBs in lyt.assign_cell_type({3, 3, 0}, TestType::cell_type::NORMAL); lyt.assign_cell_type({6, 3, 0}, TestType::cell_type::NORMAL); - const sidb_simulation_parameters params{2, -0.05}; - const quicksim_params quicksim_params{params}; + constexpr sidb_simulation_parameters params{2, -0.05}; + const quicksim_params quicksim_params{params}; auto tts_stats_quicksim = time_to_solution_stats{}; diff --git a/test/io/dot_drawers.cpp b/test/io/dot_drawers.cpp index 1a1a41e623..ba6705873c 100644 --- a/test/io/dot_drawers.cpp +++ b/test/io/dot_drawers.cpp @@ -4,12 +4,13 @@ #include +#include "fiction/utils/version_info.hpp" #include "utils/blueprints/layout_blueprints.hpp" -#include "utils/version_info.hpp" #include #include #include +#include #include #include #include @@ -22,6 +23,9 @@ using namespace fiction; +namespace +{ + template void compare_dot_layout(const Lyt& lyt, const std::string_view& layout_print) { @@ -32,11 +36,13 @@ void compare_dot_layout(const Lyt& lyt, const std::string_view& layout_print) CHECK(dot_stream.str() == layout_print); } +} // namespace + TEST_CASE("Draw empty Cartesian layout", "[dot-drawers]") { using gate_layout = gate_level_layout>>>; - gate_layout layout{gate_layout::aspect_ratio{2, 2}}; + const gate_layout 
layout{gate_layout::aspect_ratio{2, 2}}; static const std::string layout_print = fmt::format("digraph layout {{ // Generated by {} ({})\n" @@ -221,7 +227,7 @@ TEST_CASE("Draw empty hexagonal layouts", "[dot-drawers]") using gate_layout = gate_level_layout>>>; - gate_layout layout{gate_layout::aspect_ratio{2, 2}}; + const gate_layout layout{gate_layout::aspect_ratio{2, 2}}; static const std::string layout_print = fmt::format("digraph layout {{ // Generated by {} ({})\n" @@ -271,7 +277,7 @@ TEST_CASE("Draw empty hexagonal layouts", "[dot-drawers]") using gate_layout = gate_level_layout>>>; - gate_layout layout{gate_layout::aspect_ratio{2, 2}}; + const gate_layout layout{gate_layout::aspect_ratio{2, 2}}; static const std::string layout_print = fmt::format("digraph layout {{ // Generated by {} ({})\n" @@ -322,7 +328,7 @@ TEST_CASE("Draw empty hexagonal layouts", "[dot-drawers]") using gate_layout = gate_level_layout>>>; - gate_layout layout{gate_layout::aspect_ratio{2, 2}}; + const gate_layout layout{gate_layout::aspect_ratio{2, 2}}; static const std::string layout_print = fmt::format("digraph layout {{ // Generated by {} ({})\n" @@ -372,7 +378,7 @@ TEST_CASE("Draw empty hexagonal layouts", "[dot-drawers]") using gate_layout = gate_level_layout>>>; - gate_layout layout{gate_layout::aspect_ratio{2, 2}}; + const gate_layout layout{gate_layout::aspect_ratio{2, 2}}; static const std::string layout_print = fmt::format("digraph layout {{ // Generated by {} ({})\n" diff --git a/test/io/write_fqca_layout.cpp b/test/io/write_fqca_layout.cpp index 84bf4cce74..007a59f7d7 100644 --- a/test/io/write_fqca_layout.cpp +++ b/test/io/write_fqca_layout.cpp @@ -4,13 +4,14 @@ #include +#include "fiction/utils/version_info.hpp" #include "utils/blueprints/layout_blueprints.hpp" -#include "utils/version_info.hpp" #include #include #include #include +#include #include #include @@ -51,7 +52,7 @@ TEST_CASE("Write empty FQCA layout", "[fqca]") { using qca_layout = cell_level_layout>>; - 
qca_layout layout{{2, 2, 1}, "empty layout"}; + const qca_layout layout{{2, 2, 1}, "empty layout"}; write_fqca_layout(layout, layout_stream, {true}); @@ -61,7 +62,7 @@ TEST_CASE("Write empty FQCA layout", "[fqca]") { using qca_layout = cell_level_layout>>; - qca_layout layout{{2, 2, 1}, "empty layout"}; + const qca_layout layout{{2, 2, 1}, "empty layout"}; write_fqca_layout(layout, layout_stream, {true}); @@ -93,7 +94,7 @@ TEST_CASE("Write empty FQCA layout", "[fqca]") { using qca_layout = cell_level_layout>>; - qca_layout layout{{2, 2, 1}, "empty layout"}; + const qca_layout layout{{2, 2, 1}, "empty layout"}; write_fqca_layout(layout, layout_stream, {false}); @@ -103,7 +104,7 @@ TEST_CASE("Write empty FQCA layout", "[fqca]") { using qca_layout = cell_level_layout>>; - qca_layout layout{{2, 2, 1}, "empty layout"}; + const qca_layout layout{{2, 2, 1}, "empty layout"}; write_fqca_layout(layout, layout_stream, {false}); diff --git a/test/io/write_qca_layout.cpp b/test/io/write_qca_layout.cpp index e46e9e5c74..14bb9e623a 100644 --- a/test/io/write_qca_layout.cpp +++ b/test/io/write_qca_layout.cpp @@ -4,12 +4,13 @@ #include -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp" #include #include #include #include +#include #include #include @@ -23,7 +24,7 @@ TEST_CASE("Write empty QCAD layout", "[qcad]") { using qca_layout = cell_level_layout>>; - qca_layout layout{{2, 2, 1}, "empty layout"}; + const qca_layout layout{{2, 2, 1}, "empty layout"}; SECTION("with vias") { diff --git a/test/io/write_qll_layout.cpp b/test/io/write_qll_layout.cpp index 0216c1651a..a5d4fc422c 100644 --- a/test/io/write_qll_layout.cpp +++ b/test/io/write_qll_layout.cpp @@ -4,8 +4,8 @@ #include +#include "fiction/utils/version_info.hpp" #include "utils/blueprints/layout_blueprints.hpp" -#include "utils/version_info.hpp" #include #include @@ -14,6 +14,7 @@ #include #include +#include #include using namespace fiction; diff --git a/test/io/write_sqd_sim_result.cpp 
b/test/io/write_sqd_sim_result.cpp index d704737bf4..58b36762e2 100644 --- a/test/io/write_sqd_sim_result.cpp +++ b/test/io/write_sqd_sim_result.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -21,8 +22,6 @@ #include #include -#include - using namespace fiction; TEST_CASE("Utility function: any_to_string", "[sqd-sim-result]") diff --git a/test/io/write_svg_layout.cpp b/test/io/write_svg_layout.cpp index 4c312fdb37..8a3ed25f5b 100644 --- a/test/io/write_svg_layout.cpp +++ b/test/io/write_svg_layout.cpp @@ -5,7 +5,7 @@ #include #include -#include "utils/version_info.hpp" +#include "fiction/utils/version_info.hpp" #include #include diff --git a/test/layouts/bounding_box.cpp b/test/layouts/bounding_box.cpp index e27f8a12a9..8f048f7390 100644 --- a/test/layouts/bounding_box.cpp +++ b/test/layouts/bounding_box.cpp @@ -2,6 +2,7 @@ // Created by marcel on 13.01.22. // #include +#include #include "utils/blueprints/layout_blueprints.hpp" @@ -137,7 +138,7 @@ TEST_CASE("Update 2D cell-level bounding box", "[bounding-box]") TEMPLATE_TEST_CASE("2D bounding box for siqad layout", "[bounding-box]", sidb_cell_clk_lyt_siqad, sidb_111_cell_clk_lyt_siqad, sidb_100_cell_clk_lyt_siqad) { - SECTION("empyt layout") + SECTION("empty layout") { const TestType lyt{}; @@ -227,7 +228,7 @@ TEMPLATE_TEST_CASE("2D bounding box for siqad layout", "[bounding-box]", sidb_ce TEMPLATE_TEST_CASE("2D bounding box for siqad layout with atomic defect", "[bounding-box]", sidb_defect_cell_clk_lyt_siqad, sidb_111_cell_clk_lyt_siqad, sidb_defect_100_cell_clk_lyt_siqad) { - SECTION("empyt layout") + SECTION("empty layout") { const TestType lyt{}; @@ -272,7 +273,7 @@ TEMPLATE_TEST_CASE("2D bounding box for siqad layout with atomic defect", "[boun TEMPLATE_TEST_CASE("2D bounding box for layout with atomic defect", "[bounding-box]", sidb_defect_cell_clk_lyt) { - SECTION("empyt layout") + SECTION("empty layout") { const TestType lyt{}; @@ -318,7 +319,7 @@ TEMPLATE_TEST_CASE("2D 
bounding box for layout with atomic defect", "[bounding-b TEMPLATE_TEST_CASE("2D bounding box for cube layout with atomic defect", "[bounding-box]", sidb_cell_clk_lyt_cube, sidb_111_cell_clk_lyt_cube, sidb_100_cell_clk_lyt_cube) { - SECTION("empyt layout") + SECTION("empty layout") { const TestType lyt{}; @@ -368,7 +369,7 @@ TEMPLATE_TEST_CASE("2D bounding box for cube layout with atomic defect", "[bound lyt.assign_sidb_defect({-3, 0}, sidb_defect{}); lyt.assign_sidb_defect({2, 0}, sidb_defect{}); - const bounding_box_2d bb{static_cast(lyt)}; + const bounding_box_2d bb{static_cast(lyt)}; // NOLINT(cppcoreguidelines-slicing) const auto nw = bb.get_min(); const auto se = bb.get_max(); diff --git a/test/networks/technology_network.cpp b/test/networks/technology_network.cpp index 35e162f622..1840df28e2 100644 --- a/test/networks/technology_network.cpp +++ b/test/networks/technology_network.cpp @@ -14,7 +14,9 @@ #include #include #include +#include +#include #include using namespace fiction; @@ -453,7 +455,7 @@ TEST_CASE("hash nodes in technology network", "[technology-network]") CHECK(tec.size() == 8); // no structural hashing } -TEST_CASE("subsitute node by another", "[technology-network]") +TEST_CASE("substitute node by another", "[technology-network]") { technology_network tec{}; diff --git a/uv.lock b/uv.lock index 34d04fec6a..80c2330dac 100644 --- a/uv.lock +++ b/uv.lock @@ -15,13 +15,31 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929, upload-time = "2024-07-26T18:15:02.05Z" }, ] +[[package]] +name = "argcomplete" +version = "3.6.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/38/61/0b9ae6399dd4a58d8c1b1dc5a27d6f2808023d0b5dd3104bb99f45a33ff6/argcomplete-3.6.3.tar.gz", hash = 
"sha256:62e8ed4fd6a45864acc8235409461b72c9a28ee785a2011cc5eb78318786c89c", size = 73754, upload-time = "2025-10-20T03:33:34.741Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/74/f5/9373290775639cb67a2fce7f629a1c240dce9f12fe927bc32b2736e16dfc/argcomplete-3.6.3-py3-none-any.whl", hash = "sha256:f5007b3a600ccac5d25bbce33089211dfd49eab4a7718da3f10e3082525a92ce", size = 43846, upload-time = "2025-10-20T03:33:33.021Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + [[package]] name = "babel" -version = "2.17.0" +version = "2.18.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/b2/51899539b6ceeeb420d40ed3cd4b7a40519404f9baf3d4ac99dc413a834b/babel-2.18.0.tar.gz", hash = "sha256:b80b99a14bd085fcacfa15c9165f651fbb3406e66cc603abf11c5750937c992d", size = 9959554, upload-time = "2026-02-01T12:30:56.078Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = 
"sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, + { url = "https://files.pythonhosted.org/packages/77/f5/21d2de20e8b8b0408f0681956ca2c69f1320a3848ac50e6e7f39c6159675/babel-2.18.0-py3-none-any.whl", hash = "sha256:e2b422b277c2b9a9630c1d7903c2a00d0830c409c59ac8cae9081c92f1aeba35", size = 10196845, upload-time = "2026-02-01T12:30:53.445Z" }, ] [[package]] @@ -39,11 +57,11 @@ wheels = [ [[package]] name = "certifi" -version = "2025.11.12" +version = "2026.1.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/2d/a891ca51311197f6ad14a7ef42e2399f36cf2f9bd44752b3dc4eab60fdc5/certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120", size = 154268, upload-time = "2026-01-04T02:42:41.825Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, + { url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" }, ] [[package]] @@ -144,6 +162,40 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = 
"sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] +[[package]] +name = "colorlog" +version = "6.10.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a2/61/f083b5ac52e505dfc1c624eafbf8c7589a0d7f32daa398d2e7590efa5fda/colorlog-6.10.1.tar.gz", hash = "sha256:eb4ae5cb65fe7fec7773c2306061a8e63e02efc2c72eba9d27b0fa23c94f1321", size = 17162, upload-time = "2025-10-16T16:14:11.978Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/c1/e419ef3723a074172b68aaa89c9f3de486ed4c2399e2dbd8113a4fdcaf9e/colorlog-6.10.1-py3-none-any.whl", hash = "sha256:2d7e8348291948af66122cff006c9f8da6255d224e7cf8e37d8de2df3bad8c9c", size = 11743, upload-time = "2025-10-16T16:14:10.512Z" }, +] + +[[package]] +name = "dependency-groups" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/55/f054de99871e7beb81935dea8a10b90cd5ce42122b1c3081d5282fdb3621/dependency_groups-1.3.1.tar.gz", hash = "sha256:78078301090517fd938c19f64a53ce98c32834dfe0dee6b88004a569a6adfefd", size = 10093, upload-time = "2025-05-02T00:34:29.452Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/c7/d1ec24fb280caa5a79b6b950db565dab30210a66259d17d5bb2b3a9f878d/dependency_groups-1.3.1-py3-none-any.whl", hash = "sha256:51aeaa0dfad72430fcfb7bcdbefbd75f3792e5919563077f30bc0d73f4493030", size = 8664, upload-time = "2025-05-02T00:34:27.085Z" }, +] + +[[package]] +name = "distlib" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/96/8e/709914eb2b5749865801041647dc7f4e6d00b549cfe88b65ca192995f07c/distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d", size = 614605, upload-time = "2025-07-17T16:52:00.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, +] + [[package]] name = "docutils" version = "0.21.2" @@ -165,6 +217,33 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, ] +[[package]] +name = "execnet" +version = "2.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bf/89/780e11f9588d9e7128a3f87788354c7946a9cbb1401ad38a48c4db9a4f07/execnet-2.1.2.tar.gz", hash = "sha256:63d83bfdd9a23e35b9c6a3261412324f964c2ec8dcd8d3c6916ee9373e0befcd", size = 166622, upload-time = "2025-11-12T09:56:37.75Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/84/02fc1827e8cdded4aa65baef11296a9bbe595c474f0d6d758af082d849fd/execnet-2.1.2-py3-none-any.whl", hash = "sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec", size = 40708, upload-time = "2025-11-12T09:56:36.333Z" }, +] + +[[package]] +name = "filelock" +version = "3.20.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/65/ce7f1b70157833bf3cb851b556a37d4547ceafc158aa9b34b36782f23696/filelock-3.20.3.tar.gz", hash = "sha256:18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1", size 
= 19485, upload-time = "2026-01-09T17:55:05.421Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/36/7fb70f04bf00bc646cd5bb45aa9eddb15e19437a28b8fb2b4a5249fac770/filelock-3.20.3-py3-none-any.whl", hash = "sha256:4b0dda527ee31078689fc205ec4f1c1bf7d56cf88b6dc9426c4f230e46c2dce1", size = 16701, upload-time = "2026-01-09T17:55:04.334Z" }, +] + +[[package]] +name = "humanize" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/66/a3921783d54be8a6870ac4ccffcd15c4dc0dd7fcce51c6d63b8c63935276/humanize-4.15.0.tar.gz", hash = "sha256:1dd098483eb1c7ee8e32eb2e99ad1910baefa4b75c3aff3a82f4d78688993b10", size = 83599, upload-time = "2025-12-20T20:16:13.19Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c5/7b/bca5613a0c3b542420cf92bd5e5fb8ebd5435ce1011a091f66bb7693285e/humanize-4.15.0-py3-none-any.whl", hash = "sha256:b1186eb9f5a9749cd9cb8565aee77919dd7c8d076161cf44d70e59e3301e1769", size = 132203, upload-time = "2025-12-20T20:16:11.67Z" }, +] + [[package]] name = "idna" version = "3.11" @@ -297,24 +376,18 @@ dependencies = [ { name = "z3-solver" }, ] -[package.optional-dependencies] -test = [ - { name = "pytest" }, -] - [package.dev-dependencies] build = [ { name = "scikit-build-core" }, { name = "setuptools-scm" }, ] dev = [ - { name = "breathe" }, + { name = "nox" }, { name = "pytest" }, + { name = "pytest-sugar" }, + { name = "pytest-xdist" }, { name = "scikit-build-core" }, { name = "setuptools-scm" }, - { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "sphinx-rtd-theme" }, - { name = "sphinx-tabs" }, ] docs = [ { name = "breathe" }, @@ -325,15 +398,12 @@ docs = [ ] test = [ { name = "pytest" }, + { name = "pytest-sugar" }, + { name = "pytest-xdist" }, ] [package.metadata] -requires-dist = [ - { name = "mnt-pyfiction", marker = "extra == 'test'" }, - { name = 
"pytest", marker = "extra == 'test'", specifier = ">=7.2" }, - { name = "z3-solver", specifier = ">=4.8.0" }, -] -provides-extras = ["test"] +requires-dist = [{ name = "z3-solver", specifier = ">=4.8.0" }] [package.metadata.requires-dev] build = [ @@ -341,13 +411,12 @@ build = [ { name = "setuptools-scm", specifier = ">=8.1" }, ] dev = [ - { name = "breathe", specifier = "==4.36.0" }, - { name = "pytest", specifier = ">=8.3.3" }, + { name = "nox", specifier = ">=2025.11.12" }, + { name = "pytest", specifier = ">=9.0.1" }, + { name = "pytest-sugar", specifier = ">=1.1.1" }, + { name = "pytest-xdist", specifier = ">=3.8.0" }, { name = "scikit-build-core", specifier = ">=0.11.0" }, { name = "setuptools-scm", specifier = ">=8.1" }, - { name = "sphinx", marker = "python_full_version >= '3.11'", specifier = "==8.2.3" }, - { name = "sphinx-rtd-theme", specifier = "==3.0.2" }, - { name = "sphinx-tabs", specifier = "==3.4.7" }, ] docs = [ { name = "breathe", specifier = "==4.36.0" }, @@ -356,24 +425,56 @@ docs = [ { name = "sphinx-rtd-theme", specifier = "==3.0.2" }, { name = "sphinx-tabs", specifier = "==3.4.7" }, ] -test = [{ name = "pytest", specifier = ">=8.3.3" }] +test = [ + { name = "pytest", specifier = ">=9.0.1" }, + { name = "pytest-sugar", specifier = ">=1.1.1" }, + { name = "pytest-xdist", specifier = ">=3.8.0" }, +] + +[[package]] +name = "nox" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "argcomplete" }, + { name = "attrs" }, + { name = "colorlog" }, + { name = "dependency-groups" }, + { name = "humanize" }, + { name = "packaging" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "virtualenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b4/a8/e169497599266d176832e2232c08557ffba97eef87bf8a18f9f918e0c6aa/nox-2025.11.12.tar.gz", hash = "sha256:3d317f9e61f49d6bde39cf2f59695bb4e1722960457eee3ae19dacfe03c07259", size = 4030561, upload-time = 
"2025-11-12T18:39:03.319Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/34/434c594e0125a16b05a7bedaea33e63c90abbfbe47e5729a735a8a8a90ea/nox-2025.11.12-py3-none-any.whl", hash = "sha256:707171f9f63bc685da9d00edd8c2ceec8405b8e38b5fb4e46114a860070ef0ff", size = 74447, upload-time = "2025-11-12T18:39:01.575Z" }, +] [[package]] name = "packaging" -version = "25.0" +version = "26.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, + { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, ] [[package]] name = "pathspec" -version = "0.12.1" +version = "1.0.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fa/36/e27608899f9b8d4dff0617b2d9ab17ca5608956ca44461ac14ac48b44015/pathspec-1.0.4.tar.gz", hash = "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645", size = 131200, 
upload-time = "2026-01-27T03:59:46.938Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/3c/2c197d226f9ea224a9ab8d197933f9da0ae0aac5b6e0f884e2b8d9c8e9f7/pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723", size = 55206, upload-time = "2026-01-27T03:59:45.137Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, + { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, ] [[package]] @@ -412,6 +513,32 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" }, ] +[[package]] +name = 
"pytest-sugar" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, + { name = "termcolor" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/4e/60fed105549297ba1a700e1ea7b828044842ea27d72c898990510b79b0e2/pytest-sugar-1.1.1.tar.gz", hash = "sha256:73b8b65163ebf10f9f671efab9eed3d56f20d2ca68bda83fa64740a92c08f65d", size = 16533, upload-time = "2025-08-23T12:19:35.737Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/d5/81d38a91c1fdafb6711f053f5a9b92ff788013b19821257c2c38c1e132df/pytest_sugar-1.1.1-py3-none-any.whl", hash = "sha256:2f8319b907548d5b9d03a171515c1d43d2e38e32bd8182a1781eb20b43344cc8", size = 11440, upload-time = "2025-08-23T12:19:34.894Z" }, +] + +[[package]] +name = "pytest-xdist" +version = "3.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "execnet" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/78/b4/439b179d1ff526791eb921115fca8e44e596a13efeda518b9d845a619450/pytest_xdist-3.8.0.tar.gz", hash = "sha256:7e578125ec9bc6050861aa93f2d59f1d8d085595d6551c2c90b6f4fad8d3a9f1", size = 88069, upload-time = "2025-07-01T13:30:59.346Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/31/d4e37e9e550c2b92a9cbc2e4d0b7420a27224968580b5a447f420847c975/pytest_xdist-3.8.0-py3-none-any.whl", hash = "sha256:202ca578cfeb7370784a8c33d6d05bc6e13b4f25b5053c30a152269fd10f0b88", size = 46396, upload-time = "2025-07-01T13:30:56.632Z" }, +] + [[package]] name = "requests" version = "2.32.5" @@ -427,13 +554,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, ] +[[package]] +name = "roman-numerals" +version = "4.1.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/f9/41dc953bbeb056c17d5f7a519f50fdf010bd0553be2d630bc69d1e022703/roman_numerals-4.1.0.tar.gz", hash = "sha256:1af8b147eb1405d5839e78aeb93131690495fe9da5c91856cb33ad55a7f1e5b2", size = 9077, upload-time = "2025-12-17T18:25:34.381Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/54/6f679c435d28e0a568d8e8a7c0a93a09010818634c3c3907fc98d8983770/roman_numerals-4.1.0-py3-none-any.whl", hash = "sha256:647ba99caddc2cc1e55a51e4360689115551bf4476d90e8162cf8c345fe233c7", size = 7676, upload-time = "2025-12-17T18:25:33.098Z" }, +] + [[package]] name = "roman-numerals-py" -version = "3.1.0" +version = "4.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/30/76/48fd56d17c5bdbdf65609abbc67288728a98ed4c02919428d4f52d23b24b/roman_numerals_py-3.1.0.tar.gz", hash = "sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d", size = 9017, upload-time = "2025-02-22T07:34:54.333Z" } +dependencies = [ + { name = "roman-numerals", marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cb/b5/de96fca640f4f656eb79bbee0e79aeec52e3e0e359f8a3e6a0d366378b64/roman_numerals_py-4.1.0.tar.gz", hash = "sha256:f5d7b2b4ca52dd855ef7ab8eb3590f428c0b1ea480736ce32b01fef2a5f8daf9", size = 4274, upload-time = "2025-12-17T18:25:41.153Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/53/97/d2cbbaa10c9b826af0e10fdf836e1bf344d9f0abb873ebc34d1f49642d3f/roman_numerals_py-3.1.0-py3-none-any.whl", hash = "sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c", size = 7742, upload-time = "2025-02-22T07:34:52.422Z" }, + { url = "https://files.pythonhosted.org/packages/27/2c/daca29684cbe9fd4bc711f8246da3c10adca1ccc4d24436b17572eb2590e/roman_numerals_py-4.1.0-py3-none-any.whl", hash = 
"sha256:553114c1167141c1283a51743759723ecd05604a1b6b507225e91dc1a6df0780", size = 4547, upload-time = "2025-12-17T18:25:40.136Z" }, ] [[package]] @@ -453,11 +592,11 @@ wheels = [ [[package]] name = "setuptools" -version = "80.9.0" +version = "80.10.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +sdist = { url = "https://files.pythonhosted.org/packages/76/95/faf61eb8363f26aa7e1d762267a8d602a1b26d4f3a1e758e92cb3cb8b054/setuptools-80.10.2.tar.gz", hash = "sha256:8b0e9d10c784bf7d262c4e5ec5d4ec94127ce206e8738f29a437945fbc219b70", size = 1200343, upload-time = "2026-01-25T22:38:17.252Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, + { url = "https://files.pythonhosted.org/packages/94/b8/f1f62a5e3c0ad2ff1d189590bfa4c46b4f3b6e49cef6f26c6ee4e575394d/setuptools-80.10.2-py3-none-any.whl", hash = "sha256:95b30ddfb717250edb492926c92b5221f7ef3fbcc2b07579bcd4a27da21d0173", size = 1064234, upload-time = "2026-01-25T22:38:15.216Z" }, ] [[package]] @@ -643,52 +782,66 @@ wheels = [ ] [[package]] -name = "tomli" -version = "2.3.0" +name = "termcolor" +version = "3.3.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, - { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, - { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, - { url = "https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, - { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, - { url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, - { url = 
"https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, - { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, - { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, - { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, - { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, - { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, - { url = 
"https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, - { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, - { url = "https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, - { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, - { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, - { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, - { url = 
"https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, - { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, - { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, - { url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, - { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, - { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, - { url = 
"https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, - { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, - { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, - { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, - { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, - { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, - { url = 
"https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, - { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, - { url = "https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, - { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, - { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, - { url = "https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, - { url = 
"https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, - { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, - { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, - { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, - { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/46/79/cf31d7a93a8fdc6aa0fbb665be84426a8c5a557d9240b6239e9e11e35fc5/termcolor-3.3.0.tar.gz", hash = "sha256:348871ca648ec6a9a983a13ab626c0acce02f515b9e1983332b17af7979521c5", size = 14434, upload-time = "2025-12-29T12:55:21.882Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/d1/8bb87d21e9aeb323cc03034f5eaf2c8f69841e40e4853c2627edf8111ed3/termcolor-3.3.0-py3-none-any.whl", hash = 
"sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5", size = 7734, upload-time = "2025-12-29T12:55:20.718Z" }, +] + +[[package]] +name = "tomli" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477, upload-time = "2026-01-11T11:22:38.165Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663, upload-time = "2026-01-11T11:21:45.27Z" }, + { url = "https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469, upload-time = "2026-01-11T11:21:46.873Z" }, + { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039, upload-time = "2026-01-11T11:21:48.503Z" }, + { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007, upload-time = "2026-01-11T11:21:49.456Z" }, + { url = 
"https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875, upload-time = "2026-01-11T11:21:50.755Z" }, + { url = "https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271, upload-time = "2026-01-11T11:21:51.81Z" }, + { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770, upload-time = "2026-01-11T11:21:52.647Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626, upload-time = "2026-01-11T11:21:53.459Z" }, + { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842, upload-time = "2026-01-11T11:21:54.831Z" }, + { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894, upload-time = "2026-01-11T11:21:56.07Z" }, + { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053, upload-time = "2026-01-11T11:21:57.467Z" }, + { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481, upload-time = "2026-01-11T11:21:58.661Z" }, + { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720, upload-time = "2026-01-11T11:22:00.178Z" }, + { url = "https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014, upload-time = "2026-01-11T11:22:01.238Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820, upload-time = "2026-01-11T11:22:02.727Z" }, + { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712, upload-time = "2026-01-11T11:22:03.777Z" }, + { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", 
size = 108296, upload-time = "2026-01-11T11:22:04.86Z" }, + { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553, upload-time = "2026-01-11T11:22:05.854Z" }, + { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915, upload-time = "2026-01-11T11:22:06.703Z" }, + { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038, upload-time = "2026-01-11T11:22:07.56Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245, upload-time = "2026-01-11T11:22:08.344Z" }, + { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335, upload-time = "2026-01-11T11:22:09.951Z" }, + { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962, upload-time = "2026-01-11T11:22:11.27Z" }, + { url 
= "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396, upload-time = "2026-01-11T11:22:12.325Z" }, + { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530, upload-time = "2026-01-11T11:22:13.865Z" }, + { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227, upload-time = "2026-01-11T11:22:15.224Z" }, + { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748, upload-time = "2026-01-11T11:22:16.009Z" }, + { url = "https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725, upload-time = "2026-01-11T11:22:17.269Z" }, + { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901, upload-time = "2026-01-11T11:22:18.287Z" }, + { url = 
"https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375, upload-time = "2026-01-11T11:22:19.154Z" }, + { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639, upload-time = "2026-01-11T11:22:20.168Z" }, + { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897, upload-time = "2026-01-11T11:22:21.544Z" }, + { url = "https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697, upload-time = "2026-01-11T11:22:23.058Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567, upload-time = "2026-01-11T11:22:24.033Z" }, + { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556, upload-time = "2026-01-11T11:22:25.378Z" }, + { url = 
"https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014, upload-time = "2026-01-11T11:22:26.138Z" }, + { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339, upload-time = "2026-01-11T11:22:27.143Z" }, + { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490, upload-time = "2026-01-11T11:22:28.399Z" }, + { url = "https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398, upload-time = "2026-01-11T11:22:29.345Z" }, + { url = "https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515, upload-time = "2026-01-11T11:22:30.327Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806, upload-time = "2026-01-11T11:22:32.56Z" }, + { url = 
"https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340, upload-time = "2026-01-11T11:22:33.505Z" }, + { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106, upload-time = "2026-01-11T11:22:34.451Z" }, + { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504, upload-time = "2026-01-11T11:22:35.764Z" }, + { url = "https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561, upload-time = "2026-01-11T11:22:36.624Z" }, + { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" }, ] [[package]] @@ -702,11 +855,26 @@ wheels = [ [[package]] name = "urllib3" -version = "2.6.0" +version = "2.6.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, +] + +[[package]] +name = "virtualenv" +version = "20.36.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1c/43/554c2569b62f49350597348fc3ac70f786e3c32e7f19d266e19817812dd3/urllib3-2.6.0.tar.gz", hash = "sha256:cb9bcef5a4b345d5da5d145dc3e30834f58e8018828cbc724d30b4cb7d4d49f1", size = 432585, upload-time = "2025-12-05T15:08:47.885Z" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/aa/a3/4d310fa5f00863544e1d0f4de93bddec248499ccf97d4791bc3122c9d4f3/virtualenv-20.36.1.tar.gz", hash = "sha256:8befb5c81842c641f8ee658481e42641c68b5eab3521d8e092d18320902466ba", size = 6032239, upload-time = "2026-01-09T18:21:01.296Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/56/1a/9ffe814d317c5224166b23e7c47f606d6e473712a2fad0f704ea9b99f246/urllib3-2.6.0-py3-none-any.whl", hash = "sha256:c90f7a39f716c572c4e3e58509581ebd83f9b59cced005b7db7ad2d22b0db99f", size = 131083, upload-time = "2025-12-05T15:08:45.983Z" }, + { url = "https://files.pythonhosted.org/packages/6a/2a/dc2228b2888f51192c7dc766106cd475f1b768c10caaf9727659726f7391/virtualenv-20.36.1-py3-none-any.whl", hash = "sha256:575a8d6b124ef88f6f51d56d656132389f961062a9177016a50e4f507bbcc19f", size = 6008258, upload-time = "2026-01-09T18:20:59.425Z" }, ] [[package]] diff --git a/libs/.dcignore b/vendors/.dcignore similarity index 50% rename from libs/.dcignore rename to vendors/.dcignore index 41aa5cefad..53ce88166d 100644 --- a/libs/.dcignore +++ b/vendors/.dcignore @@ -1,2 +1,2 @@ -* +* ** diff 
--git a/vendors/CMakeLists.txt b/vendors/CMakeLists.txt new file mode 100644 index 0000000000..dc8b47fefd --- /dev/null +++ b/vendors/CMakeLists.txt @@ -0,0 +1,206 @@ +# Prevent CMake from finding a system-installed fmt package to avoid version conflicts. +# We use the bundled fmt version from alice/mockturtle submodules. +set(CMAKE_DISABLE_FIND_PACKAGE_fmt TRUE) + +# Include Dependencies (FetchContent) configuration +include(${PROJECT_SOURCE_DIR}/cmake/Dependencies.cmake) + +################################################################################ +# Fetched Dependencies (Managed via FetchContent in Dependencies.cmake) +################################################################################ + +# alice +target_link_system_libraries(libfiction INTERFACE + $ + $ +) +install(DIRECTORY ${alice_SOURCE_DIR}/include/ DESTINATION include) + +# mockturtle +target_link_system_libraries(libfiction INTERFACE + $ + $ +) +install(DIRECTORY ${mockturtle_SOURCE_DIR}/include/ DESTINATION include) + +# nlohmann_json +target_link_system_libraries(libfiction INTERFACE + $ + $ +) +install(DIRECTORY ${nlohmann_json_SOURCE_DIR}/include/ DESTINATION include) + +# parallel-hashmap +target_include_directories(libfiction SYSTEM INTERFACE + $ +) +install(DIRECTORY ${parallel-hashmap_SOURCE_DIR}/parallel_hashmap DESTINATION include) + +# tinyxml2 +set_property(TARGET tinyxml2 PROPERTY POSITION_INDEPENDENT_CODE ON) +target_link_system_libraries(libfiction INTERFACE + $ + $ +) +install(FILES ${tinyxml2_SOURCE_DIR}/tinyxml2.h DESTINATION include) + +# ALGLIB +option(FICTION_ALGLIB "Automatically download, include, and utilize ALGLIB by the ALGLIB project.") +if (FICTION_ALGLIB) + target_compile_definitions(libfiction INTERFACE FICTION_ALGLIB_ENABLED) + target_include_directories(libfiction SYSTEM INTERFACE + $ + ) + target_link_system_libraries(libfiction INTERFACE + $ + $ + ) + install(DIRECTORY ${alglib-cmake_SOURCE_DIR}/src/cpp/src/headers/ DESTINATION include/alglib) +endif () 
+ + +################################################################################ +# Vendored Dependencies (Shipped with fiction in vendors/) +################################################################################ + +# Undirected Graph (Header-only) +target_include_directories(libfiction SYSTEM INTERFACE + $ +) +target_link_libraries(libfiction INTERFACE + $ +) +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/undirected_graph/source/ DESTINATION include/undirected_graph) + +# Combinations (Header-only) +target_include_directories(libfiction SYSTEM INTERFACE + $ +) +target_link_libraries(libfiction INTERFACE + $ +) +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/combinations/ DESTINATION include/combinations FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp") + +# Graph Coloring +add_subdirectory(graph-coloring EXCLUDE_FROM_ALL) +target_include_directories(libfiction SYSTEM INTERFACE + $ +) +target_link_system_libraries(libfiction INTERFACE + $ + $ +) +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/graph-coloring/Header/ DESTINATION include/graph-coloring) + +# Mugen (Python/C++ Hybrid) +if (NOT WIN32) + option(FICTION_ENABLE_MUGEN "Enable the usage of Mugen, a Python3 library by Winston Haaswijk for FCN one-pass synthesis, and its dependencies" OFF) + + if (FICTION_ENABLE_MUGEN) + target_compile_definitions(libfiction INTERFACE MUGEN) + + if (NOT APPLE) + message(STATUS "Building glucose for Mugen") + add_custom_command( + OUTPUT ${PROJECT_BINARY_DIR}/glucose-syrup + COMMAND make + COMMAND mv glucose-syrup ${PROJECT_BINARY_DIR}/glucose-syrup + COMMAND make clean + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/mugen/glucose-syrup-4.1/parallel/) + + add_custom_target(glucose_syrup ALL DEPENDS ${PROJECT_BINARY_DIR}/glucose-syrup) + endif () + + target_link_system_libraries(libfiction INTERFACE pybind11::embed) + + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/mugen/mugen_info.hpp.in utils/mugen_info.hpp) + target_include_directories(libfiction INTERFACE $) + + 
message(STATUS "Mugen was enabled. Please note that it relies on the Python3 libraries 'graphviz', 'PySAT v0.1.6.dev6', and 'wrapt_timeout_decorator' to be properly installed") + endif () +endif () + +################################################################################ +# System / Other Dependencies +################################################################################ + +# Z3 +option(FICTION_Z3 "Find, include, and utilize the Z3 solver by Microsoft Research. It needs to be installed manually." OFF) +if (FICTION_Z3) + message(STATUS "Usage of the Z3 solver was enabled. Make sure that it is installed on your system!") + list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/") + find_package(Z3 4.8.5) + + if (Z3_FOUND) + message(STATUS "Found Z3 solver version: ${Z3_VERSION_STRING}") + find_package(Threads REQUIRED) + target_compile_definitions(libfiction INTERFACE FICTION_Z3_SOLVER) + target_include_directories(libfiction SYSTEM INTERFACE ${Z3_CXX_INCLUDE_DIRS}) + target_link_system_libraries(libfiction INTERFACE ${Z3_LIBRARIES}) + if (APPLE) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -lc++abi") + endif () + else () + message(SEND_ERROR "Z3 solver could not be detected") + endif () +endif () + +# TBB +if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") + list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/") + find_package(TBB) + if (TBB_FOUND) + if (${TBB_VERSION_MAJOR} GREATER_EQUAL 2021 AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9.0.0 AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0.0) + target_compile_definitions(libfiction INTERFACE _GLIBCXX_USE_TBB_PAR_BACKEND=0) + message(STATUS "TBB version ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR} detected. 
Disabling parallel policies for GCC 9 and 10 due to incompatible interfaces.") + else () + message(STATUS "Found TBB version: ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}") + message(STATUS "Parallel STL algorithms are enabled") + endif () + else () + message(STATUS "Found TBB version: ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}") + message(STATUS "Parallel STL algorithms are enabled") + endif () + target_include_directories(libfiction INTERFACE ${TBB_INCLUDE_DIRS}) + target_link_system_libraries(libfiction INTERFACE TBB::tbb) + else () + message(STATUS "Parallel STL algorithms are disabled. If you want to use them, please install TBB and set the TBB_ROOT_DIR, TBB_INCLUDE_DIR, and TBB_LIBRARY variables accordingly.") + endif () +elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + message(STATUS "Parallel STL algorithms are enabled on MSVC by default") +endif () + +# jemalloc +option(FICTION_ENABLE_JEMALLOC "Automatically download and link jemalloc by Jason Evans.") +if (FICTION_ENABLE_JEMALLOC) + if (CMAKE_SYSTEM_NAME STREQUAL "Windows") + message(STATUS "Automatic downloading and linking of jemalloc is not supported on Windows. 
Make sure that it is installed on your system!") + find_package(jemalloc CONFIG REQUIRED) + target_link_libraries(libfiction INTERFACE jemalloc) + else () + find_package(jemalloc) + if (NOT jemalloc_FOUND) + message(STATUS "Building and installing jemalloc will proceed automatically") + include(${PROJECT_SOURCE_DIR}/cmake/FetchJemalloc.cmake) + # The imported target jemalloc already has INTERFACE_INCLUDE_DIRECTORIES set + if (APPLE) + target_link_system_libraries(libfiction INTERFACE jemalloc c++ dl pthread m) + elseif (UNIX) + target_link_system_libraries(libfiction INTERFACE jemalloc stdc++ dl pthread m) + else () + message(FATAL_ERROR "Unsupported environment") + endif () + else () + # System jemalloc found, use the variables set by Findjemalloc.cmake + if (APPLE) + target_link_system_libraries(libfiction INTERFACE ${JEMALLOC_LIBRARIES} c++ dl pthread m) + elseif (UNIX) + target_link_system_libraries(libfiction INTERFACE ${JEMALLOC_LIBRARIES} stdc++ dl pthread m) + else () + message(FATAL_ERROR "Unsupported environment") + endif () + target_include_directories(libfiction SYSTEM INTERFACE ${JEMALLOC_INCLUDE_DIRS}) + endif () + endif () +endif () diff --git a/libs/combinations/README.md b/vendors/combinations/README.md similarity index 100% rename from libs/combinations/README.md rename to vendors/combinations/README.md diff --git a/libs/combinations/combinations.h b/vendors/combinations/combinations.h similarity index 100% rename from libs/combinations/combinations.h rename to vendors/combinations/combinations.h diff --git a/libs/combinations/combinations.html b/vendors/combinations/combinations.html similarity index 99% rename from libs/combinations/combinations.html rename to vendors/combinations/combinations.html index 48eabb1916..d5ff3913c4 100644 --- a/libs/combinations/combinations.html +++ b/vendors/combinations/combinations.html @@ -1239,7 +1239,7 @@

Specification

holds the values not in the current permutation. If f returns true then returns immediately without permuting the sequence any futher. Otherwise, after the last call to f, and prior to returning, -the range [first, last) is restored to its original order. +the range [first, last) is restored to its original order. [Note: If f always returns false it is called count_each_reversible_circular_permutation(first, mid, last) times. — end note] @@ -1930,7 +1930,7 @@

Implementation

for_each_combination(first, mid, last, detail::circular_permutation<Function&, BidirIter>(f, std::distance(first, mid))); return f; -} +} template <class UInt> UInt @@ -2196,7 +2196,7 @@

Implementation

detail::reversible_permutation<Function&, D>(f, std::distance(first, mid))); return f; -} +} template <class UInt> UInt @@ -2280,7 +2280,7 @@

Implementation

for_each_combination(first, mid, last, detail::reverse_circular_permutation<Function&, BidirIter>(f, std::distance(first, mid))); return f; -} +} template <class UInt> UInt diff --git a/libs/graph-coloring/CMakeLists.txt b/vendors/graph-coloring/CMakeLists.txt similarity index 95% rename from libs/graph-coloring/CMakeLists.txt rename to vendors/graph-coloring/CMakeLists.txt index 16ef4b0396..e9cce8ce7b 100644 --- a/libs/graph-coloring/CMakeLists.txt +++ b/vendors/graph-coloring/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.9) +CMAKE_MINIMUM_REQUIRED(VERSION 3.11) PROJECT(graph-coloring) diff --git a/libs/graph-coloring/Header/coloring_algorithm.hpp b/vendors/graph-coloring/Header/coloring_algorithm.hpp similarity index 100% rename from libs/graph-coloring/Header/coloring_algorithm.hpp rename to vendors/graph-coloring/Header/coloring_algorithm.hpp diff --git a/libs/graph-coloring/Header/dsatur.hpp b/vendors/graph-coloring/Header/dsatur.hpp similarity index 97% rename from libs/graph-coloring/Header/dsatur.hpp rename to vendors/graph-coloring/Header/dsatur.hpp index 2798ef600b..134b9448c9 100644 --- a/libs/graph-coloring/Header/dsatur.hpp +++ b/vendors/graph-coloring/Header/dsatur.hpp @@ -13,7 +13,7 @@ class Dsatur : public GraphColor { public: /* Constructors */ - explicit Dsatur(const map>& g) : GraphColor(g){}; + explicit Dsatur(const map>& g) : GraphColor(g) {}; /* Mutators */ map color() override; diff --git a/libs/graph-coloring/Header/hybrid_dsatur.hpp b/vendors/graph-coloring/Header/hybrid_dsatur.hpp similarity index 100% rename from libs/graph-coloring/Header/hybrid_dsatur.hpp rename to vendors/graph-coloring/Header/hybrid_dsatur.hpp diff --git a/vendors/graph-coloring/Header/hybrid_lmxrlf.hpp b/vendors/graph-coloring/Header/hybrid_lmxrlf.hpp new file mode 100644 index 0000000000..2d0dc68978 --- /dev/null +++ b/vendors/graph-coloring/Header/hybrid_lmxrlf.hpp @@ -0,0 +1,42 @@ +#ifndef _HYBRID_LMXRLF_HPP_ +#define _HYBRID_LMXRLF_HPP_ + 
+#include "coloring_algorithm.hpp" +#include "lmxrlf.hpp" +#include "tabucol.hpp" + +using GraphColoring::GraphColor; +using GraphColoring::Lmxrlf; +using GraphColoring::Tabucol; + +namespace GraphColoring +{ +class HybridLmxrlf : public GraphColor +{ + private: + int condition; + map> get_subgraph(map coloring); + + public: + /* Constructors */ + explicit HybridLmxrlf(map> graph, int condition = 0) : GraphColor(graph) + { + this->condition = condition; + } + + /* Mutators */ + void set_condition(int condition) + { + this->condition = condition; + } + map color(); + + /* Accessors */ + string get_algorithm() + { + return "Hybrid LMXRLF"; + } +}; +} // namespace GraphColoring + +#endif //_HYBRID_LMXRLF_HPP_ diff --git a/libs/graph-coloring/Header/lmxrlf.hpp b/vendors/graph-coloring/Header/lmxrlf.hpp similarity index 100% rename from libs/graph-coloring/Header/lmxrlf.hpp rename to vendors/graph-coloring/Header/lmxrlf.hpp diff --git a/libs/graph-coloring/Header/mcs.hpp b/vendors/graph-coloring/Header/mcs.hpp similarity index 100% rename from libs/graph-coloring/Header/mcs.hpp rename to vendors/graph-coloring/Header/mcs.hpp diff --git a/libs/graph-coloring/Header/tabucol.hpp b/vendors/graph-coloring/Header/tabucol.hpp similarity index 100% rename from libs/graph-coloring/Header/tabucol.hpp rename to vendors/graph-coloring/Header/tabucol.hpp diff --git a/libs/graph-coloring/LICENSE.md b/vendors/graph-coloring/LICENSE.md similarity index 99% rename from libs/graph-coloring/LICENSE.md rename to vendors/graph-coloring/LICENSE.md index 60f2e09a1c..0c6de74800 100755 --- a/libs/graph-coloring/LICENSE.md +++ b/vendors/graph-coloring/LICENSE.md @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file +SOFTWARE. diff --git a/libs/graph-coloring/README.md b/vendors/graph-coloring/README.md similarity index 96% rename from libs/graph-coloring/README.md rename to vendors/graph-coloring/README.md index 0ca80c94ba..41d557c566 100755 --- a/libs/graph-coloring/README.md +++ b/vendors/graph-coloring/README.md @@ -10,8 +10,8 @@ If you have questions or reqeusts about this repository please feel free to [rea This project has two primary uses: -* As an executable for finding the chromatic number for an input graph (in edge list or edge matrix format) -* As a library for finding the particular coloring of an input graph (represented as a `map>` edge list) +- As an executable for finding the chromatic number for an input graph (in edge list or edge matrix format) +- As a library for finding the particular coloring of an input graph (represented as a `map>` edge list) In either of these cases, the project requires the use of the [CMake](https://cmake.org/) build system to allow for easy integration into other projects. To generate an executable which can color an input graph file, simply run the following commands from the project root. @@ -22,9 +22,9 @@ make This will generate a `color` executable which can be used to find the chromatic number of an input graph. You can also test that the system is currently running by executing the `color-test` executable which will also be generated by the above commands. -If you want to use these algortihms as a library for another project that uses CMake, you can add it as [a subproject dependency](https://codingnest.com/basic-cmake-part-2/) to your project. +If you want to use these algortihms as a library for another project that uses CMake, you can add it as [a subproject dependency](https://codingnest.com/basic-cmake-part-2/) to your project. 
-## Graph Coloring Library +## Graph Coloring Library **The algorithms hybridLMXRLF and hybridDSATUR are currently broken, see #29** @@ -56,7 +56,7 @@ In order to aid in validating the colorings that are created using these algorit algorithm->verify(); Some of the algorithms that are available here are heuristics with variable conditions. While the algorithm objects are constructed with a reasonable default value, you can modifiy the condition variable of any algorithms that have them with a `set_condition(int)` function. - + algorithm->set_condition(new_value); The algorithm object has an internal flag to checking if the graph has been colored, and this flag is reset every time the underlying graph is reset or modified. You should check this flag before you call the `color()` function to make sure that the color map you receive is valid. @@ -75,7 +75,7 @@ You can also write the entire graph into a .dot file using the `write_graph(stri This package was refactored to be more easily extended. I encourage you to write new coloring algorithms utilizing this code as a base, and simply ask that you include this repo's LICENSE with any of your code, and cite/acknowledge this repo in any publications. If you do utilize this code in a publication or project, or you would like to contribute a new algorithm please reach out to me (brrcrites@gmail.com). 
-## Available Algorithms: +## Available Algorithms: Five coloring algorithms are currently provided in this package (See reference papers for descriptions): @@ -109,4 +109,3 @@ Edge Matrix Input File Format: 1 indicates an edge between column node and row node 0 indicates a lack of an edge - diff --git a/libs/graph-coloring/Source/coloring_algorithm.cpp b/vendors/graph-coloring/Source/coloring_algorithm.cpp similarity index 91% rename from libs/graph-coloring/Source/coloring_algorithm.cpp rename to vendors/graph-coloring/Source/coloring_algorithm.cpp index d8037d3b13..5b9e5591d4 100644 --- a/libs/graph-coloring/Source/coloring_algorithm.cpp +++ b/vendors/graph-coloring/Source/coloring_algorithm.cpp @@ -21,7 +21,10 @@ GraphColoring::GraphColor::GraphColor() GraphColoring::GraphColor::GraphColor(const map>& graph) { this->graph = graph; - for (auto& itr : graph) { this->graph_colors[itr.first] = -1; } + for (auto& itr : graph) + { + this->graph_colors[itr.first] = -1; + } } GraphColoring::GraphColor::~GraphColor() = default; @@ -70,7 +73,10 @@ void GraphColoring::GraphColor::print_chromatic() void GraphColoring::GraphColor::print_coloring() { std::cout << "----------" << this->get_algorithm() << " Colorings----------" << endl; - for (auto& graph_color : graph_colors) { std::cout << graph_color.first << " " << graph_color.second << endl; } + for (auto& graph_color : graph_colors) + { + std::cout << graph_color.first << " " << graph_color.second << endl; + } } int GraphColoring::GraphColor::get_num_colors() diff --git a/vendors/graph-coloring/Source/dsatur.cpp b/vendors/graph-coloring/Source/dsatur.cpp new file mode 100644 index 0000000000..079fb98753 --- /dev/null +++ b/vendors/graph-coloring/Source/dsatur.cpp @@ -0,0 +1,151 @@ + +#include "../Header/dsatur.hpp" + +#include + +#include + +using std::cerr; +using std::cout; +using std::endl; + +map GraphColoring::Dsatur::color() +{ + if (this->graph.size() == 0) + { + this->graph_colors = map(); + return map(); + } + + 
vector todo; + string max_degree = ""; + int degree = -1; + + // find maximal degree vertex to color first and color with 0 + for (map>::iterator i = this->graph.begin(); i != this->graph.end(); i++) + { + if ((int)i->second.size() > degree) + { + degree = i->second.size(); + max_degree = i->first; + } + } + if (max_degree == "") + { + cerr << "Error: Could not find a max degree node in the graph (reason unknown)" << endl; + this->graph_colors = map(); + return map(); + } + this->graph_colors[max_degree] = 0; + + // Create saturation_level so that we can see which graph nodes have the + // highest saturation without having to scan through the entire graph + // each time + map saturation_level; + + // Add all nodes and set their saturation level to 0 + for (map>::iterator i = this->graph.begin(); i != this->graph.end(); i++) + { + saturation_level[i->first] = 0; + } + + // For the single node that has been colored, increment its neighbors so + // that their current saturation level is correct + for (int i = 0; i < this->graph[max_degree].size(); i++) + { + saturation_level[this->graph[max_degree][i]] += 1; + } + + // Set the saturation level of the already completed node to -infinity so + // that it is not chosen and recolored + saturation_level[max_degree] = INT_MIN; + + // Populate the todo list with the rest of the vertices that need to be colored + for (map>::iterator i = this->graph.begin(); i != this->graph.end(); i++) + { + if (i->first != max_degree) + { + this->graph_colors[i->first] = -1; + todo.push_back(i->first); + } + } + + // Color all the remaining nodes in the todo list + while (!todo.empty()) + { + int saturation = -1; + string saturation_name = ""; + vector saturation_colors; + // Find the vertex with the highest saturation level, since we keep the + // saturation levels along the way we can do this in a single pass + for (map::iterator i = saturation_level.begin(); i != saturation_level.end(); i++) + { + // Find the highest saturated node and 
keep its name and neighbors colors + if (i->second > saturation) + { + saturation = i->second; + saturation_name = i->first; + + // Since we're in this loop it means we've found a new most saturated + // node, which means we need to clear the old list of neighbors colors + // and replace it with the new highest saturated nodes neighbors colors + // Since uncolored nodes are given a -1, we can add all neighbors and + // start the check for lowest available color at greater than 0 + saturation_colors.clear(); + for (int j = 0; j < this->graph[i->first].size(); j++) + { + saturation_colors.push_back(this->graph_colors[this->graph[i->first][j]]); + } + } + } + if (saturation_name == "") + { + cerr << "Error: Could not find a max saturated node in the graph (reason unknown)" << endl; + this->graph_colors = map(); + return graph_colors; + } + + // We now know the most saturated node, so we remove it from the todo list + for (vector::iterator itr = todo.begin(); itr != todo.end(); itr++) + { + if ((*itr) == saturation_name) + { + todo.erase(itr); + break; + } + } + + // Find the lowest color that is not being used by any of the most saturated + // nodes neighbors, then color the most saturated node + int lowest_color = 0; + int done = 0; + while (!done) + { + done = 1; + for (unsigned i = 0; i < saturation_colors.size(); i++) + { + if (saturation_colors[i] == lowest_color) + { + lowest_color += 1; + done = 0; + } + } + } + this->graph_colors[saturation_name] = lowest_color; + + // Since we have colored another node, that nodes neighbors have now + // become more saturated, so we increase each ones saturation level + // However we first check that that node has not already been colored + //(This check is only necessary for enormeous test cases, but is + // included here for robustness) + for (int i = 0; i < this->graph[saturation_name].size(); i++) + { + if (saturation_level[this->graph[saturation_name][i]] != INT_MIN) + { + 
saturation_level[this->graph[saturation_name][i]] += 1; + } + } + saturation_level[saturation_name] = INT_MIN; + } + return this->graph_colors; +} diff --git a/libs/graph-coloring/Source/hybrid_dsatur.cpp b/vendors/graph-coloring/Source/hybrid_dsatur.cpp similarity index 100% rename from libs/graph-coloring/Source/hybrid_dsatur.cpp rename to vendors/graph-coloring/Source/hybrid_dsatur.cpp diff --git a/libs/graph-coloring/Source/hybrid_lmxrlf.cpp b/vendors/graph-coloring/Source/hybrid_lmxrlf.cpp similarity index 58% rename from libs/graph-coloring/Source/hybrid_lmxrlf.cpp rename to vendors/graph-coloring/Source/hybrid_lmxrlf.cpp index aa70aea5dd..3c023014ec 100644 --- a/libs/graph-coloring/Source/hybrid_lmxrlf.cpp +++ b/vendors/graph-coloring/Source/hybrid_lmxrlf.cpp @@ -1,26 +1,30 @@ #include "../Header/hybrid_lmxrlf.hpp" -map GraphColoring::HybridLmxrlf::color() { - if(this->graph.size() == 0) { - this->graph_colors = map(); - return map(); +map GraphColoring::HybridLmxrlf::color() +{ + if (this->graph.size() == 0) + { + this->graph_colors = map(); + return map(); } Lmxrlf* lmxrlf_graph = new Lmxrlf(this->graph, this->condition); - this->graph_colors = lmxrlf_graph->color(); - if(!lmxrlf_graph->is_valid()) { + this->graph_colors = lmxrlf_graph->color(); + if (!lmxrlf_graph->is_valid()) + { // TODO(brrcrites): consider we should probably reset the graph to uncolored // and then return that rather than an empty color map - this->graph_colors = map(); - return map(); + this->graph_colors = map(); + return map(); } int largest = this->get_num_colors(); Tabucol* tabu_graph = new Tabucol(this->graph, largest); - map best = this->graph_colors; - map tabu_color = tabu_graph->color(); - while(tabu_graph->is_valid()) { + map best = this->graph_colors; + map tabu_color = tabu_graph->color(); + while (tabu_graph->is_valid()) + { best = tabu_color; // TODO(brrcrites): in theory this can go to 0 or negative, we should probably // put in some corrective measure to stop that @@ 
-31,4 +35,3 @@ map GraphColoring::HybridLmxrlf::color() { this->graph_colors = best; return this->graph_colors; } - diff --git a/libs/graph-coloring/Source/lmxrlf.cpp b/vendors/graph-coloring/Source/lmxrlf.cpp similarity index 96% rename from libs/graph-coloring/Source/lmxrlf.cpp rename to vendors/graph-coloring/Source/lmxrlf.cpp index c3ebe1e64d..e7e350b9bb 100644 --- a/libs/graph-coloring/Source/lmxrlf.cpp +++ b/vendors/graph-coloring/Source/lmxrlf.cpp @@ -25,7 +25,10 @@ vector GraphColoring::Lmxrlf::get_independent(const vector& set) for (auto& i : set) { delta.push_back(i); - for (auto& j : graph[i]) { delta.push_back(j); } + for (auto& j : graph[i]) + { + delta.push_back(j); + } } vector ret; for (auto& i : graph) @@ -235,7 +238,8 @@ map GraphColoring::Lmxrlf::lmxrlf_alg(const int endcond) { Color += 1; } - do { + do + { int global_iterations; if (Color == 0) { @@ -279,7 +283,10 @@ map GraphColoring::Lmxrlf::lmxrlf_alg(const int endcond) { // remove random vertices from S_star vector S_star = list_of_best_solution; - while (uncolored_neighbor(S_star).empty()) { S_star.erase(S_star.begin() + (rand() % S_star.size())); } + while (uncolored_neighbor(S_star).empty()) + { + S_star.erase(S_star.begin() + (rand() % S_star.size())); + } // Add randomly vertices which do not have colored neighbors to S_star vector ucn = uncolored_neighbor(S_star); while (!ucn.empty()) @@ -343,7 +350,10 @@ map GraphColoring::Lmxrlf::color() { this->condition = this->graph.size(); } - for (auto& i : graph) { this->graph_colors[i.first] = -1; } + for (auto& i : graph) + { + this->graph_colors[i.first] = -1; + } return lmxrlf_alg(this->condition); } diff --git a/libs/graph-coloring/Source/mcs.cpp b/vendors/graph-coloring/Source/mcs.cpp similarity index 94% rename from libs/graph-coloring/Source/mcs.cpp rename to vendors/graph-coloring/Source/mcs.cpp index 083466157d..2a357d55a6 100644 --- a/libs/graph-coloring/Source/mcs.cpp +++ b/vendors/graph-coloring/Source/mcs.cpp @@ -29,7 +29,10 @@ 
map GraphColoring::Mcs::color() queue ordering; // Initially set the weight of each node to 0 - for (auto& i : temp_graph) { weight[i.first] = 0; } + for (auto& i : temp_graph) + { + weight[i.first] = 0; + } // Work through all the nodes in the graph, choosing the node // with maximum weight, then add that node to the queue. Increase @@ -59,7 +62,10 @@ map GraphColoring::Mcs::color() // Add highest weight node to the queue and increment all of its // neighbors weights by 1 ordering.push(max_vertex); - for (unsigned j = 0; j < graph[max_vertex].size(); j++) { weight[temp_graph[max_vertex][j]] += 1; } + for (unsigned j = 0; j < graph[max_vertex].size(); j++) + { + weight[temp_graph[max_vertex][j]] += 1; + } // Remove the maximum weight node from the graph so that it won't // be accidentally added again diff --git a/libs/graph-coloring/Source/tabucol.cpp b/vendors/graph-coloring/Source/tabucol.cpp similarity index 97% rename from libs/graph-coloring/Source/tabucol.cpp rename to vendors/graph-coloring/Source/tabucol.cpp index 0fbcc53070..107aa675c9 100644 --- a/libs/graph-coloring/Source/tabucol.cpp +++ b/vendors/graph-coloring/Source/tabucol.cpp @@ -76,7 +76,10 @@ map GraphColoring::Tabucol::color() this->graph_colors = map(); return {}; } - for (auto& adj_tuple : this->graph) { this->graph_colors[adj_tuple.first] = rand() % this->condition; } + for (auto& adj_tuple : this->graph) + { + this->graph_colors[adj_tuple.first] = rand() % this->condition; + } srand(time(NULL)); queue tabu_color; diff --git a/libs/mugen/README.md b/vendors/mugen/README.md similarity index 100% rename from libs/mugen/README.md rename to vendors/mugen/README.md diff --git a/libs/mugen/glucose-syrup-4.1/Changelog b/vendors/mugen/glucose-syrup-4.1/Changelog similarity index 91% rename from libs/mugen/glucose-syrup-4.1/Changelog rename to vendors/mugen/glucose-syrup-4.1/Changelog index dbf045fbaa..9d2720c2b4 100644 --- a/libs/mugen/glucose-syrup-4.1/Changelog +++ 
b/vendors/mugen/glucose-syrup-4.1/Changelog @@ -8,18 +8,18 @@ Version 4.0 - Can work indepentently in sequential or with many cores Version 3.0 (2013) - - Add incremental features. + - Add incremental features. See SAT13 paper: Improving Glucose for Incremental SAT Solving with Assumptions: Application to MUS Extraction - Add certified UNSAT proof. Version 2.3 (2012) - - Add new restart strategy + - Add new restart strategy See CP12 paper: Refining Restarts Strategies For SAT and UNSAT - Add additionnal features to speed the search -Version 2.0 (2011) +Version 2.0 (2011) - Add additionnal features (freeze potential good clauses for one turn) - Based on Minisat 2.2 diff --git a/libs/mugen/glucose-syrup-4.1/LICENCE b/vendors/mugen/glucose-syrup-4.1/LICENCE similarity index 96% rename from libs/mugen/glucose-syrup-4.1/LICENCE rename to vendors/mugen/glucose-syrup-4.1/LICENCE index 510035063b..0348a676bd 100644 --- a/libs/mugen/glucose-syrup-4.1/LICENCE +++ b/vendors/mugen/glucose-syrup-4.1/LICENCE @@ -8,19 +8,19 @@ Labri - Univ. Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). @@ -44,4 +44,3 @@ NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPO NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- diff --git a/libs/mugen/glucose-syrup-4.1/README b/vendors/mugen/glucose-syrup-4.1/README similarity index 91% rename from libs/mugen/glucose-syrup-4.1/README rename to vendors/mugen/glucose-syrup-4.1/README index 19cd95f425..ff81ecf4db 100644 --- a/libs/mugen/glucose-syrup-4.1/README +++ b/vendors/mugen/glucose-syrup-4.1/README @@ -21,4 +21,4 @@ Usage: in simp directory: ./glucose --help -in parallel directory: ./glucose-syrup --help \ No newline at end of file +in parallel directory: ./glucose-syrup --help diff --git a/vendors/mugen/glucose-syrup-4.1/core/BoundedQueue.h b/vendors/mugen/glucose-syrup-4.1/core/BoundedQueue.h new file mode 100644 index 0000000000..c2f1a2c729 --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/core/BoundedQueue.h @@ -0,0 +1,209 @@ +/***************************************************************************************[BoundedQueue.h] + Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + LRI - Univ. Paris Sud, France (2009-2013) + Labri - Univ. Bordeaux, France + + Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + Labri - Univ. Bordeaux, France + +Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +is based on. (see below). + +Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel +version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software +without restriction, including the rights to use, copy, modify, merge, publish, distribute, +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +- The above and below copyrights notices and this permission notice shall be included in all +copies or substantial portions of the Software; +- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot +be used in any competitive event (sat competitions/evaluations) without the express permission of +the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event +using Glucose Parallel as an embedded SAT engine (single core or not). + + +--------------- Original Minisat Copyrights + +Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson +Copyright (c) 2007-2010, Niklas Sorensson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + **************************************************************************************************/ + +#ifndef BoundedQueue_h +#define BoundedQueue_h + +#include "mtl/Vec.h" + +//================================================================================================= + +namespace Glucose +{ + +template +class bqueue +{ + vec elems; + int first; + int last; + unsigned long long sumofqueue; + int maxsize; + int queuesize; // Number of current elements (must be < maxsize !) + bool expComputed; + double exp, value; + + public: + bqueue(void) : first(0), last(0), sumofqueue(0), maxsize(0), queuesize(0), expComputed(false) {} + + void initSize(int size) + { + growTo(size); + exp = 2.0 / (size + 1); + } // Init size of bounded size queue + + void push(T x) + { + expComputed = false; + if (queuesize == maxsize) + { + assert(last == first); // The queue is full, next value to enter will replace oldest one + sumofqueue -= elems[last]; + if ((++last) == maxsize) + last = 0; + } + else + queuesize++; + sumofqueue += x; + elems[first] = x; + if ((++first) == maxsize) + { + first = 0; + last = 0; + } + } + + T peek() + { + assert(queuesize > 0); + return elems[last]; + } + void pop() + { + sumofqueue -= elems[last]; + queuesize--; + if ((++last) == maxsize) + last = 0; + } + + unsigned long long getsum() const + { + return sumofqueue; + } + unsigned int getavg() const + { + return (unsigned int)(sumofqueue / ((unsigned long long)queuesize)); + } + int maxSize() const + { + return maxsize; + } + double getavgDouble() const + { + double tmp = 0; + for (int i = 0; i < elems.size(); i++) + { + tmp += elems[i]; + } + return tmp / elems.size(); + } + int isvalid() const + { + return (queuesize == maxsize); + } + + 
void growTo(int size) + { + elems.growTo(size); + first = 0; + maxsize = size; + queuesize = 0; + last = 0; + for (int i = 0; i < size; i++) elems[i] = 0; + } + + double getAvgExp() + { + if (expComputed) + return value; + double a = exp; + value = elems[first]; + for (int i = first; i < maxsize; i++) + { + value += a * ((double)elems[i]); + a = a * exp; + } + for (int i = 0; i < last; i++) + { + value += a * ((double)elems[i]); + a = a * exp; + } + value = value * (1 - exp) / (1 - a); + expComputed = true; + return value; + } + void fastclear() + { + first = 0; + last = 0; + queuesize = 0; + sumofqueue = 0; + } // to be called after restarts... Discard the queue + + int size(void) + { + return queuesize; + } + + void clear(bool dealloc = false) + { + elems.clear(dealloc); + first = 0; + maxsize = 0; + queuesize = 0; + sumofqueue = 0; + } + + void copyTo(bqueue& dest) const + { + dest.last = last; + dest.sumofqueue = sumofqueue; + dest.maxsize = maxsize; + dest.queuesize = queuesize; + dest.expComputed = expComputed; + dest.exp = exp; + dest.value = value; + dest.first = first; + elems.copyTo(dest.elems); + } +}; +} // namespace Glucose +//================================================================================================= + +#endif diff --git a/libs/mugen/glucose-syrup-4.1/core/Constants.h b/vendors/mugen/glucose-syrup-4.1/core/Constants.h similarity index 96% rename from libs/mugen/glucose-syrup-4.1/core/Constants.h rename to vendors/mugen/glucose-syrup-4.1/core/Constants.h index 259f1b3f92..f9c1d88e49 100644 --- a/libs/mugen/glucose-syrup-4.1/core/Constants.h +++ b/vendors/mugen/glucose-syrup-4.1/core/Constants.h @@ -9,19 +9,19 @@ Labri - Univ. Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). 
Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). 
@@ -53,7 +53,5 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA // Constants for clauses reductions #define RATIOREMOVECLAUSES 2 - // Constants for restarts #define LOWER_BOUND_FOR_BLOCKING_RESTART 10000 - diff --git a/libs/mugen/glucose-syrup-4.1/core/Dimacs.h b/vendors/mugen/glucose-syrup-4.1/core/Dimacs.h similarity index 72% rename from libs/mugen/glucose-syrup-4.1/core/Dimacs.h rename to vendors/mugen/glucose-syrup-4.1/core/Dimacs.h index a2065f6d4b..eedef49cd8 100644 --- a/libs/mugen/glucose-syrup-4.1/core/Dimacs.h +++ b/vendors/mugen/glucose-syrup-4.1/core/Dimacs.h @@ -21,69 +21,85 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA #ifndef Glucose_Dimacs_h #define Glucose_Dimacs_h -#include - -#include "utils/ParseUtils.h" #include "core/SolverTypes.h" +#include "utils/ParseUtils.h" -namespace Glucose { +#include + +namespace Glucose +{ //================================================================================================= // DIMACS Parser: -template -static void readClause(B& in, Solver& S, vec& lits) { - int parsed_lit, var; +template +static void readClause(B& in, Solver& S, vec& lits) +{ + int parsed_lit, var; lits.clear(); - for (;;){ + for (;;) + { parsed_lit = parseInt(in); - if (parsed_lit == 0) break; - var = abs(parsed_lit)-1; + if (parsed_lit == 0) + break; + var = abs(parsed_lit) - 1; while (var >= S.nVars()) S.newVar(); - lits.push( (parsed_lit > 0) ? mkLit(var) : ~mkLit(var) ); + lits.push((parsed_lit > 0) ? 
mkLit(var) : ~mkLit(var)); } } -template -static void parse_DIMACS_main(B& in, Solver& S) { +template +static void parse_DIMACS_main(B& in, Solver& S) +{ vec lits; - int vars = 0; - int clauses = 0; - int cnt = 0; - for (;;){ + int vars = 0; + int clauses = 0; + int cnt = 0; + for (;;) + { skipWhitespace(in); - if (*in == EOF) break; - else if (*in == 'p'){ - if (eagerMatch(in, "p cnf")){ + if (*in == EOF) + break; + else if (*in == 'p') + { + if (eagerMatch(in, "p cnf")) + { vars = parseInt(in); clauses = parseInt(in); // SATRACE'06 hack // if (clauses > 4000000) // S.eliminate(true); - }else{ + } + else + { printf("PARSE ERROR! Unexpected char: %c\n", *in), exit(3); } - } else if (*in == 'c' || *in == 'p') + } + else if (*in == 'c' || *in == 'p') skipLine(in); - else{ + else + { cnt++; readClause(in, S, lits); - S.addClause_(lits); } + S.addClause_(lits); + } } if (vars != S.nVars()) fprintf(stderr, "WARNING! DIMACS header mismatch: wrong number of variables.\n"); - if (cnt != clauses) + if (cnt != clauses) fprintf(stderr, "WARNING! DIMACS header mismatch: wrong number of clauses.\n"); } // Inserts problem into solver. 
// -template -static void parse_DIMACS(gzFile input_stream, Solver& S) { +template +static void parse_DIMACS(gzFile input_stream, Solver& S) +{ StreamBuffer in(input_stream); - parse_DIMACS_main(in, S); } + parse_DIMACS_main(in, S); +} //================================================================================================= -} +} // namespace Glucose #endif diff --git a/libs/mugen/glucose-syrup-4.1/core/Makefile b/vendors/mugen/glucose-syrup-4.1/core/Makefile similarity index 99% rename from libs/mugen/glucose-syrup-4.1/core/Makefile rename to vendors/mugen/glucose-syrup-4.1/core/Makefile index 14c4e9cf17..1a450f9ee8 100644 --- a/libs/mugen/glucose-syrup-4.1/core/Makefile +++ b/vendors/mugen/glucose-syrup-4.1/core/Makefile @@ -1,3 +1,2 @@ PHONY: @echo "** Careful ** Since 4.0 you have to use the simp or parallel directory only for typing make" - diff --git a/vendors/mugen/glucose-syrup-4.1/core/Solver.cc b/vendors/mugen/glucose-syrup-4.1/core/Solver.cc new file mode 100644 index 0000000000..f1dd424059 --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/core/Solver.cc @@ -0,0 +1,2234 @@ +/***************************************************************************************[Solver.cc] + Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + LRI - Univ. Paris Sud, France (2009-2013) + Labri - Univ. Bordeaux, France + + Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + Labri - Univ. Bordeaux, France + +Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +is based on. (see below). + +Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel +version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software +without restriction, including the rights to use, copy, modify, merge, publish, distribute, +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +- The above and below copyrights notices and this permission notice shall be included in all +copies or substantial portions of the Software; +- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot +be used in any competitive event (sat competitions/evaluations) without the express permission of +the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event +using Glucose Parallel as an embedded SAT engine (single core or not). + + +--------------- Original Minisat Copyrights + +Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson +Copyright (c) 2007-2010, Niklas Sorensson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + **************************************************************************************************/ + +#include "core/Solver.h" + +#include "core/Constants.h" +#include "mtl/Sort.h" +#include "simp/SimpSolver.h" +#include "utils/System.h" + +#include + +using namespace Glucose; + +//================================================================================================= +// Statistics +//================================================================================================= + +//================================================================================================= +// Options: + +static const char* _cat = "CORE"; +static const char* _cr = "CORE -- RESTART"; +static const char* _cred = "CORE -- REDUCE"; +static const char* _cm = "CORE -- MINIMIZE"; + +static DoubleOption opt_K(_cr, "K", "The constant used to force restart", 0.8, DoubleRange(0, false, 1, false)); +static DoubleOption opt_R(_cr, "R", "The constant used to block restart", 1.4, DoubleRange(1, false, 5, false)); +static IntOption opt_size_lbd_queue(_cr, "szLBDQueue", "The size of moving average for LBD (restarts)", 50, + IntRange(10, INT32_MAX)); +static IntOption opt_size_trail_queue(_cr, "szTrailQueue", "The size of moving average for trail (block restarts)", + 5000, IntRange(10, INT32_MAX)); + +static IntOption opt_first_reduce_db( + _cred, "firstReduceDB", + "The number of conflicts before the first reduce DB (or the size of leernts if chanseok is used)", 2000, + IntRange(0, INT32_MAX)); +static IntOption opt_inc_reduce_db(_cred, "incReduceDB", "Increment for reduce DB", 300, IntRange(0, INT32_MAX)); +static IntOption opt_spec_inc_reduce_db(_cred, "specialIncReduceDB", "Special increment for reduce DB", 1000, + 
IntRange(0, INT32_MAX)); +static IntOption opt_lb_lbd_frozen_clause(_cred, "minLBDFrozenClause", + "Protect clauses if their LBD decrease and is lower than (for one turn)", 30, + IntRange(0, INT32_MAX)); +static BoolOption opt_chanseok_hack( + _cred, "chanseok", + "Use Chanseok Oh strategy for LBD (keep all LBD<=co and remove half of firstreduceDB other learnt clauses", false); +static IntOption opt_chanseok_limit(_cred, "co", "Chanseok Oh: all learnt clauses with LBD<=co are permanent", 5, + IntRange(2, INT32_MAX)); + +static IntOption opt_lb_size_minimzing_clause(_cm, "minSizeMinimizingClause", + "The min size required to minimize clause", 30, IntRange(3, INT32_MAX)); +static IntOption opt_lb_lbd_minimzing_clause(_cm, "minLBDMinimizingClause", "The min LBD required to minimize clause", + 6, IntRange(3, INT32_MAX)); + +static DoubleOption opt_var_decay(_cat, "var-decay", "The variable activity decay factor (starting point)", 0.8, + DoubleRange(0, false, 1, false)); +static DoubleOption opt_max_var_decay(_cat, "max-var-decay", "The variable activity decay factor", 0.95, + DoubleRange(0, false, 1, false)); +static DoubleOption opt_clause_decay(_cat, "cla-decay", "The clause activity decay factor", 0.999, + DoubleRange(0, false, 1, false)); +static DoubleOption + opt_random_var_freq(_cat, "rnd-freq", + "The frequency with which the decision heuristic tries to choose a random variable", 0, + DoubleRange(0, true, 1, true)); +static DoubleOption opt_random_seed(_cat, "rnd-seed", "Used by the random variable selection", 91648253, + DoubleRange(0, false, HUGE_VAL, false)); +static IntOption opt_ccmin_mode(_cat, "ccmin-mode", "Controls conflict clause minimization (0=none, 1=basic, 2=deep)", + 2, IntRange(0, 2)); +static IntOption opt_phase_saving(_cat, "phase-saving", + "Controls the level of phase saving (0=none, 1=limited, 2=full)", 2, IntRange(0, 2)); +static BoolOption opt_rnd_init_act(_cat, "rnd-init", "Randomize the initial activity", false); +static DoubleOption 
opt_garbage_frac(_cat, "gc-frac", + "The fraction of wasted memory allowed before a garbage collection is triggered", + 0.20, DoubleRange(0, false, HUGE_VAL, false)); +static BoolOption opt_glu_reduction( + _cat, "gr", "glucose strategy to fire clause database reduction (must be false to fire Chanseok strategy)", true); +static BoolOption opt_luby_restart(_cat, "luby", "Use the Luby restart sequence", false); +static DoubleOption opt_restart_inc(_cat, "rinc", "Restart interval increase factor", 2, + DoubleRange(1, false, HUGE_VAL, false)); +static IntOption opt_luby_restart_factor(_cred, "luby-factor", "Luby restart factor", 100, IntRange(1, INT32_MAX)); + +static IntOption + opt_randomize_phase_on_restarts(_cat, "phase-restart", + "The amount of randomization for the phase at each restart (0=none, 1=first " + "branch, 2=first branch (no bad clauses), 3=first branch (only initial clauses)", + 0, IntRange(0, 3)); +static BoolOption opt_fixed_randomize_phase_on_restarts(_cat, "fix-phas-rest", + "Fixes the first 7 levels at random phase", false); + +static BoolOption opt_adapt(_cat, "adapt", "Adapt dynamically stategies after 100000 conflicts", true); + +static BoolOption opt_forceunsat(_cat, "forceunsat", "Force the phase for UNSAT", true); +//================================================================================================= +// Constructor/Destructor: + +Solver::Solver() : + + // Parameters (user settable): + // + verbosity(0), + showModel(0), + K(opt_K), + R(opt_R), + sizeLBDQueue(opt_size_lbd_queue), + sizeTrailQueue(opt_size_trail_queue), + firstReduceDB(opt_first_reduce_db), + incReduceDB(opt_chanseok_hack ? 0 : opt_inc_reduce_db), + specialIncReduceDB(opt_chanseok_hack ? 
0 : opt_spec_inc_reduce_db), + lbLBDFrozenClause(opt_lb_lbd_frozen_clause), + chanseokStrategy(opt_chanseok_hack), + coLBDBound(opt_chanseok_limit), + lbSizeMinimizingClause(opt_lb_size_minimzing_clause), + lbLBDMinimizingClause(opt_lb_lbd_minimzing_clause), + var_decay(opt_var_decay), + max_var_decay(opt_max_var_decay), + clause_decay(opt_clause_decay), + random_var_freq(opt_random_var_freq), + random_seed(opt_random_seed), + ccmin_mode(opt_ccmin_mode), + phase_saving(opt_phase_saving), + rnd_pol(false), + rnd_init_act(opt_rnd_init_act), + randomizeFirstDescent(false), + garbage_frac(opt_garbage_frac), + certifiedOutput(NULL), + certifiedUNSAT(false) // Not in the first parallel version + , + vbyte(false), + panicModeLastRemoved(0), + panicModeLastRemovedShared(0), + useUnaryWatched(false), + promoteOneWatchedClause(true), + solves(0), + starts(0), + decisions(0), + propagations(0), + conflicts(0), + conflictsRestarts(0), + curRestart(1), + glureduce(opt_glu_reduction), + restart_inc(opt_restart_inc), + luby_restart(opt_luby_restart), + adaptStrategies(opt_adapt), + luby_restart_factor(opt_luby_restart_factor), + randomize_on_restarts(opt_randomize_phase_on_restarts), + fixed_randomize_on_restarts(opt_fixed_randomize_phase_on_restarts), + newDescent(0), + randomDescentAssignments(0), + forceUnsatOnNewDescent(opt_forceunsat) + + , + ok(true), + cla_inc(1), + var_inc(1), + watches(WatcherDeleted(ca)), + watchesBin(WatcherDeleted(ca)), + unaryWatches(WatcherDeleted(ca)), + qhead(0), + simpDB_assigns(-1), + simpDB_props(0), + order_heap(VarOrderLt(activity)), + progress_estimate(0), + remove_satisfied(true), + lastLearntClause(CRef_Undef) + // Resource constraints: + // + , + conflict_budget(-1), + propagation_budget(-1), + asynch_interrupt(false), + incremental(false), + nbVarsInitialFormula(INT32_MAX), + totalTime4Sat(0.), + totalTime4Unsat(0.), + nbSatCalls(0), + nbUnsatCalls(0) +{ + MYFLAG = 0; + // Initialize only first time. 
Useful for incremental solving (not in // version), useless otherwise + // Kept here for simplicity + lbdQueue.initSize(sizeLBDQueue); + trailQueue.initSize(sizeTrailQueue); + sumLBD = 0; + nbclausesbeforereduce = firstReduceDB; + stats.growTo(coreStatsSize, 0); +} + +//------------------------------------------------------- +// Special constructor used for cloning solvers +//------------------------------------------------------- + +Solver::Solver(const Solver& s) : + verbosity(s.verbosity), + showModel(s.showModel), + K(s.K), + R(s.R), + sizeLBDQueue(s.sizeLBDQueue), + sizeTrailQueue(s.sizeTrailQueue), + firstReduceDB(s.firstReduceDB), + incReduceDB(s.incReduceDB), + specialIncReduceDB(s.specialIncReduceDB), + lbLBDFrozenClause(s.lbLBDFrozenClause), + chanseokStrategy(opt_chanseok_hack), + coLBDBound(opt_chanseok_limit), + lbSizeMinimizingClause(s.lbSizeMinimizingClause), + lbLBDMinimizingClause(s.lbLBDMinimizingClause), + var_decay(s.var_decay), + max_var_decay(s.max_var_decay), + clause_decay(s.clause_decay), + random_var_freq(s.random_var_freq), + random_seed(s.random_seed), + ccmin_mode(s.ccmin_mode), + phase_saving(s.phase_saving), + rnd_pol(s.rnd_pol), + rnd_init_act(s.rnd_init_act), + randomizeFirstDescent(s.randomizeFirstDescent), + garbage_frac(s.garbage_frac), + certifiedOutput(NULL), + certifiedUNSAT(false) // Not in the first parallel version + , + panicModeLastRemoved(s.panicModeLastRemoved), + panicModeLastRemovedShared(s.panicModeLastRemovedShared), + useUnaryWatched(s.useUnaryWatched), + promoteOneWatchedClause(s.promoteOneWatchedClause) + // Statistics: (formerly in 'SolverStats') + // + , + solves(0), + starts(0), + decisions(0), + propagations(0), + conflicts(0), + conflictsRestarts(0) + + , + curRestart(s.curRestart), + glureduce(s.glureduce), + restart_inc(s.restart_inc), + luby_restart(s.luby_restart), + adaptStrategies(s.adaptStrategies), + luby_restart_factor(s.luby_restart_factor), + randomize_on_restarts(s.randomize_on_restarts), + 
fixed_randomize_on_restarts(s.fixed_randomize_on_restarts), + newDescent(s.newDescent), + randomDescentAssignments(s.randomDescentAssignments), + forceUnsatOnNewDescent(s.forceUnsatOnNewDescent), + ok(true), + cla_inc(s.cla_inc), + var_inc(s.var_inc), + watches(WatcherDeleted(ca)), + watchesBin(WatcherDeleted(ca)), + unaryWatches(WatcherDeleted(ca)), + qhead(s.qhead), + simpDB_assigns(s.simpDB_assigns), + simpDB_props(s.simpDB_props), + order_heap(VarOrderLt(activity)), + progress_estimate(s.progress_estimate), + remove_satisfied(s.remove_satisfied), + lastLearntClause(CRef_Undef) + // Resource constraints: + // + , + conflict_budget(s.conflict_budget), + propagation_budget(s.propagation_budget), + asynch_interrupt(s.asynch_interrupt), + incremental(s.incremental), + nbVarsInitialFormula(s.nbVarsInitialFormula), + totalTime4Sat(s.totalTime4Sat), + totalTime4Unsat(s.totalTime4Unsat), + nbSatCalls(s.nbSatCalls), + nbUnsatCalls(s.nbUnsatCalls) +{ + // Copy clauses. + s.ca.copyTo(ca); + ca.extra_clause_field = s.ca.extra_clause_field; + + // Initialize other variables + MYFLAG = 0; + // Initialize only first time. 
Useful for incremental solving (not in // version), useless otherwise + // Kept here for simplicity + sumLBD = s.sumLBD; + nbclausesbeforereduce = s.nbclausesbeforereduce; + + // Copy all search vectors + s.watches.copyTo(watches); + s.watchesBin.copyTo(watchesBin); + s.unaryWatches.copyTo(unaryWatches); + s.assigns.memCopyTo(assigns); + s.vardata.memCopyTo(vardata); + s.activity.memCopyTo(activity); + s.seen.memCopyTo(seen); + s.permDiff.memCopyTo(permDiff); + s.polarity.memCopyTo(polarity); + s.decision.memCopyTo(decision); + s.trail.memCopyTo(trail); + s.order_heap.copyTo(order_heap); + s.clauses.memCopyTo(clauses); + s.learnts.memCopyTo(learnts); + s.permanentLearnts.memCopyTo(permanentLearnts); + + s.lbdQueue.copyTo(lbdQueue); + s.trailQueue.copyTo(trailQueue); + s.forceUNSAT.copyTo(forceUNSAT); + s.stats.copyTo(stats); +} + +Solver::~Solver() {} + +/**************************************************************** + Certified UNSAT proof in binary format +****************************************************************/ + +void Solver::write_char(unsigned char ch) +{ + if (putc_unlocked((int)ch, certifiedOutput) == EOF) + exit(1); +} + +void Solver::write_lit(int n) +{ + for (; n > 127; n >>= 7) write_char(128 | (n & 127)); + write_char(n); +} + +/**************************************************************** + Set the incremental mode +****************************************************************/ + +// This function set the incremental mode to true. +// You can add special code for this mode here. 
+ +void Solver::setIncrementalMode() +{ +#ifdef INCREMENTAL + incremental = true; +#else + fprintf(stderr, "c Trying to set incremental mode, but not compiled properly for this.\n"); + exit(1); +#endif +} + +// Number of variables without selectors +void Solver::initNbInitialVars(int nb) +{ + nbVarsInitialFormula = nb; +} + +bool Solver::isIncremental() +{ + return incremental; +} + +//================================================================================================= +// Minor methods: + +// Creates a new SAT variable in the solver. If 'decision' is cleared, variable will not be +// used as a decision variable (NOTE! This has effects on the meaning of a SATISFIABLE result). +// + +Var Solver::newVar(bool sign, bool dvar) +{ + int v = nVars(); + watches.init(mkLit(v, false)); + watches.init(mkLit(v, true)); + watchesBin.init(mkLit(v, false)); + watchesBin.init(mkLit(v, true)); + unaryWatches.init(mkLit(v, false)); + unaryWatches.init(mkLit(v, true)); + assigns.push(l_Undef); + vardata.push(mkVarData(CRef_Undef, 0)); + activity.push(rnd_init_act ? 
drand(random_seed) * 0.00001 : 0); + seen.push(0); + permDiff.push(0); + polarity.push(sign); + forceUNSAT.push(0); + decision.push(); + trail.capacity(v + 1); + setDecisionVar(v, dvar); + return v; +} + +bool Solver::addClause_(vec& ps) +{ + + assert(decisionLevel() == 0); + if (!ok) + return false; + + // Check if clause is satisfied and remove false/duplicate literals: + sort(ps); + + vec oc; + oc.clear(); + + Lit p; + int i, j, flag = 0; + if (certifiedUNSAT) + { + for (i = j = 0, p = lit_Undef; i < ps.size(); i++) + { + oc.push(ps[i]); + if (value(ps[i]) == l_True || ps[i] == ~p || value(ps[i]) == l_False) + flag = 1; + } + } + + for (i = j = 0, p = lit_Undef; i < ps.size(); i++) + if (value(ps[i]) == l_True || ps[i] == ~p) + return true; + else if (value(ps[i]) != l_False && ps[i] != p) + ps[j++] = p = ps[i]; + ps.shrink(i - j); + + if (flag && (certifiedUNSAT)) + { + if (vbyte) + { + write_char('a'); + for (i = j = 0, p = lit_Undef; i < ps.size(); i++) write_lit(2 * (var(ps[i]) + 1) + sign(ps[i])); + write_lit(0); + + write_char('d'); + for (i = j = 0, p = lit_Undef; i < oc.size(); i++) write_lit(2 * (var(oc[i]) + 1) + sign(oc[i])); + write_lit(0); + } + else + { + for (i = j = 0, p = lit_Undef; i < ps.size(); i++) + fprintf(certifiedOutput, "%i ", (var(ps[i]) + 1) * (-2 * sign(ps[i]) + 1)); + fprintf(certifiedOutput, "0\n"); + + fprintf(certifiedOutput, "d "); + for (i = j = 0, p = lit_Undef; i < oc.size(); i++) + fprintf(certifiedOutput, "%i ", (var(oc[i]) + 1) * (-2 * sign(oc[i]) + 1)); + fprintf(certifiedOutput, "0\n"); + } + } + + if (ps.size() == 0) + return ok = false; + else if (ps.size() == 1) + { + uncheckedEnqueue(ps[0]); + return ok = (propagate() == CRef_Undef); + } + else + { + CRef cr = ca.alloc(ps, false); + clauses.push(cr); + attachClause(cr); + } + + return true; +} + +void Solver::attachClause(CRef cr) +{ + const Clause& c = ca[cr]; + + assert(c.size() > 1); + if (c.size() == 2) + { + watchesBin[~c[0]].push(Watcher(cr, c[1])); + 
watchesBin[~c[1]].push(Watcher(cr, c[0])); + } + else + { + watches[~c[0]].push(Watcher(cr, c[1])); + watches[~c[1]].push(Watcher(cr, c[0])); + } + if (c.learnt()) + stats[learnts_literals] += c.size(); + else + stats[clauses_literals] += c.size(); +} + +void Solver::attachClausePurgatory(CRef cr) +{ + const Clause& c = ca[cr]; + + assert(c.size() > 1); + unaryWatches[~c[0]].push(Watcher(cr, c[1])); +} + +void Solver::detachClause(CRef cr, bool strict) +{ + const Clause& c = ca[cr]; + + assert(c.size() > 1); + if (c.size() == 2) + { + if (strict) + { + remove(watchesBin[~c[0]], Watcher(cr, c[1])); + remove(watchesBin[~c[1]], Watcher(cr, c[0])); + } + else + { + // Lazy detaching: (NOTE! Must clean all watcher lists before garbage collecting this clause) + watchesBin.smudge(~c[0]); + watchesBin.smudge(~c[1]); + } + } + else + { + if (strict) + { + remove(watches[~c[0]], Watcher(cr, c[1])); + remove(watches[~c[1]], Watcher(cr, c[0])); + } + else + { + // Lazy detaching: (NOTE! Must clean all watcher lists before garbage collecting this clause) + watches.smudge(~c[0]); + watches.smudge(~c[1]); + } + } + if (c.learnt()) + stats[learnts_literals] -= c.size(); + else + stats[clauses_literals] -= c.size(); +} + +// The purgatory is the 1-Watched scheme for imported clauses + +void Solver::detachClausePurgatory(CRef cr, bool strict) +{ + const Clause& c = ca[cr]; + + assert(c.size() > 1); + if (strict) + remove(unaryWatches[~c[0]], Watcher(cr, c[1])); + else + unaryWatches.smudge(~c[0]); +} + +void Solver::removeClause(CRef cr, bool inPurgatory) +{ + + Clause& c = ca[cr]; + + if (certifiedUNSAT) + { + if (vbyte) + { + write_char('d'); + for (int i = 0; i < c.size(); i++) write_lit(2 * (var(c[i]) + 1) + sign(c[i])); + write_lit(0); + } + else + { + fprintf(certifiedOutput, "d "); + for (int i = 0; i < c.size(); i++) fprintf(certifiedOutput, "%i ", (var(c[i]) + 1) * (-2 * sign(c[i]) + 1)); + fprintf(certifiedOutput, "0\n"); + } + } + + if (inPurgatory) + 
detachClausePurgatory(cr); + else + detachClause(cr); + // Don't leave pointers to free'd memory! + if (locked(c)) + vardata[var(c[0])].reason = CRef_Undef; + c.mark(1); + ca.free(cr); +} + +bool Solver::satisfied(const Clause& c) const +{ +#ifdef INCREMENTAL + if (incremental) + return (value(c[0]) == l_True) || (value(c[1]) == l_True); +#endif + + // Default mode + for (int i = 0; i < c.size(); i++) + if (value(c[i]) == l_True) + return true; + return false; +} + +/************************************************************ + * Compute LBD functions + *************************************************************/ + +template +inline unsigned int Solver::computeLBD(const T& lits, int end) +{ + int nblevels = 0; + MYFLAG++; +#ifdef INCREMENTAL + if (incremental) + { // ----------------- INCREMENTAL MODE + if (end == -1) + end = lits.size(); + int nbDone = 0; + for (int i = 0; i < lits.size(); i++) + { + if (nbDone >= end) + break; + if (isSelector(var(lits[i]))) + continue; + nbDone++; + int l = level(var(lits[i])); + if (permDiff[l] != MYFLAG) + { + permDiff[l] = MYFLAG; + nblevels++; + } + } + } + else + { // -------- DEFAULT MODE. NOT A LOT OF DIFFERENCES... 
BUT EASIER TO READ +#endif + for (int i = 0; i < lits.size(); i++) + { + int l = level(var(lits[i])); + if (permDiff[l] != MYFLAG) + { + permDiff[l] = MYFLAG; + nblevels++; + } + } +#ifdef INCREMENTAL + } +#endif + return nblevels; +} + +/****************************************************************** + * Minimisation with binary reolution + ******************************************************************/ +void Solver::minimisationWithBinaryResolution(vec& out_learnt) +{ + + // Find the LBD measure + unsigned int lbd = computeLBD(out_learnt); + Lit p = ~out_learnt[0]; + + if (lbd <= lbLBDMinimizingClause) + { + MYFLAG++; + + for (int i = 1; i < out_learnt.size(); i++) + { + permDiff[var(out_learnt[i])] = MYFLAG; + } + + vec& wbin = watchesBin[p]; + int nb = 0; + for (int k = 0; k < wbin.size(); k++) + { + Lit imp = wbin[k].blocker; + if (permDiff[var(imp)] == MYFLAG && value(imp) == l_True) + { + nb++; + permDiff[var(imp)] = MYFLAG - 1; + } + } + int l = out_learnt.size() - 1; + if (nb > 0) + { + stats[nbReducedClauses]++; + for (int i = 1; i < out_learnt.size() - nb; i++) + { + if (permDiff[var(out_learnt[i])] != MYFLAG) + { + Lit p = out_learnt[l]; + out_learnt[l] = out_learnt[i]; + out_learnt[i] = p; + l--; + i--; + } + } + + out_learnt.shrink(nb); + } + } +} + +// Revert to the state at given level (keeping all assignment at 'level' but not beyond). 
+// + +void Solver::cancelUntil(int level) +{ + if (decisionLevel() > level) + { + for (int c = trail.size() - 1; c >= trail_lim[level]; c--) + { + Var x = var(trail[c]); + assigns[x] = l_Undef; + if (phase_saving > 1 || ((phase_saving == 1) && c > trail_lim.last())) + { + polarity[x] = sign(trail[c]); + } + insertVarOrder(x); + } + qhead = trail_lim[level]; + trail.shrink(trail.size() - trail_lim[level]); + trail_lim.shrink(trail_lim.size() - level); + } +} + +//================================================================================================= +// Major methods: + +Lit Solver::pickBranchLit() +{ + Var next = var_Undef; + + // Random decision: + if (((randomizeFirstDescent && conflicts == 0) || drand(random_seed) < random_var_freq) && !order_heap.empty()) + { + next = order_heap[irand(random_seed, order_heap.size())]; + if (value(next) == l_Undef && decision[next]) + stats[rnd_decisions]++; + } + + // Activity based decision: + while (next == var_Undef || value(next) != l_Undef || !decision[next]) + if (order_heap.empty()) + { + next = var_Undef; + break; + } + else + { + next = order_heap.removeMin(); + } + + if (randomize_on_restarts && !fixed_randomize_on_restarts && newDescent && (decisionLevel() % 2 == 0)) + { + return mkLit(next, (randomDescentAssignments >> (decisionLevel() % 32)) & 1); + } + + if (fixed_randomize_on_restarts && decisionLevel() < 7) + { + return mkLit(next, (randomDescentAssignments >> (decisionLevel() % 32)) & 1); + } + + if (next == var_Undef) + return lit_Undef; + + if (forceUnsatOnNewDescent && newDescent) + { + if (forceUNSAT[next] != 0) + return mkLit(next, forceUNSAT[next] < 0); + return mkLit(next, polarity[next]); + } + + return next == var_Undef ? lit_Undef : mkLit(next, rnd_pol ? 
drand(random_seed) < 0.5 : polarity[next]); +} + +/*_________________________________________________________________________________________________ +| +| analyze : (confl : Clause*) (out_learnt : vec&) (out_btlevel : int&) -> [void] +| +| Description: +| Analyze conflict and produce a reason clause. +| +| Pre-conditions: +| * 'out_learnt' is assumed to be cleared. +| * Current decision level must be greater than root level. +| +| Post-conditions: +| * 'out_learnt[0]' is the asserting literal at level 'out_btlevel'. +| * If out_learnt.size() > 1 then 'out_learnt[1]' has the greatest decision level of the +| rest of literals. There may be others from the same level though. +| +|________________________________________________________________________________________________@*/ +void Solver::analyze(CRef confl, vec& out_learnt, vec& selectors, int& out_btlevel, unsigned int& lbd, + unsigned int& szWithoutSelectors) +{ + int pathC = 0; + Lit p = lit_Undef; + + // Generate conflict clause: + // + out_learnt.push(); // (leave room for the asserting literal) + int index = trail.size() - 1; + do + { + assert(confl != CRef_Undef); // (otherwise should be UIP) + Clause& c = ca[confl]; + // Special case for binary clauses + // The first one has to be SAT + if (p != lit_Undef && c.size() == 2 && value(c[0]) == l_False) + { + + assert(value(c[1]) == l_True); + Lit tmp = c[0]; + c[0] = c[1], c[1] = tmp; + } + + if (c.learnt()) + { + parallelImportClauseDuringConflictAnalysis(c, confl); + claBumpActivity(c); + } + else + { // original clause + if (!c.getSeen()) + { + stats[originalClausesSeen]++; + c.setSeen(true); + } + } + + // DYNAMIC NBLEVEL trick (see competition'09 companion paper) + if (c.learnt() && c.lbd() > 2) + { + unsigned int nblevels = computeLBD(c); + if (nblevels + 1 < c.lbd()) + { // improve the LBD + if (c.lbd() <= lbLBDFrozenClause) + { + // seems to be interesting : keep it for the next round + c.setCanBeDel(false); + } + if (chanseokStrategy && nblevels <= 
coLBDBound) + { + c.nolearnt(); + learnts.remove(confl); + permanentLearnts.push(confl); + stats[nbPermanentLearnts]++; + } + else + { + c.setLBD(nblevels); // Update it + } + } + } + + for (int j = (p == lit_Undef) ? 0 : 1; j < c.size(); j++) + { + Lit q = c[j]; + + if (!seen[var(q)]) + { + if (level(var(q)) == 0) + {} + else + { // Here, the old case + if (!isSelector(var(q))) + varBumpActivity(var(q)); + + // This variable was responsible for a conflict, + // consider it as a UNSAT assignation for this literal + bumpForceUNSAT(~q); // Negation because q is false here + + seen[var(q)] = 1; + if (level(var(q)) >= decisionLevel()) + { + pathC++; + // UPDATEVARACTIVITY trick (see competition'09 companion paper) + if (!isSelector(var(q)) && (reason(var(q)) != CRef_Undef) && ca[reason(var(q))].learnt()) + lastDecisionLevel.push(q); + } + else + { + if (isSelector(var(q))) + { + assert(value(q) == l_False); + selectors.push(q); + } + else + out_learnt.push(q); + } + } + } // else stats[sumResSeen]++; + } + + // Select next clause to look at: + while (!seen[var(trail[index--])]); + p = trail[index + 1]; + // stats[sumRes]++; + confl = reason(var(p)); + seen[var(p)] = 0; + pathC--; + + } while (pathC > 0); + out_learnt[0] = ~p; + + // Simplify conflict clause: + // + int i, j; + + for (int i = 0; i < selectors.size(); i++) out_learnt.push(selectors[i]); + + out_learnt.copyTo(analyze_toclear); + if (ccmin_mode == 2) + { + uint32_t abstract_level = 0; + for (i = 1; i < out_learnt.size(); i++) + abstract_level |= + abstractLevel(var(out_learnt[i])); // (maintain an abstraction of levels involved in conflict) + + for (i = j = 1; i < out_learnt.size(); i++) + if (reason(var(out_learnt[i])) == CRef_Undef || !litRedundant(out_learnt[i], abstract_level)) + out_learnt[j++] = out_learnt[i]; + } + else if (ccmin_mode == 1) + { + for (i = j = 1; i < out_learnt.size(); i++) + { + Var x = var(out_learnt[i]); + + if (reason(x) == CRef_Undef) + out_learnt[j++] = out_learnt[i]; + else + 
{ + Clause& c = ca[reason(var(out_learnt[i]))]; + // Thanks to Siert Wieringa for this bug fix! + for (int k = ((c.size() == 2) ? 0 : 1); k < c.size(); k++) + if (!seen[var(c[k])] && level(var(c[k])) > 0) + { + out_learnt[j++] = out_learnt[i]; + break; + } + } + } + } + else + i = j = out_learnt.size(); + + // stats[max_literals]+=out_learnt.size(); + out_learnt.shrink(i - j); + // stats[tot_literals]+=out_learnt.size(); + + /* *************************************** + Minimisation with binary clauses of the asserting clause + First of all : we look for small clauses + Then, we reduce clauses with small LBD. + Otherwise, this can be useless + */ + if (!incremental && out_learnt.size() <= lbSizeMinimizingClause) + { + minimisationWithBinaryResolution(out_learnt); + } + // Find correct backtrack level: + // + if (out_learnt.size() == 1) + out_btlevel = 0; + else + { + int max_i = 1; + // Find the first literal assigned at the next-highest level: + for (int i = 2; i < out_learnt.size(); i++) + if (level(var(out_learnt[i])) > level(var(out_learnt[max_i]))) + max_i = i; + // Swap-in this literal at index 1: + Lit p = out_learnt[max_i]; + out_learnt[max_i] = out_learnt[1]; + out_learnt[1] = p; + out_btlevel = level(var(p)); + } +#ifdef INCREMENTAL + if (incremental) + { + szWithoutSelectors = 0; + for (int i = 0; i < out_learnt.size(); i++) + { + if (!isSelector(var((out_learnt[i])))) + szWithoutSelectors++; + else if (i > 0) + break; + } + } + else +#endif + szWithoutSelectors = out_learnt.size(); + + // Compute LBD + lbd = computeLBD(out_learnt, out_learnt.size() - selectors.size()); + + // UPDATEVARACTIVITY trick (see competition'09 companion paper) + if (lastDecisionLevel.size() > 0) + { + for (int i = 0; i < lastDecisionLevel.size(); i++) + { + if (ca[reason(var(lastDecisionLevel[i]))].lbd() < lbd) + varBumpActivity(var(lastDecisionLevel[i])); + } + lastDecisionLevel.clear(); + } + + for (int j = 0; j < analyze_toclear.size(); j++) seen[var(analyze_toclear[j])] = 0; 
// ('seen[]' is now cleared) + for (int j = 0; j < selectors.size(); j++) seen[var(selectors[j])] = 0; +} + +// Check if 'p' can be removed. 'abstract_levels' is used to abort early if the algorithm is +// visiting literals at levels that cannot be removed later. + +bool Solver::litRedundant(Lit p, uint32_t abstract_levels) +{ + analyze_stack.clear(); + analyze_stack.push(p); + int top = analyze_toclear.size(); + while (analyze_stack.size() > 0) + { + assert(reason(var(analyze_stack.last())) != CRef_Undef); + Clause& c = ca[reason(var(analyze_stack.last()))]; + analyze_stack.pop(); // + if (c.size() == 2 && value(c[0]) == l_False) + { + assert(value(c[1]) == l_True); + Lit tmp = c[0]; + c[0] = c[1], c[1] = tmp; + } + + for (int i = 1; i < c.size(); i++) + { + Lit p = c[i]; + if (!seen[var(p)]) + { + if (level(var(p)) > 0) + { + if (reason(var(p)) != CRef_Undef && (abstractLevel(var(p)) & abstract_levels) != 0) + { + seen[var(p)] = 1; + analyze_stack.push(p); + analyze_toclear.push(p); + } + else + { + for (int j = top; j < analyze_toclear.size(); j++) seen[var(analyze_toclear[j])] = 0; + analyze_toclear.shrink(analyze_toclear.size() - top); + return false; + } + } + } + } + } + + return true; +} + +/*_________________________________________________________________________________________________ +| +| analyzeFinal : (p : Lit) -> [void] +| +| Description: +| Specialized analysis procedure to express the final conflict in terms of assumptions. +| Calculates the (possibly empty) set of assumptions that led to the assignment of 'p', and +| stores the result in 'out_conflict'. 
+|________________________________________________________________________________________________@*/ +void Solver::analyzeFinal(Lit p, vec& out_conflict) +{ + out_conflict.clear(); + out_conflict.push(p); + + if (decisionLevel() == 0) + return; + + seen[var(p)] = 1; + + for (int i = trail.size() - 1; i >= trail_lim[0]; i--) + { + Var x = var(trail[i]); + if (seen[x]) + { + if (reason(x) == CRef_Undef) + { + assert(level(x) > 0); + out_conflict.push(~trail[i]); + } + else + { + Clause& c = ca[reason(x)]; + // for (int j = 1; j < c.size(); j++) Minisat (glucose 2.0) loop + // Bug in case of assumptions due to special data structures for Binary. + // Many thanks to Sam Bayless (sbayless@cs.ubc.ca) for discover this bug. + for (int j = ((c.size() == 2) ? 0 : 1); j < c.size(); j++) + if (level(var(c[j])) > 0) + seen[var(c[j])] = 1; + } + + seen[x] = 0; + } + } + + seen[var(p)] = 0; +} + +void Solver::uncheckedEnqueue(Lit p, CRef from) +{ + assert(value(p) == l_Undef); + assigns[var(p)] = lbool(!sign(p)); + vardata[var(p)] = mkVarData(from, decisionLevel()); + trail.push_(p); +} + +void Solver::bumpForceUNSAT(Lit q) +{ + forceUNSAT[var(q)] = sign(q) ? -1 : +1; + return; +} + +/*_________________________________________________________________________________________________ +| +| propagate : [void] -> [Clause*] +| +| Description: +| Propagates all enqueued facts. If a conflict arises, the conflicting clause is returned, +| otherwise CRef_Undef. +| +| Post-conditions: +| * the propagation queue is empty, even if there was a conflict. +|________________________________________________________________________________________________@*/ +CRef Solver::propagate() +{ + CRef confl = CRef_Undef; + int num_props = 0; + watches.cleanAll(); + watchesBin.cleanAll(); + unaryWatches.cleanAll(); + while (qhead < trail.size()) + { + Lit p = trail[qhead++]; // 'p' is enqueued fact to propagate. 
+ vec& ws = watches[p]; + Watcher * i, *j, *end; + num_props++; + + // First, Propagate binary clauses + vec& wbin = watchesBin[p]; + for (int k = 0; k < wbin.size(); k++) + { + + Lit imp = wbin[k].blocker; + + if (value(imp) == l_False) + { + return wbin[k].cref; + } + + if (value(imp) == l_Undef) + { + uncheckedEnqueue(imp, wbin[k].cref); + } + } + + // Now propagate other 2-watched clauses + for (i = j = (Watcher*)ws, end = i + ws.size(); i != end;) + { + // Try to avoid inspecting the clause: + Lit blocker = i->blocker; + if (value(blocker) == l_True) + { + *j++ = *i++; + continue; + } + + // Make sure the false literal is data[1]: + CRef cr = i->cref; + Clause& c = ca[cr]; + assert(!c.getOneWatched()); + Lit false_lit = ~p; + if (c[0] == false_lit) + c[0] = c[1], c[1] = false_lit; + assert(c[1] == false_lit); + i++; + + // If 0th watch is true, then clause is already satisfied. + Lit first = c[0]; + Watcher w = Watcher(cr, first); + if (first != blocker && value(first) == l_True) + { + + *j++ = w; + continue; + } +#ifdef INCREMENTAL + if (incremental) + { // ----------------- INCREMENTAL MODE + int choosenPos = -1; + for (int k = 2; k < c.size(); k++) + { + + if (value(c[k]) != l_False) + { + if (decisionLevel() > assumptions.size()) + { + choosenPos = k; + break; + } + else + { + choosenPos = k; + + if (value(c[k]) == l_True || !isSelector(var(c[k]))) + { + break; + } + } + } + } + if (choosenPos != -1) + { + c[1] = c[choosenPos]; + c[choosenPos] = false_lit; + watches[~c[1]].push(w); + goto NextClause; + } + } + else + { // ----------------- DEFAULT MODE (NOT INCREMENTAL) +#endif + for (int k = 2; k < c.size(); k++) + { + + if (value(c[k]) != l_False) + { + c[1] = c[k]; + c[k] = false_lit; + watches[~c[1]].push(w); + goto NextClause; + } + } +#ifdef INCREMENTAL + } +#endif + // Did not find watch -- clause is unit under assignment: + *j++ = w; + if (value(first) == l_False) + { + confl = cr; + qhead = trail.size(); + // Copy the remaining watches: + while (i 
< end) *j++ = *i++; + } + else + { + uncheckedEnqueue(first, cr); + } + NextClause:; + } + ws.shrink(i - j); + + // unaryWatches "propagation" + if (useUnaryWatched && confl == CRef_Undef) + { + confl = propagateUnaryWatches(p); + } + } + + propagations += num_props; + simpDB_props -= num_props; + + return confl; +} + +/*_________________________________________________________________________________________________ +| +| propagateUnaryWatches : [Lit] -> [Clause*] +| +| Description: +| Propagates unary watches of Lit p, return a conflict +| otherwise CRef_Undef +| +|________________________________________________________________________________________________@*/ + +CRef Solver::propagateUnaryWatches(Lit p) +{ + CRef confl = CRef_Undef; + Watcher * i, *j, *end; + vec& ws = unaryWatches[p]; + for (i = j = (Watcher*)ws, end = i + ws.size(); i != end;) + { + // Try to avoid inspecting the clause: + Lit blocker = i->blocker; + if (value(blocker) == l_True) + { + *j++ = *i++; + continue; + } + + // Make sure the false literal is data[1]: + CRef cr = i->cref; + Clause& c = ca[cr]; + assert(c.getOneWatched()); + Lit false_lit = ~p; + assert(c[0] == false_lit); // this is unary watch... No other choice if "propagated" + // if (c[0] == false_lit) + // c[0] = c[1], c[1] = false_lit; + // assert(c[1] == false_lit); + i++; + Watcher w = Watcher(cr, c[0]); + for (int k = 1; k < c.size(); k++) + { + if (value(c[k]) != l_False) + { + c[0] = c[k]; + c[k] = false_lit; + unaryWatches[~c[0]].push(w); + goto NextClauseUnary; + } + } + + // Did not find watch -- clause is empty under assignment: + *j++ = w; + + confl = cr; + qhead = trail.size(); + // Copy the remaining watches: + while (i < end) *j++ = *i++; + + // We can add it now to the set of clauses when backtracking + // printf("*"); + if (promoteOneWatchedClause) + { + stats[nbPromoted]++; + // Let's find the two biggest decision levels in the clause s.t. 
it will correctly be propagated when we'll + // backtrack + int maxlevel = -1; + int index = -1; + for (int k = 1; k < c.size(); k++) + { + assert(value(c[k]) == l_False); + assert(level(var(c[k])) <= level(var(c[0]))); + if (level(var(c[k])) > maxlevel) + { + index = k; + maxlevel = level(var(c[k])); + } + } + detachClausePurgatory(cr, true); // TODO: check that the cleanAll is ok (use ",true" otherwise) + assert(index != -1); + Lit tmp = c[1]; + c[1] = c[index], c[index] = tmp; + attachClause(cr); + // TODO used in function ParallelSolver::reportProgressArrayImports + // Override :-( + // goodImportsFromThreads[ca[cr].importedFrom()]++; + ca[cr].setOneWatched(false); + ca[cr].setExported(2); + } + NextClauseUnary:; + } + ws.shrink(i - j); + + return confl; +} + +/*_________________________________________________________________________________________________ +| +| reduceDB : () -> [void] +| +| Description: +| Remove half of the learnt clauses, minus the clauses locked by the current assignment. Locked +| clauses are clauses that are reason to some assignment. Binary clauses are never removed. +|________________________________________________________________________________________________@*/ + +void Solver::reduceDB() +{ + + int i, j; + stats[nbReduceDB]++; + if (chanseokStrategy) + sort(learnts, reduceDBAct_lt(ca)); + else + { + sort(learnts, reduceDB_lt(ca)); + + // We have a lot of "good" clauses, it is difficult to compare them. Keep more ! + if (ca[learnts[learnts.size() / RATIOREMOVECLAUSES]].lbd() <= 3) + nbclausesbeforereduce += specialIncReduceDB; + // Useless :-) + if (ca[learnts.last()].lbd() <= 5) + nbclausesbeforereduce += specialIncReduceDB; + } + // Don't delete binary or locked clauses. 
From the rest, delete clauses from the first half + // Keep clauses which seem to be usefull (their lbd was reduce during this sequence) + + int limit = learnts.size() / 2; + + for (i = j = 0; i < learnts.size(); i++) + { + Clause& c = ca[learnts[i]]; + if (c.lbd() > 2 && c.size() > 2 && c.canBeDel() && !locked(c) && (i < limit)) + { + removeClause(learnts[i]); + stats[nbRemovedClauses]++; + } + else + { + if (!c.canBeDel()) + limit++; // we keep c, so we can delete an other clause + c.setCanBeDel(true); // At the next step, c can be delete + learnts[j++] = learnts[i]; + } + } + learnts.shrink(i - j); + checkGarbage(); +} + +void Solver::removeSatisfied(vec& cs) +{ + + int i, j; + for (i = j = 0; i < cs.size(); i++) + { + Clause& c = ca[cs[i]]; + + if (satisfied(c)) + if (c.getOneWatched()) + removeClause(cs[i], true); + else + removeClause(cs[i]); + else + cs[j++] = cs[i]; + } + cs.shrink(i - j); +} + +void Solver::rebuildOrderHeap() +{ + vec vs; + for (Var v = 0; v < nVars(); v++) + if (decision[v] && value(v) == l_Undef) + vs.push(v); + order_heap.build(vs); +} + +/*_________________________________________________________________________________________________ +| +| simplify : [void] -> [bool] +| +| Description: +| Simplify the clause database according to the current top-level assigment. Currently, the only +| thing done here is the removal of satisfied clauses, but more things can be put here. +|________________________________________________________________________________________________@*/ +bool Solver::simplify() +{ + assert(decisionLevel() == 0); + + if (!ok) + return ok = false; + else + { + CRef cr = propagate(); + if (cr != CRef_Undef) + { + return ok = false; + } + } + + if (nAssigns() == simpDB_assigns || (simpDB_props > 0)) + return true; + + // Remove satisfied clauses: + removeSatisfied(learnts); + removeSatisfied(permanentLearnts); + removeSatisfied(unaryWatchedClauses); + if (remove_satisfied) // Can be turned off. 
+ removeSatisfied(clauses); + checkGarbage(); + rebuildOrderHeap(); + + simpDB_assigns = nAssigns(); + simpDB_props = stats[clauses_literals] + + stats[learnts_literals]; // (shouldn't depend on stats really, but it will do for now) + + return true; +} + +void Solver::adaptSolver() +{ + bool adjusted = false; + bool reinit = false; + printf("c\nc Try to adapt solver strategies\nc \n"); + /* printf("c Adjusting solver for the SAT Race 2015 (alpha feature)\n"); + printf("c key successive Conflicts : %" PRIu64"\n",stats[noDecisionConflict]); + printf("c nb unary clauses learnt : %" PRIu64"\n",stats[nbUn]); + printf("c key avg dec per conflicts : %.2f\n", (float)decisions / (float)conflicts);*/ + float decpc = (float)decisions / (float)conflicts; + if (decpc <= 1.2) + { + chanseokStrategy = true; + coLBDBound = 4; + glureduce = true; + adjusted = true; + printf("c Adjusting for low decision levels.\n"); + reinit = true; + firstReduceDB = 2000; + nbclausesbeforereduce = firstReduceDB; + curRestart = (conflicts / nbclausesbeforereduce) + 1; + incReduceDB = 0; + } + if (stats[noDecisionConflict] < 30000) + { + luby_restart = true; + luby_restart_factor = 100; + + var_decay = 0.999; + max_var_decay = 0.999; + adjusted = true; + printf("c Adjusting for low successive conflicts.\n"); + } + if (stats[noDecisionConflict] > 54400) + { + printf("c Adjusting for high successive conflicts.\n"); + chanseokStrategy = true; + glureduce = true; + coLBDBound = 3; + firstReduceDB = 30000; + var_decay = 0.99; + max_var_decay = 0.99; + randomize_on_restarts = 1; + adjusted = true; + } + if (stats[nbDL2] - stats[nbBin] > 20000) + { + var_decay = 0.91; + max_var_decay = 0.91; + adjusted = true; + printf("c Adjusting for a very large number of true glue clauses found.\n"); + } + if (!adjusted) + { + printf("c Nothing extreme in this problem, continue with glucose default strategies.\n"); + } + printf("c\n"); + if (adjusted) + { // Let's reinitialize the glucose restart strategy counters + 
lbdQueue.fastclear(); + sumLBD = 0; + conflictsRestarts = 0; + } + + if (chanseokStrategy && adjusted) + { + int moved = 0; + int i, j; + for (i = j = 0; i < learnts.size(); i++) + { + Clause& c = ca[learnts[i]]; + if (c.lbd() <= coLBDBound) + { + permanentLearnts.push(learnts[i]); + moved++; + } + else + { + learnts[j++] = learnts[i]; + } + } + learnts.shrink(i - j); + printf("c Activating Chanseok Strategy: moved %d clauses to the permanent set.\n", moved); + } + + if (reinit) + { + assert(decisionLevel() == 0); + for (int i = 0; i < learnts.size(); i++) + { + removeClause(learnts[i]); + } + learnts.shrink(learnts.size()); + checkGarbage(); + /* + order_heap.clear(); + for(int i=0;i [lbool] +| +| Description: +| Search for a model the specified number of conflicts. +| NOTE! Use negative value for 'nof_conflicts' indicate infinity. +| +| Output: +| 'l_True' if a partial assigment that is consistent with respect to the clauseset is found. If +| all variables are decision variables, this means that the clause set is satisfiable. 'l_False' +| if the clause set is unsatisfiable. 'l_Undef' if the bound on number of conflicts is reached. 
+|________________________________________________________________________________________________@*/ +lbool Solver::search(int nof_conflicts) +{ + assert(ok); + int backtrack_level; + int conflictC = 0; + vec learnt_clause, selectors; + unsigned int nblevels, szWithoutSelectors = 0; + bool blocked = false; + bool aDecisionWasMade = false; + + starts++; + for (;;) + { + if (decisionLevel() == 0) + { // We import clauses FIXME: ensure that we will import clauses enventually (restart after some point) + parallelImportUnaryClauses(); + + if (parallelImportClauses()) + return l_False; + } + CRef confl = propagate(); + + if (confl != CRef_Undef) + { + newDescent = false; + if (parallelJobIsFinished()) + return l_Undef; + + if (!aDecisionWasMade) + stats[noDecisionConflict]++; + aDecisionWasMade = false; + + stats[sumDecisionLevels] += decisionLevel(); + stats[sumTrail] += trail.size(); + // CONFLICT + conflicts++; + conflictC++; + conflictsRestarts++; + if (conflicts % 5000 == 0 && var_decay < max_var_decay) + var_decay += 0.01; + + if (verbosity >= 1 && starts > 0 && conflicts % verbEveryConflicts == 0) + { + printf("c | %8d %7d %5d | %7d %8d %8d | %5d %8d %6d %8d | %6.3f %% |\n", (int)starts, + (int)stats[nbstopsrestarts], (int)(conflicts / starts), + (int)stats[dec_vars] - (trail_lim.size() == 0 ? 
trail.size() : trail_lim[0]), nClauses(), + (int)stats[clauses_literals], (int)stats[nbReduceDB], nLearnts(), (int)stats[nbDL2], + (int)stats[nbRemovedClauses], progressEstimate() * 100); + } + if (decisionLevel() == 0) + { + return l_False; + } + if (adaptStrategies && conflicts == 100000) + { + cancelUntil(0); + adaptSolver(); + adaptStrategies = false; + return l_Undef; + } + + trailQueue.push(trail.size()); + // BLOCK RESTART (CP 2012 paper) + if (conflictsRestarts > LOWER_BOUND_FOR_BLOCKING_RESTART && lbdQueue.isvalid() && + trail.size() > R * trailQueue.getavg()) + { + lbdQueue.fastclear(); + stats[nbstopsrestarts]++; + if (!blocked) + { + stats[lastblockatrestart] = starts; + stats[nbstopsrestartssame]++; + blocked = true; + } + } + + learnt_clause.clear(); + selectors.clear(); + + analyze(confl, learnt_clause, selectors, backtrack_level, nblevels, szWithoutSelectors); + + lbdQueue.push(nblevels); + sumLBD += nblevels; + + cancelUntil(backtrack_level); + + if (certifiedUNSAT) + { + if (vbyte) + { + write_char('a'); + for (int i = 0; i < learnt_clause.size(); i++) + write_lit(2 * (var(learnt_clause[i]) + 1) + sign(learnt_clause[i])); + write_lit(0); + } + else + { + for (int i = 0; i < learnt_clause.size(); i++) + fprintf(certifiedOutput, "%i ", + (var(learnt_clause[i]) + 1) * (-2 * sign(learnt_clause[i]) + 1)); + fprintf(certifiedOutput, "0\n"); + } + } + + if (learnt_clause.size() == 1) + { + uncheckedEnqueue(learnt_clause[0]); + stats[nbUn]++; + parallelExportUnaryClause(learnt_clause[0]); + } + else + { + CRef cr; + if (chanseokStrategy && nblevels <= coLBDBound) + { + cr = ca.alloc(learnt_clause, false); + permanentLearnts.push(cr); + stats[nbPermanentLearnts]++; + } + else + { + cr = ca.alloc(learnt_clause, true); + ca[cr].setLBD(nblevels); + ca[cr].setOneWatched(false); + learnts.push(cr); + claBumpActivity(ca[cr]); + } +#ifdef INCREMENTAL + ca[cr].setSizeWithoutSelectors(szWithoutSelectors); +#endif + if (nblevels <= 2) + { + stats[nbDL2]++; + } // 
stats + if (ca[cr].size() == 2) + stats[nbBin]++; // stats + attachClause(cr); + lastLearntClause = cr; // Use in multithread (to hard to put inside ParallelSolver) + parallelExportClauseDuringSearch(ca[cr]); + uncheckedEnqueue(learnt_clause[0], cr); + } + varDecayActivity(); + claDecayActivity(); + } + else + { + // Our dynamic restart, see the SAT09 competition compagnion paper + if ((luby_restart && nof_conflicts <= conflictC) || + (!luby_restart && (lbdQueue.isvalid() && ((lbdQueue.getavg() * K) > (sumLBD / conflictsRestarts))))) + { + lbdQueue.fastclear(); + progress_estimate = progressEstimate(); + int bt = 0; +#ifdef INCREMENTAL + if (incremental) // DO NOT BACKTRACK UNTIL 0.. USELESS + bt = (decisionLevel() < assumptions.size()) ? decisionLevel() : assumptions.size(); +#endif + newDescent = true; + + if (randomize_on_restarts || fixed_randomize_on_restarts) + { + randomDescentAssignments = (uint32_t)drand(random_seed); + } + + cancelUntil(bt); + return l_Undef; + } + + // Simplify the set of problem clauses: + if (decisionLevel() == 0 && !simplify()) + { + return l_False; + } + // Perform clause database reduction ! 
+ if ((chanseokStrategy && !glureduce && learnts.size() > firstReduceDB) || + (glureduce && conflicts >= ((unsigned int)curRestart * nbclausesbeforereduce))) + { + + if (learnts.size() > 0) + { + curRestart = (conflicts / nbclausesbeforereduce) + 1; + reduceDB(); + if (!panicModeIsEnabled()) + nbclausesbeforereduce += incReduceDB; + } + } + + lastLearntClause = CRef_Undef; + Lit next = lit_Undef; + while (decisionLevel() < assumptions.size()) + { + // Perform user provided assumption: + Lit p = assumptions[decisionLevel()]; + if (value(p) == l_True) + { + // Dummy decision level: + newDecisionLevel(); + } + else if (value(p) == l_False) + { + analyzeFinal(~p, conflict); + return l_False; + } + else + { + next = p; + break; + } + } + + if (next == lit_Undef) + { + // New variable decision: + decisions++; + next = pickBranchLit(); + if (next == lit_Undef) + { + printf("c last restart ## conflicts : %d %d \n", conflictC, decisionLevel()); + // Model found: + return l_True; + } + } + + // Increase decision level and enqueue 'next' + aDecisionWasMade = true; + newDecisionLevel(); + uncheckedEnqueue(next); + } + } +} + +double Solver::progressEstimate() const +{ + double progress = 0; + double F = 1.0 / nVars(); + + for (int i = 0; i <= decisionLevel(); i++) + { + int beg = i == 0 ? 0 : trail_lim[i - 1]; + int end = i == decisionLevel() ? 
trail.size() : trail_lim[i]; + progress += pow(F, i) * (end - beg); + } + + return progress / nVars(); +} + +void Solver::printIncrementalStats() +{ + + printf("c---------- Glucose Stats -------------------------\n"); + printf("c restarts : %" PRIu64 "\n", starts); + printf("c nb ReduceDB : %" PRIu64 "\n", stats[nbReduceDB]); + printf("c nb removed Clauses : %" PRIu64 "\n", stats[nbRemovedClauses]); + printf("c nb learnts DL2 : %" PRIu64 "\n", stats[nbDL2]); + printf("c nb learnts size 2 : %" PRIu64 "\n", stats[nbBin]); + printf("c nb learnts size 1 : %" PRIu64 "\n", stats[nbUn]); + + printf("c conflicts : %" PRIu64 "\n", conflicts); + printf("c decisions : %" PRIu64 "\n", decisions); + printf("c propagations : %" PRIu64 "\n", propagations); + + printf("\nc SAT Calls : %d in %g seconds\n", nbSatCalls, totalTime4Sat); + printf("c UNSAT Calls : %d in %g seconds\n", nbUnsatCalls, totalTime4Unsat); + + printf("c--------------------------------------------------\n"); +} + +double Solver::luby(double y, int x) +{ + + // Find the finite subsequence that contains index 'x', and the + // size of that subsequence: + int size, seq; + for (size = 1, seq = 0; size < x + 1; seq++, size = 2 * size + 1); + + while (size - 1 != x) + { + size = (size - 1) >> 1; + seq--; + x = x % size; + } + + return pow(y, seq); +} + +// NOTE: assumptions passed in member-variable 'assumptions'. + +lbool Solver::solve_(bool do_simp, bool turn_off_simp) // Parameters are useless in core but useful for SimpSolver.... 
+{ + + if (incremental && certifiedUNSAT) + { + printf("Can not use incremental and certified unsat in the same time\n"); + exit(-1); + } + + model.clear(); + conflict.clear(); + if (!ok) + return l_False; + double curTime = cpuTime(); + + solves++; + + lbool status = l_Undef; + if (!incremental && verbosity >= 1) + { + printf("c ========================================[ MAGIC CONSTANTS " + "]==============================================\n"); + printf("c | Constants are supposed to work well together :-) " + " |\n"); + printf("c | however, if you find better choices, please let us known... " + " |\n"); + printf("c " + "|------------------------------------------------------------------------------------------------------" + "-|\n"); + if (adaptStrategies) + { + printf("c | Adapt dynamically the solver after 100000 conflicts (restarts, reduction strategies...) " + " |\n"); + printf("c " + "|--------------------------------------------------------------------------------------------------" + "-----|\n"); + } + printf("c | | | " + " |\n"); + printf("c | - Restarts: | - Reduce Clause DB: | - Minimize Asserting: " + " |\n"); + if (chanseokStrategy) + { + printf("c | * LBD Queue : %6d | chanseok Strategy | * size < %3d " + " |\n", + lbdQueue.maxSize(), lbSizeMinimizingClause); + printf("c | * Trail Queue : %6d | * learnts size : %6d | * lbd < %3d " + " |\n", + trailQueue.maxSize(), firstReduceDB, lbLBDMinimizingClause); + printf("c | * K : %6.2f | * Bound LBD : %6d | " + " |\n", + K, coLBDBound); + printf("c | * R : %6.2f | * Protected : (lbd)< %2d | " + " |\n", + R, lbLBDFrozenClause); + } + else + { + printf("c | * LBD Queue : %6d | * First : %6d | * size < %3d " + " |\n", + lbdQueue.maxSize(), nbclausesbeforereduce, lbSizeMinimizingClause); + printf("c | * Trail Queue : %6d | * Inc : %6d | * lbd < %3d " + " |\n", + trailQueue.maxSize(), incReduceDB, lbLBDMinimizingClause); + printf("c | * K : %6.2f | * Special : %6d | " + " |\n", + K, specialIncReduceDB); + printf("c 
| * R : %6.2f | * Protected : (lbd)< %2d | " + " |\n", + R, lbLBDFrozenClause); + } + printf("c | | | " + " |\n"); + printf("c ==================================[ Search Statistics (every %6d conflicts) " + "]=========================\n", + verbEveryConflicts); + printf("c | " + " |\n"); + + printf("c | RESTARTS | ORIGINAL | LEARNT | " + "Progress |\n"); + printf("c | NB Blocked Avg Cfc | Vars Clauses Literals | Red Learnts LBD2 Removed | " + " |\n"); + printf("c " + "=======================================================================================================" + "==\n"); + } + + // Search: + int curr_restarts = 0; + while (status == l_Undef) + { + status = search(luby_restart ? luby(restart_inc, curr_restarts) * luby_restart_factor : + 0); // the parameter is useless in glucose, kept to allow modifications + + if (!withinBudget()) + break; + curr_restarts++; + } + + if (!incremental && verbosity >= 1) + printf("c " + "=======================================================================================================" + "==\n"); + + if (certifiedUNSAT) + { // Want certified output + if (status == l_False) + { + if (vbyte) + { + write_char('a'); + write_lit(0); + } + else + { + fprintf(certifiedOutput, "0\n"); + } + } + fclose(certifiedOutput); + } + + if (status == l_True) + { + // Extend & copy model: + model.growTo(nVars()); + for (int i = 0; i < nVars(); i++) model[i] = value(i); + } + else if (status == l_False && conflict.size() == 0) + ok = false; + + cancelUntil(0); + + double finalTime = cpuTime(); + if (status == l_True) + { + nbSatCalls++; + totalTime4Sat += (finalTime - curTime); + } + if (status == l_False) + { + nbUnsatCalls++; + totalTime4Unsat += (finalTime - curTime); + } + + return status; +} + +//================================================================================================= +// Writing CNF to DIMACS: +// +// FIXME: this needs to be rewritten completely. 
+ +static Var mapVar(Var x, vec& map, Var& max) +{ + if (map.size() <= x || map[x] == -1) + { + map.growTo(x + 1, -1); + map[x] = max++; + } + return map[x]; +} + +void Solver::toDimacs(FILE* f, Clause& c, vec& map, Var& max) +{ + if (satisfied(c)) + return; + + for (int i = 0; i < c.size(); i++) + if (value(c[i]) != l_False) + fprintf(f, "%s%d ", sign(c[i]) ? "-" : "", mapVar(var(c[i]), map, max) + 1); + fprintf(f, "0\n"); +} + +void Solver::toDimacs(const char* file, const vec& assumps) +{ + FILE* f = fopen(file, "wr"); + if (f == NULL) + fprintf(stderr, "could not open file %s\n", file), exit(1); + toDimacs(f, assumps); + fclose(f); +} + +void Solver::toDimacs(FILE* f, const vec& assumps) +{ + // Handle case when solver is in contradictory state: + if (!ok) + { + fprintf(f, "p cnf 1 2\n1 0\n-1 0\n"); + return; + } + + vec map; + Var max = 0; + + // Cannot use removeClauses here because it is not safe + // to deallocate them at this point. Could be improved. + int cnt = 0; + for (int i = 0; i < clauses.size(); i++) + if (!satisfied(ca[clauses[i]])) + cnt++; + + for (int i = 0; i < clauses.size(); i++) + if (!satisfied(ca[clauses[i]])) + { + Clause& c = ca[clauses[i]]; + for (int j = 0; j < c.size(); j++) + if (value(c[j]) != l_False) + mapVar(var(c[j]), map, max); + } + + // Assumptions are added as unit clauses: + cnt += assumptions.size(); + + fprintf(f, "p cnf %d %d\n", max, cnt); + + for (int i = 0; i < assumptions.size(); i++) + { + assert(value(assumptions[i]) != l_False); + fprintf(f, "%s%d 0\n", sign(assumptions[i]) ? 
"-" : "", mapVar(var(assumptions[i]), map, max) + 1); + } + + for (int i = 0; i < clauses.size(); i++) toDimacs(f, ca[clauses[i]], map, max); + + if (verbosity > 0) + printf("Wrote %d clauses with %d variables.\n", cnt, max); +} + +//================================================================================================= +// Garbage Collection methods: + +void Solver::relocAll(ClauseAllocator& to) +{ + // All watchers: + // for (int i = 0; i < watches.size(); i++) + watches.cleanAll(); + watchesBin.cleanAll(); + unaryWatches.cleanAll(); + for (int v = 0; v < nVars(); v++) + for (int s = 0; s < 2; s++) + { + Lit p = mkLit(v, s); + // printf(" >>> RELOCING: %s%d\n", sign(p)?"-":"", var(p)+1); + vec& ws = watches[p]; + for (int j = 0; j < ws.size(); j++) ca.reloc(ws[j].cref, to); + vec& ws2 = watchesBin[p]; + for (int j = 0; j < ws2.size(); j++) ca.reloc(ws2[j].cref, to); + vec& ws3 = unaryWatches[p]; + for (int j = 0; j < ws3.size(); j++) ca.reloc(ws3[j].cref, to); + } + + // All reasons: + // + for (int i = 0; i < trail.size(); i++) + { + Var v = var(trail[i]); + + if (reason(v) != CRef_Undef && (ca[reason(v)].reloced() || locked(ca[reason(v)]))) + ca.reloc(vardata[v].reason, to); + } + + // All learnt: + // + for (int i = 0; i < learnts.size(); i++) ca.reloc(learnts[i], to); + + for (int i = 0; i < permanentLearnts.size(); i++) ca.reloc(permanentLearnts[i], to); + + // All original: + // + for (int i = 0; i < clauses.size(); i++) ca.reloc(clauses[i], to); + + for (int i = 0; i < unaryWatchedClauses.size(); i++) ca.reloc(unaryWatchedClauses[i], to); +} + +void Solver::garbageCollect() +{ + // Initialize the next region to a size corresponding to the estimated utilization degree. 
This + // is not precise but should avoid some unnecessary reallocations for the new region: + ClauseAllocator to(ca.size() - ca.wasted()); + relocAll(to); + if (verbosity >= 2) + printf("| Garbage collection: %12d bytes => %12d bytes |\n", + ca.size() * ClauseAllocator::Unit_Size, to.size() * ClauseAllocator::Unit_Size); + to.moveTo(ca); +} + +//-------------------------------------------------------------- +// Functions related to MultiThread. +// Useless in case of single core solver (aka original glucose) +// Keep them empty if you just use core solver +//-------------------------------------------------------------- + +bool Solver::panicModeIsEnabled() +{ + return false; +} + +void Solver::parallelImportUnaryClauses() {} + +bool Solver::parallelImportClauses() +{ + return false; +} + +void Solver::parallelExportUnaryClause(Lit p) {} + +void Solver::parallelExportClauseDuringSearch(Clause& c) {} + +bool Solver::parallelJobIsFinished() +{ + // Parallel: another job has finished let's quit + return false; +} + +void Solver::parallelImportClauseDuringConflictAnalysis(Clause& c, CRef confl) {} diff --git a/vendors/mugen/glucose-syrup-4.1/core/Solver.h b/vendors/mugen/glucose-syrup-4.1/core/Solver.h new file mode 100644 index 0000000000..c42f0df673 --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/core/Solver.h @@ -0,0 +1,870 @@ +/***************************************************************************************[Solver.h] + Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + LRI - Univ. Paris Sud, France (2009-2013) + Labri - Univ. Bordeaux, France + + Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + Labri - Univ. Bordeaux, France + +Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +is based on. 
(see below). + +Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel +version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software +without restriction, including the rights to use, copy, modify, merge, publish, distribute, +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +- The above and below copyrights notices and this permission notice shall be included in all +copies or substantial portions of the Software; +- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot +be used in any competitive event (sat competitions/evaluations) without the express permission of +the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event +using Glucose Parallel as an embedded SAT engine (single core or not). + + +--------------- Original Minisat Copyrights + +Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson +Copyright (c) 2007-2010, Niklas Sorensson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + **************************************************************************************************/ + +#ifndef Glucose_Solver_h +#define Glucose_Solver_h + +#include "core/BoundedQueue.h" +#include "core/Constants.h" +#include "core/SolverStats.h" +#include "core/SolverTypes.h" +#include "mtl/Alg.h" +#include "mtl/Clone.h" +#include "mtl/Heap.h" +#include "utils/Options.h" + +namespace Glucose +{ +// Core stats + +enum CoreStats +{ + sumResSeen, + sumRes, + sumTrail, + nbPromoted, + originalClausesSeen, + sumDecisionLevels, + nbPermanentLearnts, + nbRemovedClauses, + nbRemovedUnaryWatchedClauses, + nbReducedClauses, + nbDL2, + nbBin, + nbUn, + nbReduceDB, + rnd_decisions, + nbstopsrestarts, + nbstopsrestartssame, + lastblockatrestart, + dec_vars, + clauses_literals, + learnts_literals, + max_literals, + tot_literals, + noDecisionConflict +}; + +#define coreStatsSize 24 +//================================================================================================= +// Solver -- the main class: + +class Solver : public Clone +{ + + friend class SolverConfiguration; + + public: + // Constructor/Destructor: + // + Solver(); + Solver(const Solver& s); + + virtual ~Solver(); + + /** + * Clone function + */ + virtual Clone* clone() const + { + return new Solver(*this); + } + + // Problem specification: + // + virtual Var newVar(bool polarity = true, + bool dvar = true); // Add a new variable with parameters specifying variable mode. + bool addClause(const vec& ps); // Add a clause to the solver. + bool addEmptyClause(); // Add the empty clause, making the solver contradictory. + bool addClause(Lit p); // Add a unit clause to the solver. + bool addClause(Lit p, Lit q); // Add a binary clause to the solver. 
+ bool addClause(Lit p, Lit q, Lit r); // Add a ternary clause to the solver. + virtual bool addClause_(vec& ps); // Add a clause to the solver without making superflous internal copy. Will + // change the passed vector 'ps'. + // Solving: + // + bool simplify(); // Removes already satisfied clauses. + bool solve(const vec& assumps); // Search for a model that respects a given set of assumptions. + lbool solveLimited(const vec& assumps); // Search for a model that respects a given set of assumptions (With + // resource constraints). + bool solve(); // Search without assumptions. + bool solve(Lit p); // Search for a model that respects a single assumption. + bool solve(Lit p, Lit q); // Search for a model that respects two assumptions. + bool solve(Lit p, Lit q, Lit r); // Search for a model that respects three assumptions. + bool okay() const; // FALSE means solver is in a conflicting state + + // Convenience versions of 'toDimacs()': + void toDimacs(FILE* f, const vec& assumps); // Write CNF to file in DIMACS-format. + void toDimacs(const char* file, const vec& assumps); + void toDimacs(FILE* f, Clause& c, vec& map, Var& max); + void toDimacs(const char* file); + void toDimacs(const char* file, Lit p); + void toDimacs(const char* file, Lit p, Lit q); + void toDimacs(const char* file, Lit p, Lit q, Lit r); + + // Display clauses and literals + void printLit(Lit l); + void printClause(CRef c); + void printInitialClause(CRef c); + + // Variable mode: + // + void setPolarity(Var v, bool b); // Declare which polarity the decision heuristic should use for a variable. + // Requires mode 'polarity_user'. + void setDecisionVar(Var v, + bool b); // Declare if a variable should be eligible for selection in the decision heuristic. + + // Read state: + // + lbool value(Var x) const; // The current value of a variable. + lbool value(Lit p) const; // The current value of a literal. + lbool modelValue( + Var x) const; // The value of a variable in the last model. 
The last call to solve must have been satisfiable. + lbool modelValue( + Lit p) const; // The value of a literal in the last model. The last call to solve must have been satisfiable. + int nAssigns() const; // The current number of assigned literals. + int nClauses() const; // The current number of original clauses. + int nLearnts() const; // The current number of learnt clauses. + int nVars() const; // The current number of variables. + int nFreeVars(); + + inline char valuePhase(Var v) + { + return polarity[v]; + } + + // Incremental mode + void setIncrementalMode(); + void initNbInitialVars(int nb); + void printIncrementalStats(); + bool isIncremental(); + // Resource contraints: + // + void setConfBudget(int64_t x); + void setPropBudget(int64_t x); + void budgetOff(); + void interrupt(); // Trigger a (potentially asynchronous) interruption of the solver. + void clearInterrupt(); // Clear interrupt indicator flag. + + // Memory managment: + // + virtual void garbageCollect(); + void checkGarbage(double gf); + void checkGarbage(); + + // Extra results: (read-only member variable) + // + vec model; // If problem is satisfiable, this vector contains the model (if any). + vec conflict; // If problem is unsatisfiable (possibly under assumptions), + // this vector represent the final conflict clause expressed in the assumptions. 
+ + // Mode of operation: + // + int verbosity; + int verbEveryConflicts; + int showModel; + + // Constants For restarts + double K; + double R; + double sizeLBDQueue; + double sizeTrailQueue; + + // Constants for reduce DB + int firstReduceDB; + int incReduceDB; + int specialIncReduceDB; + unsigned int lbLBDFrozenClause; + bool chanseokStrategy; + int coLBDBound; // Keep all learnts with lbd<=coLBDBound + // Constant for reducing clause + int lbSizeMinimizingClause; + unsigned int lbLBDMinimizingClause; + + // Constant for heuristic + double var_decay; + double max_var_decay; + double clause_decay; + double random_var_freq; + double random_seed; + int ccmin_mode; // Controls conflict clause minimization (0=none, 1=basic, 2=deep). + int phase_saving; // Controls the level of phase saving (0=none, 1=limited, 2=full). + bool rnd_pol; // Use random polarities for branching heuristics. + bool rnd_init_act; // Initialize variable activities with a small random value. + bool randomizeFirstDescent; // the first decisions (until first cnflict) are made randomly + // Useful for syrup! + + // Constant for Memory managment + double garbage_frac; // The fraction of wasted memory allowed before a garbage collection is triggered. + + // Certified UNSAT ( Thanks to Marijn Heule + // New in 2016 : proof in DRAT format, possibility to use binary output + FILE* certifiedOutput; + bool certifiedUNSAT; + bool vbyte; + + void write_char(unsigned char c); + void write_lit(int n); + + // Panic mode. 
+ // Save memory + uint32_t panicModeLastRemoved, panicModeLastRemovedShared; + + bool useUnaryWatched; // Enable unary watched literals + bool promoteOneWatchedClause; // One watched clauses are promotted to two watched clauses if found empty + + // Functions useful for multithread solving + // Useless in the sequential case + // Overide in ParallelSolver + virtual void parallelImportClauseDuringConflictAnalysis(Clause& c, CRef confl); + virtual bool parallelImportClauses(); // true if the empty clause was received + virtual void parallelImportUnaryClauses(); + virtual void parallelExportUnaryClause(Lit p); + virtual void parallelExportClauseDuringSearch(Clause& c); + virtual bool parallelJobIsFinished(); + virtual bool panicModeIsEnabled(); + + double luby(double y, int x); + + // Statistics + vec stats; + + // Important stats completely related to search. Keep here + uint64_t solves, starts, decisions, propagations, conflicts, conflictsRestarts; + + protected: + long curRestart; + + // Alpha variables + bool glureduce; + uint32_t restart_inc; + bool luby_restart; + bool adaptStrategies; + uint32_t luby_restart_factor; + bool randomize_on_restarts, fixed_randomize_on_restarts, newDescent; + uint32_t randomDescentAssignments; + bool forceUnsatOnNewDescent; + // Helper structures: + // + struct VarData + { + CRef reason; + int level; + }; + static inline VarData mkVarData(CRef cr, int l) + { + VarData d = {cr, l}; + return d; + } + + struct Watcher + { + CRef cref; + Lit blocker; + Watcher(CRef cr, Lit p) : cref(cr), blocker(p) {} + bool operator==(const Watcher& w) const + { + return cref == w.cref; + } + bool operator!=(const Watcher& w) const + { + return cref != w.cref; + } + /* Watcher &operator=(Watcher w) { + this->cref = w.cref; + this->blocker = w.blocker; + return *this; + } + */ + }; + + struct WatcherDeleted + { + const ClauseAllocator& ca; + WatcherDeleted(const ClauseAllocator& _ca) : ca(_ca) {} + bool operator()(const Watcher& w) const + { + return 
ca[w.cref].mark() == 1; + } + }; + + struct VarOrderLt + { + const vec& activity; + bool operator()(Var x, Var y) const + { + return activity[x] > activity[y]; + } + VarOrderLt(const vec& act) : activity(act) {} + }; + + // Solver state: + // + int lastIndexRed; + bool ok; // If FALSE, the constraints are already unsatisfiable. No part of the solver state may be used! + double cla_inc; // Amount to bump next clause with. + vec activity; // A heuristic measurement of the activity of a variable. + double var_inc; // Amount to bump next variable with. + OccLists, WatcherDeleted> + watches; // 'watches[lit]' is a list of constraints watching 'lit' (will go there if literal becomes true). + OccLists, WatcherDeleted> + watchesBin; // 'watches[lit]' is a list of constraints watching 'lit' (will go there if literal becomes true). + OccLists, WatcherDeleted> + unaryWatches; // Unary watch scheme (clauses are seen when they become empty + vec clauses; // List of problem clauses. + vec learnts; // List of learnt clauses. + vec permanentLearnts; // The list of learnts clauses kept permanently + vec unaryWatchedClauses; // List of imported clauses (after the purgatory) // TODO put inside ParallelSolver + + vec assigns; // The current assignments. + vec polarity; // The preferred polarity of each variable. + vec forceUNSAT; + void bumpForceUNSAT(Lit q); // Handles the forces + + vec decision; // Declares if a variable is eligible for selection in the decision heuristic. + vec trail; // Assignment stack; stores all assigments made in the order they were made. + vec nbpos; + vec trail_lim; // Separator indices for different decision levels in 'trail'. + vec vardata; // Stores reason and level for each variable. + int qhead; // Head of queue (as index into the trail -- no more explicit propagation queue in MiniSat). + int simpDB_assigns; // Number of top-level assignments since last execution of 'simplify()'. 
+ int64_t simpDB_props; // Remaining number of propagations that must be made before next execution of 'simplify()'. + vec assumptions; // Current set of assumptions provided to solve by the user. + Heap order_heap; // A priority queue of variables ordered with respect to the variable activity. + double progress_estimate; // Set by 'search()'. + bool remove_satisfied; // Indicates whether possibly inefficient linear scan for satisfied clauses should be + // performed in 'simplify'. + vec + permDiff; // permDiff[var] contains the current conflict number... Used to count the number of LBD + + // UPDATEVARACTIVITY trick (see competition'09 companion paper) + vec lastDecisionLevel; + + ClauseAllocator ca; + + int nbclausesbeforereduce; // To know when it is time to reduce clause database + + // Used for restart strategies + bqueue trailQueue, lbdQueue; // Bounded queues for restarts. + float sumLBD; // used to compute the global average of LBD. Restarts... + int sumAssumptions; + CRef lastLearntClause; + + // Temporaries (to reduce allocation overhead). Each variable is prefixed by the method in which it is + // used, exept 'seen' wich is used in several places. + // + vec seen; + vec analyze_stack; + vec analyze_toclear; + vec add_tmp; + unsigned int MYFLAG; + + // Initial reduceDB strategy + double max_learnts; + double learntsize_adjust_confl; + int learntsize_adjust_cnt; + + // Resource contraints: + // + int64_t conflict_budget; // -1 means no budget. + int64_t propagation_budget; // -1 means no budget. + bool asynch_interrupt; + + // Variables added for incremental mode + int incremental; // Use incremental SAT Solver + int nbVarsInitialFormula; // nb VAR in formula without assumptions (incremental SAT) + double totalTime4Sat, totalTime4Unsat; + int nbSatCalls, nbUnsatCalls; + vec assumptionPositions, initialPositions; + + // Main internal methods: + // + void insertVarOrder(Var x); // Insert a variable in the decision order priority queue. 
+ Lit pickBranchLit(); // Return the next decision variable. + void newDecisionLevel(); // Begins a new decision level. + void uncheckedEnqueue(Lit p, CRef from = CRef_Undef); // Enqueue a literal. Assumes value of literal is undefined. + bool enqueue(Lit p, CRef from = CRef_Undef); // Test if fact 'p' contradicts current state, enqueue otherwise. + CRef propagate(); // Perform unit propagation. Returns possibly conflicting clause. + CRef propagateUnaryWatches(Lit p); // Perform propagation on unary watches of p, can find only conflicts + void cancelUntil(int level); // Backtrack until a certain level. + void analyze(CRef confl, vec& out_learnt, vec& selectors, int& out_btlevel, unsigned int& nblevels, + unsigned int& szWithoutSelectors); // (bt = backtrack) + void analyzeFinal(Lit p, vec& out_conflict); // COULD THIS BE IMPLEMENTED BY THE ORDINARIY "analyze" BY SOME + // REASONABLE GENERALIZATION? + bool litRedundant(Lit p, uint32_t abstract_levels); // (helper method for 'analyze()') + lbool search(int nof_conflicts); // Search for a given number of conflicts. + virtual lbool solve_(bool do_simp = true, + bool turn_off_simp = false); // Main solve method (assumptions given in 'assumptions'). + virtual void reduceDB(); // Reduce the set of learnt clauses. + void removeSatisfied(vec& cs); // Shrink 'cs' to contain only non-satisfied clauses. + void rebuildOrderHeap(); + + void adaptSolver(); // Adapt solver strategies + + // Maintaining Variable/Clause activity: + // + void varDecayActivity(); // Decay all variables with the specified factor. Implemented by increasing the 'bump' + // value instead. + void varBumpActivity(Var v, double inc); // Increase a variable with the current 'bump' value. + void varBumpActivity(Var v); // Increase a variable with the current 'bump' value. + void claDecayActivity(); // Decay all clauses with the specified factor. Implemented by increasing the 'bump' value + // instead. 
+ void claBumpActivity(Clause& c); // Increase a clause with the current 'bump' value. + + // Operations on clauses: + // + void attachClause(CRef cr); // Attach a clause to watcher lists. + void detachClause(CRef cr, bool strict = false); // Detach a clause to watcher lists. + void detachClausePurgatory(CRef cr, bool strict = false); + void attachClausePurgatory(CRef cr); + void removeClause(CRef cr, bool inPurgatory = false); // Detach and free a clause. + bool + locked(const Clause& c) const; // Returns TRUE if a clause is a reason for some implication in the current state. + bool satisfied(const Clause& c) const; // Returns TRUE if a clause is satisfied in the current state. + + template + unsigned int computeLBD(const T& lits, int end = -1); + void minimisationWithBinaryResolution(vec& out_learnt); + + virtual void relocAll(ClauseAllocator& to); + + // Misc: + // + int decisionLevel() const; // Gives the current decisionlevel. + uint32_t abstractLevel(Var x) const; // Used to represent an abstraction of sets of decision levels. + CRef reason(Var x) const; + int level(Var x) const; + double progressEstimate() const; // DELETE THIS ?? IT'S NOT VERY USEFUL ... + bool withinBudget() const; + inline bool isSelector(Var v) + { + return (incremental && v > nbVarsInitialFormula); + } + + // Static helpers: + // + + // Returns a random float 0 <= x < 1. Seed must never be 0. + static inline double drand(double& seed) + { + seed *= 1389796; + int q = (int)(seed / 2147483647); + seed -= (double)q * 2147483647; + return seed / 2147483647; + } + + // Returns a random integer 0 <= x < size. Seed must never be 0. 
+ static inline int irand(double& seed, int size) + { + return (int)(drand(seed) * size); + } +}; + +//================================================================================================= +// Implementation of inline methods: + +inline CRef Solver::reason(Var x) const +{ + return vardata[x].reason; +} +inline int Solver::level(Var x) const +{ + return vardata[x].level; +} + +inline void Solver::insertVarOrder(Var x) +{ + if (!order_heap.inHeap(x) && decision[x]) + order_heap.insert(x); +} + +inline void Solver::varDecayActivity() +{ + var_inc *= (1 / var_decay); +} +inline void Solver::varBumpActivity(Var v) +{ + varBumpActivity(v, var_inc); +} +inline void Solver::varBumpActivity(Var v, double inc) +{ + if ((activity[v] += inc) > 1e100) + { + // Rescale: + for (int i = 0; i < nVars(); i++) activity[i] *= 1e-100; + var_inc *= 1e-100; + } + + // Update order_heap with respect to new activity: + if (order_heap.inHeap(v)) + order_heap.decrease(v); +} + +inline void Solver::claDecayActivity() +{ + cla_inc *= (1 / clause_decay); +} +inline void Solver::claBumpActivity(Clause& c) +{ + if ((c.activity() += cla_inc) > 1e20) + { + // Rescale: + for (int i = 0; i < learnts.size(); i++) ca[learnts[i]].activity() *= 1e-20; + cla_inc *= 1e-20; + } +} + +inline void Solver::checkGarbage(void) +{ + return checkGarbage(garbage_frac); +} +inline void Solver::checkGarbage(double gf) +{ + if (ca.wasted() > ca.size() * gf) + garbageCollect(); +} + +// NOTE: enqueue does not set the ok flag! (only public methods do) +inline bool Solver::enqueue(Lit p, CRef from) +{ + return value(p) != l_Undef ? 
value(p) != l_False : (uncheckedEnqueue(p, from), true); +} +inline bool Solver::addClause(const vec& ps) +{ + ps.copyTo(add_tmp); + return addClause_(add_tmp); +} +inline bool Solver::addEmptyClause() +{ + add_tmp.clear(); + return addClause_(add_tmp); +} +inline bool Solver::addClause(Lit p) +{ + add_tmp.clear(); + add_tmp.push(p); + return addClause_(add_tmp); +} +inline bool Solver::addClause(Lit p, Lit q) +{ + add_tmp.clear(); + add_tmp.push(p); + add_tmp.push(q); + return addClause_(add_tmp); +} +inline bool Solver::addClause(Lit p, Lit q, Lit r) +{ + add_tmp.clear(); + add_tmp.push(p); + add_tmp.push(q); + add_tmp.push(r); + return addClause_(add_tmp); +} +inline bool Solver::locked(const Clause& c) const +{ + if (c.size() > 2) + return value(c[0]) == l_True && reason(var(c[0])) != CRef_Undef && ca.lea(reason(var(c[0]))) == &c; + return (value(c[0]) == l_True && reason(var(c[0])) != CRef_Undef && ca.lea(reason(var(c[0]))) == &c) || + (value(c[1]) == l_True && reason(var(c[1])) != CRef_Undef && ca.lea(reason(var(c[1]))) == &c); +} +inline void Solver::newDecisionLevel() +{ + trail_lim.push(trail.size()); +} + +inline int Solver::decisionLevel() const +{ + return trail_lim.size(); +} +inline uint32_t Solver::abstractLevel(Var x) const +{ + return 1 << (level(x) & 31); +} +inline lbool Solver::value(Var x) const +{ + return assigns[x]; +} +inline lbool Solver::value(Lit p) const +{ + return assigns[var(p)] ^ sign(p); +} +inline lbool Solver::modelValue(Var x) const +{ + return model[x]; +} +inline lbool Solver::modelValue(Lit p) const +{ + return model[var(p)] ^ sign(p); +} +inline int Solver::nAssigns() const +{ + return trail.size(); +} +inline int Solver::nClauses() const +{ + return clauses.size(); +} +inline int Solver::nLearnts() const +{ + return learnts.size(); +} +inline int Solver::nVars() const +{ + return vardata.size(); +} +inline int Solver::nFreeVars() +{ + int a = stats[dec_vars]; + return (int)(a) - (trail_lim.size() == 0 ? 
trail.size() : trail_lim[0]); +} +inline void Solver::setPolarity(Var v, bool b) +{ + polarity[v] = b; +} +inline void Solver::setDecisionVar(Var v, bool b) +{ + if (b && !decision[v]) + stats[dec_vars]++; + else if (!b && decision[v]) + stats[dec_vars]--; + + decision[v] = b; + insertVarOrder(v); +} +inline void Solver::setConfBudget(int64_t x) +{ + conflict_budget = conflicts + x; +} +inline void Solver::setPropBudget(int64_t x) +{ + propagation_budget = propagations + x; +} +inline void Solver::interrupt() +{ + asynch_interrupt = true; +} +inline void Solver::clearInterrupt() +{ + asynch_interrupt = false; +} +inline void Solver::budgetOff() +{ + conflict_budget = propagation_budget = -1; +} +inline bool Solver::withinBudget() const +{ + return !asynch_interrupt && (conflict_budget < 0 || conflicts < (uint64_t)conflict_budget) && + (propagation_budget < 0 || propagations < (uint64_t)propagation_budget); +} + +// FIXME: after the introduction of asynchronous interrruptions the solve-versions that return a +// pure bool do not give a safe interface. Either interrupts must be possible to turn off here, or +// all calls to solve must return an 'lbool'. I'm not yet sure which I prefer. 
+inline bool Solver::solve() +{ + budgetOff(); + assumptions.clear(); + return solve_() == l_True; +} +inline bool Solver::solve(Lit p) +{ + budgetOff(); + assumptions.clear(); + assumptions.push(p); + return solve_() == l_True; +} +inline bool Solver::solve(Lit p, Lit q) +{ + budgetOff(); + assumptions.clear(); + assumptions.push(p); + assumptions.push(q); + return solve_() == l_True; +} +inline bool Solver::solve(Lit p, Lit q, Lit r) +{ + budgetOff(); + assumptions.clear(); + assumptions.push(p); + assumptions.push(q); + assumptions.push(r); + return solve_() == l_True; +} +inline bool Solver::solve(const vec& assumps) +{ + budgetOff(); + assumps.copyTo(assumptions); + return solve_() == l_True; +} +inline lbool Solver::solveLimited(const vec& assumps) +{ + assumps.copyTo(assumptions); + return solve_(); +} +inline bool Solver::okay() const +{ + return ok; +} + +inline void Solver::toDimacs(const char* file) +{ + vec as; + toDimacs(file, as); +} +inline void Solver::toDimacs(const char* file, Lit p) +{ + vec as; + as.push(p); + toDimacs(file, as); +} +inline void Solver::toDimacs(const char* file, Lit p, Lit q) +{ + vec as; + as.push(p); + as.push(q); + toDimacs(file, as); +} +inline void Solver::toDimacs(const char* file, Lit p, Lit q, Lit r) +{ + vec as; + as.push(p); + as.push(q); + as.push(r); + toDimacs(file, as); +} + +//================================================================================================= +// Debug etc: + +inline void Solver::printLit(Lit l) +{ + printf("%s%d:%c", sign(l) ? "-" : "", var(l) + 1, value(l) == l_True ? '1' : (value(l) == l_False ? 
'0' : 'X')); +} + +inline void Solver::printClause(CRef cr) +{ + Clause& c = ca[cr]; + for (int i = 0; i < c.size(); i++) + { + printLit(c[i]); + printf(" "); + } +} + +inline void Solver::printInitialClause(CRef cr) +{ + Clause& c = ca[cr]; + for (int i = 0; i < c.size(); i++) + { + if (!isSelector(var(c[i]))) + { + printLit(c[i]); + printf(" "); + } + } +} + +//================================================================================================= +struct reduceDBAct_lt +{ + ClauseAllocator& ca; + + reduceDBAct_lt(ClauseAllocator& ca_) : ca(ca_) {} + + bool operator()(CRef x, CRef y) + { + + // Main criteria... Like in MiniSat we keep all binary clauses + if (ca[x].size() > 2 && ca[y].size() == 2) + return 1; + + if (ca[y].size() > 2 && ca[x].size() == 2) + return 0; + if (ca[x].size() == 2 && ca[y].size() == 2) + return 0; + + return ca[x].activity() < ca[y].activity(); + } +}; + +struct reduceDB_lt +{ + ClauseAllocator& ca; + + reduceDB_lt(ClauseAllocator& ca_) : ca(ca_) {} + + bool operator()(CRef x, CRef y) + { + + // Main criteria... 
Like in MiniSat we keep all binary clauses + if (ca[x].size() > 2 && ca[y].size() == 2) + return 1; + + if (ca[y].size() > 2 && ca[x].size() == 2) + return 0; + if (ca[x].size() == 2 && ca[y].size() == 2) + return 0; + + // Second one based on literal block distance + if (ca[x].lbd() > ca[y].lbd()) + return 1; + if (ca[x].lbd() < ca[y].lbd()) + return 0; + + // Finally we can use old activity or size, we choose the last one + return ca[x].activity() < ca[y].activity(); + // return x->size() < y->size(); + + // return ca[x].size() > 2 && (ca[y].size() == 2 || ca[x].activity() < ca[y].activity()); } + } +}; + +} // namespace Glucose + +#endif diff --git a/libs/mugen/glucose-syrup-4.1/core/SolverStats.h b/vendors/mugen/glucose-syrup-4.1/core/SolverStats.h similarity index 73% rename from libs/mugen/glucose-syrup-4.1/core/SolverStats.h rename to vendors/mugen/glucose-syrup-4.1/core/SolverStats.h index 90ad632ac9..d6ecc8201c 100644 --- a/libs/mugen/glucose-syrup-4.1/core/SolverStats.h +++ b/vendors/mugen/glucose-syrup-4.1/core/SolverStats.h @@ -9,19 +9,19 @@ Labri - Univ. Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). 
@@ -48,52 +48,58 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA **************************************************************************************************/ #ifndef SOLVERSTATS_H -#define SOLVERSTATS_H +#define SOLVERSTATS_H #include "mtl/Map.h" -#include -namespace Glucose { - - class SolverStats { - protected: - Map map; - - public: - - SolverStats(std::string all[],int sz) : map() { - addStats(all,sz); - } - - void addStats(std::string names[],int sz) { - for(int i = 0;i map[name]) - map[name] = val; - } - - void minimize(const std::string name,uint64_t val) { - if(val < map[name]) - map[name] = val; - } +#include +namespace Glucose +{ + +class SolverStats +{ + protected: + Map map; + + public: + SolverStats(std::string all[], int sz) : map() + { + addStats(all, sz); + } + + void addStats(std::string names[], int sz) + { + for (int i = 0; i < sz; i++) addStat(names[i]); + } + + void addStat(std::string name) + { + map.insert(name, 0); + } + + const uint64_t& operator[](const std::string name) const + { + return map[name]; + } + + uint64_t& operator[](const std::string name) + { + return map[name]; + } + + void maximize(const std::string name, uint64_t val) + { + if (val > map[name]) + map[name] = val; + } + + void minimize(const std::string name, uint64_t val) + { + if (val < map[name]) + map[name] = val; + } }; -} - -#endif /* SOLVERSTATS_H */ +} // namespace Glucose +#endif /* SOLVERSTATS_H */ diff --git a/vendors/mugen/glucose-syrup-4.1/core/SolverTypes.h b/vendors/mugen/glucose-syrup-4.1/core/SolverTypes.h new file mode 100644 index 0000000000..eb624b5985 --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/core/SolverTypes.h @@ -0,0 +1,766 @@ +/***************************************************************************************[SolverTypes.h] + Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + LRI - Univ. Paris Sud, France (2009-2013) + Labri - Univ. 
Bordeaux, France + + Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + Labri - Univ. Bordeaux, France + +Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +is based on. (see below). + +Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel +version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software +without restriction, including the rights to use, copy, modify, merge, publish, distribute, +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +- The above and below copyrights notices and this permission notice shall be included in all +copies or substantial portions of the Software; +- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot +be used in any competitive event (sat competitions/evaluations) without the express permission of +the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event +using Glucose Parallel as an embedded SAT engine (single core or not). 
+ + +--------------- Original Minisat Copyrights + +Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson +Copyright (c) 2007-2010, Niklas Sorensson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + **************************************************************************************************/ + +#ifndef Glucose_SolverTypes_h +#define Glucose_SolverTypes_h + +#include "mtl/Alg.h" +#include "mtl/Alloc.h" +#include "mtl/IntTypes.h" +#include "mtl/Map.h" +#include "mtl/Vec.h" + +#include +#include +#include + +namespace Glucose +{ + +//================================================================================================= +// Variables, literals, lifted booleans, clauses: + +// NOTE! Variables are just integers. No abstraction here. They should be chosen from 0..N, +// so that they can be used as array indices. 
+ +typedef int Var; +#define var_Undef (-1) + +struct Lit +{ + int x; + + // Use this as a constructor: + friend Lit mkLit(Var var, bool sign); + + bool operator==(Lit p) const + { + return x == p.x; + } + bool operator!=(Lit p) const + { + return x != p.x; + } + bool operator<(Lit p) const + { + return x < p.x; + } // '<' makes p, ~p adjacent in the ordering. +}; + +inline Lit mkLit(Var var, bool sign = false) +{ + Lit p; + p.x = var + var + (int)sign; + return p; +} +inline Lit operator~(Lit p) +{ + Lit q; + q.x = p.x ^ 1; + return q; +} +inline Lit operator^(Lit p, bool b) +{ + Lit q; + q.x = p.x ^ (unsigned int)b; + return q; +} +inline bool sign(Lit p) +{ + return p.x & 1; +} +inline int var(Lit p) +{ + return p.x >> 1; +} + +// Mapping Literals to and from compact integers suitable for array indexing: +inline int toInt(Var v) +{ + return v; +} +inline int toInt(Lit p) +{ + return p.x; +} +inline Lit toLit(int i) +{ + Lit p; + p.x = i; + return p; +} + +// const Lit lit_Undef = mkLit(var_Undef, false); // }- Useful special constants. +// const Lit lit_Error = mkLit(var_Undef, true ); // } + +const Lit lit_Undef = {-2}; // }- Useful special constants. +const Lit lit_Error = {-1}; // } + +//================================================================================================= +// Lifted booleans: +// +// NOTE: this implementation is optimized for the case when comparisons between values are mostly +// between one variable and one constant. Some care had to be taken to make sure that gcc +// does enough constant propagation to produce sensible code, and this appears to be somewhat +// fragile unfortunately. + +#define l_True (Glucose::lbool((uint8_t)0)) // gcc does not do constant propagation if these are real constants. 
+#define l_False (Glucose::lbool((uint8_t)1)) +#define l_Undef (Glucose::lbool((uint8_t)2)) + +class lbool +{ + uint8_t value; + + public: + explicit lbool(uint8_t v) : value(v) {} + + lbool() : value(0) {} + explicit lbool(bool x) : value(!x) {} + + bool operator==(lbool b) const + { + return ((b.value & 2) & (value & 2)) | (!(b.value & 2) & (value == b.value)); + } + bool operator!=(lbool b) const + { + return !(*this == b); + } + lbool operator^(bool b) const + { + return lbool((uint8_t)(value ^ (uint8_t)b)); + } + + lbool operator&&(lbool b) const + { + uint8_t sel = (this->value << 1) | (b.value << 3); + uint8_t v = (0xF7F755F4 >> sel) & 3; + return lbool(v); + } + + lbool operator||(lbool b) const + { + uint8_t sel = (this->value << 1) | (b.value << 3); + uint8_t v = (0xFCFCF400 >> sel) & 3; + return lbool(v); + } + + friend int toInt(lbool l); + friend lbool toLbool(int v); +}; +inline int toInt(lbool l) +{ + return l.value; +} +inline lbool toLbool(int v) +{ + return lbool((uint8_t)v); +} + +//================================================================================================= +// Clause -- a simple class for representing a clause: + +class Clause; +typedef RegionAllocator::Ref CRef; + +#define BITS_LBD 20 +#ifdef INCREMENTAL +#define BITS_SIZEWITHOUTSEL 19 +#endif +#define BITS_REALSIZE 32 +class Clause +{ + struct + { + unsigned mark : 2; + unsigned learnt : 1; + unsigned canbedel : 1; + unsigned extra_size : 2; // extra size (end of 32bits) 0..3 + unsigned seen : 1; + unsigned reloced : 1; + unsigned exported : 2; // Values to keep track of the clause status for exportations + unsigned oneWatched : 1; + unsigned lbd : BITS_LBD; + + unsigned size : BITS_REALSIZE; + +#ifdef INCREMENTAL + unsigned szWithoutSelectors : BITS_SIZEWITHOUTSEL; +#endif + } header; + + union + { + Lit lit; + float act; + uint32_t abs; + CRef rel; + } data[0]; + + friend class ClauseAllocator; + + // NOTE: This constructor cannot be used directly (doesn't allocate 
enough memory). + template + Clause(const V& ps, int _extra_size, bool learnt) + { + assert(_extra_size < (1 << 2)); + header.mark = 0; + header.learnt = learnt; + header.extra_size = _extra_size; + header.reloced = 0; + header.size = ps.size(); + header.lbd = 0; + header.canbedel = 1; + header.exported = 0; + header.oneWatched = 0; + header.seen = 0; + for (int i = 0; i < ps.size(); i++) data[i].lit = ps[i]; + + if (header.extra_size > 0) + { + if (header.learnt) + data[header.size].act = 0; + else + calcAbstraction(); + if (header.extra_size > 1) + { + data[header.size + 1].abs = 0; // learntFrom + } + } + } + + public: + void calcAbstraction() + { + assert(header.extra_size > 0); + uint32_t abstraction = 0; + for (int i = 0; i < size(); i++) abstraction |= 1 << (var(data[i].lit) & 31); + data[header.size].abs = abstraction; + } + + int size() const + { + return header.size; + } + void shrink(int i) + { + assert(i <= size()); + if (header.extra_size > 0) + { + data[header.size - i] = data[header.size]; + if (header.extra_size > 1) + { // Special case for imported clauses + data[header.size - i - 1] = data[header.size - 1]; + } + } + header.size -= i; + } + void pop() + { + shrink(1); + } + bool learnt() const + { + return header.learnt; + } + void nolearnt() + { + header.learnt = false; + } + bool has_extra() const + { + return header.extra_size > 0; + } + uint32_t mark() const + { + return header.mark; + } + void mark(uint32_t m) + { + header.mark = m; + } + const Lit& last() const + { + return data[header.size - 1].lit; + } + + bool reloced() const + { + return header.reloced; + } + CRef relocation() const + { + return data[0].rel; + } + void relocate(CRef c) + { + header.reloced = 1; + data[0].rel = c; + } + + // NOTE: somewhat unsafe to change the clause in-place! Must manually call 'calcAbstraction' afterwards for + // subsumption operations to behave correctly. 
+ Lit& operator[](int i) + { + return data[i].lit; + } + Lit operator[](int i) const + { + return data[i].lit; + } + operator const Lit*(void) const + { + return (Lit*)data; + } + + float& activity() + { + assert(header.extra_size > 0); + return data[header.size].act; + } + uint32_t abstraction() const + { + assert(header.extra_size > 0); + return data[header.size].abs; + } + + // Handle imported clauses lazy sharing + bool wasImported() const + { + return header.extra_size > 1; + } + uint32_t importedFrom() const + { + assert(header.extra_size > 1); + return data[header.size + 1].abs; + } + void setImportedFrom(uint32_t ifrom) + { + assert(header.extra_size > 1); + data[header.size + 1].abs = ifrom; + } + + Lit subsumes(const Clause& other) const; + void strengthen(Lit p); + void setLBD(int i) + { + header.lbd = i; /*if (i < (1<<(BITS_LBD-1))) header.lbd = i; else header.lbd = (1<<(BITS_LBD-1));*/ + } + // unsigned int& lbd () { return header.lbd; } + unsigned int lbd() const + { + return header.lbd; + } + void setCanBeDel(bool b) + { + header.canbedel = b; + } + bool canBeDel() + { + return header.canbedel; + } + void setSeen(bool b) + { + header.seen = b; + } + bool getSeen() + { + return header.seen; + } + void setExported(unsigned int b) + { + header.exported = b; + } + unsigned int getExported() + { + return header.exported; + } + void setOneWatched(bool b) + { + header.oneWatched = b; + } + bool getOneWatched() + { + return header.oneWatched; + } +#ifdef INCREMNENTAL + void setSizeWithoutSelectors(unsigned int n) + { + header.szWithoutSelectors = n; + } + unsigned int sizeWithoutSelectors() const + { + return header.szWithoutSelectors; + } +#endif +}; + +//================================================================================================= +// ClauseAllocator -- a simple class for allocating memory for clauses: + +const CRef CRef_Undef = RegionAllocator::Ref_Undef; +class ClauseAllocator : public RegionAllocator +{ + static int 
clauseWord32Size(int size, int extra_size) + { + return (sizeof(Clause) + (sizeof(Lit) * (size + extra_size))) / sizeof(uint32_t); + } + + public: + bool extra_clause_field; + + ClauseAllocator(uint32_t start_cap) : RegionAllocator(start_cap), extra_clause_field(false) {} + ClauseAllocator() : extra_clause_field(false) {} + + void moveTo(ClauseAllocator& to) + { + to.extra_clause_field = extra_clause_field; + RegionAllocator::moveTo(to); + } + + template + CRef alloc(const Lits& ps, bool learnt = false, bool imported = false) + { + assert(sizeof(Lit) == sizeof(uint32_t)); + assert(sizeof(float) == sizeof(uint32_t)); + + bool use_extra = learnt | extra_clause_field; + int extra_size = imported ? 3 : (use_extra ? 1 : 0); + CRef cid = RegionAllocator::alloc(clauseWord32Size(ps.size(), extra_size)); + new (lea(cid)) Clause(ps, extra_size, learnt); + + return cid; + } + + // Deref, Load Effective Address (LEA), Inverse of LEA (AEL): + Clause& operator[](Ref r) + { + return (Clause&)RegionAllocator::operator[](r); + } + const Clause& operator[](Ref r) const + { + return (Clause&)RegionAllocator::operator[](r); + } + Clause* lea(Ref r) + { + return (Clause*)RegionAllocator::lea(r); + } + const Clause* lea(Ref r) const + { + return (Clause*)RegionAllocator::lea(r); + } + Ref ael(const Clause* t) + { + return RegionAllocator::ael((uint32_t*)t); + } + + void free(CRef cid) + { + Clause& c = operator[](cid); + RegionAllocator::free(clauseWord32Size(c.size(), c.has_extra())); + } + + void reloc(CRef& cr, ClauseAllocator& to) + { + Clause& c = operator[](cr); + + if (c.reloced()) + { + cr = c.relocation(); + return; + } + + cr = to.alloc(c, c.learnt(), c.wasImported()); + c.relocate(cr); + + // Copy extra data-fields: + // (This could be cleaned-up. Generalize Clause-constructor to be applicable here instead?) 
+ to[cr].mark(c.mark()); + if (to[cr].learnt()) + { + to[cr].activity() = c.activity(); + to[cr].setLBD(c.lbd()); + to[cr].setExported(c.getExported()); + to[cr].setOneWatched(c.getOneWatched()); +#ifdef INCREMENTAL + to[cr].setSizeWithoutSelectors(c.sizeWithoutSelectors()); +#endif + to[cr].setCanBeDel(c.canBeDel()); + if (c.wasImported()) + { + to[cr].setImportedFrom(c.importedFrom()); + } + } + else + { + to[cr].setSeen(c.getSeen()); + if (to[cr].has_extra()) + to[cr].calcAbstraction(); + } + } +}; + +//================================================================================================= +// OccLists -- a class for maintaining occurence lists with lazy deletion: + +template +class OccLists +{ + vec occs; + vec dirty; + vec dirties; + Deleted deleted; + + public: + OccLists(const Deleted& d) : deleted(d) {} + + void init(const Idx& idx) + { + occs.growTo(toInt(idx) + 1); + dirty.growTo(toInt(idx) + 1, 0); + } + // Vec& operator[](const Idx& idx){ return occs[toInt(idx)]; } + Vec& operator[](const Idx& idx) + { + return occs[toInt(idx)]; + } + Vec& lookup(const Idx& idx) + { + if (dirty[toInt(idx)]) + clean(idx); + return occs[toInt(idx)]; + } + + void cleanAll(); + void copyTo(OccLists& copy) const + { + + copy.occs.growTo(occs.size()); + for (int i = 0; i < occs.size(); i++) occs[i].memCopyTo(copy.occs[i]); + dirty.memCopyTo(copy.dirty); + dirties.memCopyTo(copy.dirties); + } + + void clean(const Idx& idx); + void smudge(const Idx& idx) + { + if (dirty[toInt(idx)] == 0) + { + dirty[toInt(idx)] = 1; + dirties.push(idx); + } + } + + void clear(bool free = true) + { + occs.clear(free); + dirty.clear(free); + dirties.clear(free); + } +}; + +template +void OccLists::cleanAll() +{ + for (int i = 0; i < dirties.size(); i++) + // Dirties may contain duplicates so check here if a variable is already cleaned: + if (dirty[toInt(dirties[i])]) + clean(dirties[i]); + dirties.clear(); +} + +template +void OccLists::clean(const Idx& idx) +{ + Vec& vec = 
occs[toInt(idx)]; + int i, j; + for (i = j = 0; i < vec.size(); i++) + if (!deleted(vec[i])) + vec[j++] = vec[i]; + vec.shrink(i - j); + dirty[toInt(idx)] = 0; +} + +//================================================================================================= +// CMap -- a class for mapping clauses to values: + +template +class CMap +{ + struct CRefHash + { + uint32_t operator()(CRef cr) const + { + return (uint32_t)cr; + } + }; + + typedef Map HashTable; + HashTable map; + + public: + // Size-operations: + void clear() + { + map.clear(); + } + int size() const + { + return map.elems(); + } + + // Insert/Remove/Test mapping: + void insert(CRef cr, const T& t) + { + map.insert(cr, t); + } + void growTo(CRef cr, const T& t) + { + map.insert(cr, t); + } // NOTE: for compatibility + void remove(CRef cr) + { + map.remove(cr); + } + bool has(CRef cr, T& t) + { + return map.peek(cr, t); + } + + // Vector interface (the clause 'c' must already exist): + const T& operator[](CRef cr) const + { + return map[cr]; + } + T& operator[](CRef cr) + { + return map[cr]; + } + + // Iteration (not transparent at all at the moment): + int bucket_count() const + { + return map.bucket_count(); + } + const vec& bucket(int i) const + { + return map.bucket(i); + } + + // Move contents to other map: + void moveTo(CMap& other) + { + map.moveTo(other.map); + } + + // TMP debug: + void debug() + { + printf(" --- size = %d, bucket_count = %d\n", size(), map.bucket_count()); + } +}; + +/*_________________________________________________________________________________________________ +| +| subsumes : (other : const Clause&) -> Lit +| +| Description: +| Checks if clause subsumes 'other', and at the same time, if it can be used to simplify 'other' +| by subsumption resolution. 
+| +| Result: +| lit_Error - No subsumption or simplification +| lit_Undef - Clause subsumes 'other' +| p - The literal p can be deleted from 'other' +|________________________________________________________________________________________________@*/ +inline Lit Clause::subsumes(const Clause& other) const +{ + // if (other.size() < size() || (extra.abst & ~other.extra.abst) != 0) + // if (other.size() < size() || (!learnt() && !other.learnt() && (extra.abst & ~other.extra.abst) != 0)) + assert(!header.learnt); + assert(!other.header.learnt); + assert(header.extra_size > 0); + assert(other.header.extra_size > 0); + if (other.header.size < header.size || (data[header.size].abs & ~other.data[other.header.size].abs) != 0) + return lit_Error; + + Lit ret = lit_Undef; + const Lit* c = (const Lit*)(*this); + const Lit* d = (const Lit*)other; + + for (unsigned i = 0; i < header.size; i++) + { + // search for c[i] or ~c[i] + for (unsigned j = 0; j < other.header.size; j++) + if (c[i] == d[j]) + goto ok; + else if (ret == lit_Undef && c[i] == ~d[j]) + { + ret = c[i]; + goto ok; + } + + // did not find it + return lit_Error; + ok:; + } + + return ret; +} + +inline void Clause::strengthen(Lit p) +{ + remove(*this, p); + calcAbstraction(); +} + +//================================================================================================= +} // namespace Glucose + +#endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/Alg.h b/vendors/mugen/glucose-syrup-4.1/mtl/Alg.h similarity index 87% rename from libs/mugen/glucose-syrup-4.1/mtl/Alg.h rename to vendors/mugen/glucose-syrup-4.1/mtl/Alg.h index 9afb4552b8..203a51fc36 100644 --- a/libs/mugen/glucose-syrup-4.1/mtl/Alg.h +++ b/vendors/mugen/glucose-syrup-4.1/mtl/Alg.h @@ -23,7 +23,8 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA #include "mtl/Vec.h" -namespace Glucose { +namespace Glucose +{ //================================================================================================= 
// Useful functions on vector-like types: @@ -32,18 +33,17 @@ namespace Glucose { // Removing and searching for elements: // -template +template static inline void remove(V& ts, const T& t) { int j = 0; for (; j < ts.size() && ts[j] != t; j++); assert(j < ts.size()); - for (; j < ts.size()-1; j++) ts[j] = ts[j+1]; + for (; j < ts.size() - 1; j++) ts[j] = ts[j + 1]; ts.pop(); } - -template +template static inline bool find(V& ts, const T& t) { int j = 0; @@ -51,34 +51,37 @@ static inline bool find(V& ts, const T& t) return j < ts.size(); } - //================================================================================================= // Copying vectors with support for nested vector types: // // Base case: -template +template static inline void copy(const T& from, T& to) { to = from; } // Recursive case: -template +template static inline void copy(const vec& from, vec& to, bool append = false) { if (!append) to.clear(); - for (int i = 0; i < from.size(); i++){ + for (int i = 0; i < from.size(); i++) + { to.push(); copy(from[i], to.last()); } } -template -static inline void append(const vec& from, vec& to){ copy(from, to, true); } +template +static inline void append(const vec& from, vec& to) +{ + copy(from, to, true); +} //================================================================================================= -} +} // namespace Glucose #endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/Alloc.h b/vendors/mugen/glucose-syrup-4.1/mtl/Alloc.h similarity index 58% rename from libs/mugen/glucose-syrup-4.1/mtl/Alloc.h rename to vendors/mugen/glucose-syrup-4.1/mtl/Alloc.h index e027666f9e..3441400b7a 100644 --- a/libs/mugen/glucose-syrup-4.1/mtl/Alloc.h +++ b/vendors/mugen/glucose-syrup-4.1/mtl/Alloc.h @@ -17,88 +17,129 @@ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**************************************************************************************************/ - #ifndef Glucose_Alloc_h #define Glucose_Alloc_h -#include "mtl/XAlloc.h" #include "mtl/Vec.h" +#include "mtl/XAlloc.h" -namespace Glucose { +namespace Glucose +{ //================================================================================================= // Simple Region-based memory allocator: -template +template class RegionAllocator { - T* memory; - uint32_t sz; - uint32_t cap; - uint32_t wasted_; + T* memory; + uint32_t sz; + uint32_t cap; + uint32_t wasted_; void capacity(uint32_t min_cap); - public: + public: // TODO: make this a class for better type-checking? typedef uint32_t Ref; - enum { Ref_Undef = UINT32_MAX }; - enum { Unit_Size = sizeof(uint32_t) }; + enum + { + Ref_Undef = UINT32_MAX + }; + enum + { + Unit_Size = sizeof(uint32_t) + }; - explicit RegionAllocator(uint32_t start_cap = 1024*1024) : memory(NULL), sz(0), cap(0), wasted_(0){ capacity(start_cap); } + explicit RegionAllocator(uint32_t start_cap = 1024 * 1024) : memory(NULL), sz(0), cap(0), wasted_(0) + { + capacity(start_cap); + } ~RegionAllocator() { if (memory != NULL) ::free(memory); } + uint32_t size() const + { + return sz; + } + uint32_t getCap() const + { + return cap; + } + uint32_t wasted() const + { + return wasted_; + } - uint32_t size () const { return sz; } - uint32_t getCap () const { return cap;} - uint32_t wasted () const { return wasted_; } - - Ref alloc (int size); - void free (int size) { wasted_ += size; } + Ref alloc(int size); + void free(int size) + { + wasted_ += size; + } // Deref, Load Effective Address (LEA), Inverse of LEA (AEL): - T& operator[](Ref r) { assert(r >= 0 && r < sz); return memory[r]; } - const T& operator[](Ref r) const { assert(r >= 0 && r < sz); return memory[r]; } - - T* lea (Ref r) { assert(r >= 0 && r < sz); return &memory[r]; } - const T* lea (Ref r) const { assert(r >= 0 && r < sz); return &memory[r]; } - Ref ael (const T* t) { 
assert((void*)t >= (void*)&memory[0] && (void*)t < (void*)&memory[sz-1]); - return (Ref)(t - &memory[0]); } - - void moveTo(RegionAllocator& to) { - if (to.memory != NULL) ::free(to.memory); - to.memory = memory; - to.sz = sz; - to.cap = cap; + T& operator[](Ref r) + { + assert(r >= 0 && r < sz); + return memory[r]; + } + const T& operator[](Ref r) const + { + assert(r >= 0 && r < sz); + return memory[r]; + } + + T* lea(Ref r) + { + assert(r >= 0 && r < sz); + return &memory[r]; + } + const T* lea(Ref r) const + { + assert(r >= 0 && r < sz); + return &memory[r]; + } + Ref ael(const T* t) + { + assert((void*)t >= (void*)&memory[0] && (void*)t < (void*)&memory[sz - 1]); + return (Ref)(t - &memory[0]); + } + + void moveTo(RegionAllocator& to) + { + if (to.memory != NULL) + ::free(to.memory); + to.memory = memory; + to.sz = sz; + to.cap = cap; to.wasted_ = wasted_; memory = NULL; sz = cap = wasted_ = 0; } - void copyTo(RegionAllocator& to) const { - // if (to.memory != NULL) ::free(to.memory); - to.memory = (T*)xrealloc(to.memory, sizeof(T)*cap); - memcpy(to.memory,memory,sizeof(T)*cap); - to.sz = sz; - to.cap = cap; + void copyTo(RegionAllocator& to) const + { + // if (to.memory != NULL) ::free(to.memory); + to.memory = (T*)xrealloc(to.memory, sizeof(T) * cap); + memcpy(to.memory, memory, sizeof(T) * cap); + to.sz = sz; + to.cap = cap; to.wasted_ = wasted_; } - - - }; -template +template void RegionAllocator::capacity(uint32_t min_cap) { - if (cap >= min_cap) return; + if (cap >= min_cap) + return; uint32_t prev_cap = cap; - while (cap < min_cap){ + while (cap < min_cap) + { // NOTE: Multiply by a factor (13/8) without causing overflow, then add 2 and make the // result even by clearing the least significant bit. 
The resulting sequence of capacities // is carefully chosen to hit a maximum capacity that is close to the '2^32-1' limit when @@ -109,24 +150,22 @@ void RegionAllocator::capacity(uint32_t min_cap) if (cap <= prev_cap) throw OutOfMemoryException(); } - //printf(" .. (%p) cap = %u\n", this, cap); + // printf(" .. (%p) cap = %u\n", this, cap); assert(cap > 0); - memory = (T*)xrealloc(memory, sizeof(T)*cap); + memory = (T*)xrealloc(memory, sizeof(T) * cap); } - -template -typename RegionAllocator::Ref -RegionAllocator::alloc(int size) -{ - //printf("ALLOC called (this = %p, size = %d)\n", this, size); fflush(stdout); +template +typename RegionAllocator::Ref RegionAllocator::alloc(int size) +{ + // printf("ALLOC called (this = %p, size = %d)\n", this, size); fflush(stdout); assert(size > 0); capacity(sz + size); uint32_t prev_sz = sz; sz += size; - + // Handle overflow: if (sz < prev_sz) throw OutOfMemoryException(); @@ -134,8 +173,7 @@ RegionAllocator::alloc(int size) return prev_sz; } - //================================================================================================= -} +} // namespace Glucose #endif diff --git a/vendors/mugen/glucose-syrup-4.1/mtl/Clone.h b/vendors/mugen/glucose-syrup-4.1/mtl/Clone.h new file mode 100644 index 0000000000..7c1d611384 --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/mtl/Clone.h @@ -0,0 +1,14 @@ +#ifndef Glucose_Clone_h +#define Glucose_Clone_h + +namespace Glucose +{ + +class Clone +{ + public: + virtual Clone* clone() const = 0; +}; +}; // namespace Glucose + +#endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/Heap.h b/vendors/mugen/glucose-syrup-4.1/mtl/Heap.h similarity index 61% rename from libs/mugen/glucose-syrup-4.1/mtl/Heap.h rename to vendors/mugen/glucose-syrup-4.1/mtl/Heap.h index 0c40c4ff3f..20d11f31be 100644 --- a/libs/mugen/glucose-syrup-4.1/mtl/Heap.h +++ b/vendors/mugen/glucose-syrup-4.1/mtl/Heap.h @@ -23,128 +23,160 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 
SOFTWA #include "mtl/Vec.h" -namespace Glucose { +namespace Glucose +{ //================================================================================================= // A heap implementation with support for decrease/increase key. - -template -class Heap { +template +class Heap +{ Comp lt; // The heap is a minimum-heap with respect to this comparator vec heap; // Heap of integers vec indices; // Each integers position (index) in the Heap // Index "traversal" functions - static inline int left (int i) { return i*2+1; } - static inline int right (int i) { return (i+1)*2; } - static inline int parent(int i) { return (i-1) >> 1; } - - + static inline int left(int i) + { + return i * 2 + 1; + } + static inline int right(int i) + { + return (i + 1) * 2; + } + static inline int parent(int i) + { + return (i - 1) >> 1; + } void percolateUp(int i) { - int x = heap[i]; - int p = parent(i); - - while (i != 0 && lt(x, heap[p])){ + int x = heap[i]; + int p = parent(i); + + while (i != 0 && lt(x, heap[p])) + { heap[i] = heap[p]; indices[heap[p]] = i; i = p; p = parent(p); } - heap [i] = x; + heap[i] = x; indices[x] = i; } - void percolateDown(int i) { int x = heap[i]; - while (left(i) < heap.size()){ + while (left(i) < heap.size()) + { int child = right(i) < heap.size() && lt(heap[right(i)], heap[left(i)]) ? 
right(i) : left(i); - if (!lt(heap[child], x)) break; + if (!lt(heap[child], x)) + break; heap[i] = heap[child]; indices[heap[i]] = i; i = child; } - heap [i] = x; + heap[i] = x; indices[x] = i; } - public: - Heap(const Comp& c) : lt(c) { } - - int size () const { return heap.size(); } - bool empty () const { return heap.size() == 0; } - bool inHeap (int n) const { return n < indices.size() && indices[n] >= 0; } - int operator[](int index) const { assert(index < heap.size()); return heap[index]; } + Heap(const Comp& c) : lt(c) {} + int size() const + { + return heap.size(); + } + bool empty() const + { + return heap.size() == 0; + } + bool inHeap(int n) const + { + return n < indices.size() && indices[n] >= 0; + } + int operator[](int index) const + { + assert(index < heap.size()); + return heap[index]; + } - void decrease (int n) { assert(inHeap(n)); percolateUp (indices[n]); } - void increase (int n) { assert(inHeap(n)); percolateDown(indices[n]); } + void decrease(int n) + { + assert(inHeap(n)); + percolateUp(indices[n]); + } + void increase(int n) + { + assert(inHeap(n)); + percolateDown(indices[n]); + } - void copyTo(Heap& copy) const {heap.copyTo(copy.heap);indices.copyTo(copy.indices);} + void copyTo(Heap& copy) const + { + heap.copyTo(copy.heap); + indices.copyTo(copy.indices); + } // Safe variant of insert/decrease/increase: void update(int n) { if (!inHeap(n)) insert(n); - else { + else + { percolateUp(indices[n]); - percolateDown(indices[n]); } + percolateDown(indices[n]); + } } - void insert(int n) { - indices.growTo(n+1, -1); + indices.growTo(n + 1, -1); assert(!inHeap(n)); indices[n] = heap.size(); heap.push(n); - percolateUp(indices[n]); + percolateUp(indices[n]); } - - int removeMin() + int removeMin() { int x = heap[0]; heap[0] = heap.last(); indices[heap[0]] = 0; indices[x] = -1; heap.pop(); - if (heap.size() > 1) percolateDown(0); - return x; + if (heap.size() > 1) + percolateDown(0); + return x; } - // Rebuild the heap from scratch, using the 
elements in 'ns': - void build(vec& ns) { - for (int i = 0; i < heap.size(); i++) - indices[heap[i]] = -1; + void build(vec& ns) + { + for (int i = 0; i < heap.size(); i++) indices[heap[i]] = -1; heap.clear(); - for (int i = 0; i < ns.size(); i++){ + for (int i = 0; i < ns.size(); i++) + { indices[ns[i]] = i; - heap.push(ns[i]); } + heap.push(ns[i]); + } - for (int i = heap.size() / 2 - 1; i >= 0; i--) - percolateDown(i); + for (int i = heap.size() / 2 - 1; i >= 0; i--) percolateDown(i); } - void clear(bool dealloc = false) - { - for (int i = 0; i < heap.size(); i++) - indices[heap[i]] = -1; - heap.clear(dealloc); + void clear(bool dealloc = false) + { + for (int i = 0; i < heap.size(); i++) indices[heap[i]] = -1; + heap.clear(dealloc); } }; - //================================================================================================= -} +} // namespace Glucose #endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/IntTypes.h b/vendors/mugen/glucose-syrup-4.1/mtl/IntTypes.h similarity index 84% rename from libs/mugen/glucose-syrup-4.1/mtl/IntTypes.h rename to vendors/mugen/glucose-syrup-4.1/mtl/IntTypes.h index 2d8d4e872f..ff9a9594e3 100644 --- a/libs/mugen/glucose-syrup-4.1/mtl/IntTypes.h +++ b/vendors/mugen/glucose-syrup-4.1/mtl/IntTypes.h @@ -21,17 +21,17 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA #define Glucose_IntTypes_h #ifdef __sun - // Not sure if there are newer versions that support C99 headers. The - // needed features are implemented in the headers below though: +// Not sure if there are newer versions that support C99 headers. 
The +// needed features are implemented in the headers below though: -# include -# include -# include +#include +#include +#include #else -# include -# include +#include +#include #endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/Map.h b/vendors/mugen/glucose-syrup-4.1/mtl/Map.h similarity index 54% rename from libs/mugen/glucose-syrup-4.1/mtl/Map.h rename to vendors/mugen/glucose-syrup-4.1/mtl/Map.h index 34063cbac6..8697059bf2 100644 --- a/libs/mugen/glucose-syrup-4.1/mtl/Map.h +++ b/vendors/mugen/glucose-syrup-4.1/mtl/Map.h @@ -22,93 +22,167 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA #include "mtl/IntTypes.h" #include "mtl/Vec.h" + #include #include -namespace Glucose { +namespace Glucose +{ //================================================================================================= // Default hash/equals functions // -static inline uint32_t hash(std::string x) {std::hash hasher;return hasher(x); } - -template struct Hash { uint32_t operator()(const K& k) const { return hash(k); } }; -template struct Equal { bool operator()(const K& k1, const K& k2) const { return k1 == k2; } }; +static inline uint32_t hash(std::string x) +{ + std::hash hasher; + return hasher(x); +} -template struct DeepHash { uint32_t operator()(const K* k) const { return hash(*k); } }; -template struct DeepEqual { bool operator()(const K* k1, const K* k2) const { return *k1 == *k2; } }; +template +struct Hash +{ + uint32_t operator()(const K& k) const + { + return hash(k); + } +}; +template +struct Equal +{ + bool operator()(const K& k1, const K& k2) const + { + return k1 == k2; + } +}; -static inline uint32_t hash(uint32_t x){ return x; } -static inline uint32_t hash(uint64_t x){ return (uint32_t)x; } -static inline uint32_t hash(int32_t x) { return (uint32_t)x; } -static inline uint32_t hash(int64_t x) { return (uint32_t)x; } +template +struct DeepHash +{ + uint32_t operator()(const K* k) const + { + return hash(*k); + } +}; +template 
+struct DeepEqual +{ + bool operator()(const K* k1, const K* k2) const + { + return *k1 == *k2; + } +}; +static inline uint32_t hash(uint32_t x) +{ + return x; +} +static inline uint32_t hash(uint64_t x) +{ + return (uint32_t)x; +} +static inline uint32_t hash(int32_t x) +{ + return (uint32_t)x; +} +static inline uint32_t hash(int64_t x) +{ + return (uint32_t)x; +} //================================================================================================= // Some primes // -static const int nprimes = 25; -static const int primes [nprimes] = { 31, 73, 151, 313, 643, 1291, 2593, 5233, 10501, 21013, 42073, 84181, 168451, 337219, 674701, 1349473, 2699299, 5398891, 10798093, 21596719, 43193641, 86387383, 172775299, 345550609, 691101253 }; +static const int nprimes = 25; +static const int primes[nprimes] = {31, 73, 151, 313, 643, 1291, 2593, + 5233, 10501, 21013, 42073, 84181, 168451, 337219, + 674701, 1349473, 2699299, 5398891, 10798093, 21596719, 43193641, + 86387383, 172775299, 345550609, 691101253}; //================================================================================================= // Hash table implementation of Maps // -template, class E = Equal > -class Map { - public: - struct Pair { K key; D data; }; +template , class E = Equal> +class Map +{ + public: + struct Pair + { + K key; + D data; + }; - private: - H hash; - E equals; + private: + H hash; + E equals; vec* table; int cap; int size; // Don't allow copying (error prone): - Map& operator = (Map& other) { assert(0); } - Map (Map& other) { assert(0); } + Map& operator=(Map& other) + { + assert(0); + } + Map(Map& other) + { + assert(0); + } - bool checkCap(int new_size) const { return new_size > cap; } + bool checkCap(int new_size) const + { + return new_size > cap; + } - int32_t index (const K& k) const { return hash(k) % cap; } - void _insert (const K& k, const D& d) { + int32_t index(const K& k) const + { + return hash(k) % cap; + } + void _insert(const K& k, const D& d) + { vec& ps = 
table[index(k)]; - ps.push(); ps.last().key = k; ps.last().data = d; } + ps.push(); + ps.last().key = k; + ps.last().data = d; + } - void rehash () { + void rehash() + { const vec* old = table; int old_cap = cap; int newsize = primes[0]; - for (int i = 1; newsize <= cap && i < nprimes; i++) - newsize = primes[i]; + for (int i = 1; newsize <= cap && i < nprimes; i++) newsize = primes[i]; table = new vec[newsize]; cap = newsize; - for (int i = 0; i < old_cap; i++){ - for (int j = 0; j < old[i].size(); j++){ - _insert(old[i][j].key, old[i][j].data); }} + for (int i = 0; i < old_cap; i++) + { + for (int j = 0; j < old[i].size(); j++) + { + _insert(old[i][j].key, old[i][j].data); + } + } - delete [] old; + delete[] old; // printf(" --- rehashing, old-cap=%d, new-cap=%d\n", cap, newsize); } - - public: - - Map () : table(NULL), cap(0), size(0) {} - Map (const H& h, const E& e) : hash(h), equals(e), table(NULL), cap(0), size(0){} - ~Map () { delete [] table; } + public: + Map() : table(NULL), cap(0), size(0) {} + Map(const H& h, const E& e) : hash(h), equals(e), table(NULL), cap(0), size(0) {} + ~Map() + { + delete[] table; + } // PRECONDITION: the key must already exist in the map. - const D& operator [] (const K& k) const + const D& operator[](const K& k) const { assert(size != 0); const D* res = NULL; @@ -116,13 +190,13 @@ class Map { for (int i = 0; i < ps.size(); i++) if (equals(ps[i].key, k)) res = &ps[i].data; -// if(res==NULL) printf("%s\n",k.c_str()); + // if(res==NULL) printf("%s\n",k.c_str()); assert(res != NULL); return *res; } // PRECONDITION: the key must already exist in the map. 
- D& operator [] (const K& k) + D& operator[](const K& k) { assert(size != 0); D* res = NULL; @@ -130,26 +204,38 @@ class Map { for (int i = 0; i < ps.size(); i++) if (equals(ps[i].key, k)) res = &ps[i].data; -// if(res==NULL) printf("%s\n",k.c_str()); + // if(res==NULL) printf("%s\n",k.c_str()); assert(res != NULL); return *res; } // PRECONDITION: the key must *NOT* exist in the map. - void insert (const K& k, const D& d) { if (checkCap(size+1)) rehash(); _insert(k, d); size++; } - bool peek (const K& k, D& d) const { - if (size == 0) return false; + void insert(const K& k, const D& d) + { + if (checkCap(size + 1)) + rehash(); + _insert(k, d); + size++; + } + bool peek(const K& k, D& d) const + { + if (size == 0) + return false; const vec& ps = table[index(k)]; for (int i = 0; i < ps.size(); i++) - if (equals(ps[i].key, k)){ + if (equals(ps[i].key, k)) + { d = ps[i].data; - return true; } + return true; + } return false; } - bool has (const K& k) const { - if (size == 0) return false; + bool has(const K& k) const + { + if (size == 0) + return false; const vec& ps = table[index(k)]; for (int i = 0; i < ps.size(); i++) if (equals(ps[i].key, k)) @@ -158,10 +244,11 @@ class Map { } // PRECONDITION: the key must exist in the map. 
- void remove(const K& k) { + void remove(const K& k) + { assert(table != NULL); vec& ps = table[index(k)]; - int j = 0; + int j = 0; for (; j < ps.size() && !equals(ps[j].key, k); j++); assert(j < ps.size()); ps[j] = ps.last(); @@ -169,18 +256,26 @@ class Map { size--; } - void clear () { + void clear() + { cap = size = 0; - delete [] table; + delete[] table; table = NULL; } - int elems() const { return size; } - int bucket_count() const { return cap; } + int elems() const + { + return size; + } + int bucket_count() const + { + return cap; + } // NOTE: the hash and equality objects are not moved by this method: - void moveTo(Map& other){ - delete [] other.table; + void moveTo(Map& other) + { + delete[] other.table; other.table = table; other.cap = cap; @@ -191,10 +286,13 @@ class Map { } // NOTE: given a bit more time, I could make a more C++-style iterator out of this: - const vec& bucket(int i) const { return table[i]; } + const vec& bucket(int i) const + { + return table[i]; + } }; //================================================================================================= -} +} // namespace Glucose #endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/Queue.h b/vendors/mugen/glucose-syrup-4.1/mtl/Queue.h similarity index 61% rename from libs/mugen/glucose-syrup-4.1/mtl/Queue.h rename to vendors/mugen/glucose-syrup-4.1/mtl/Queue.h index c71e45bae9..6c322a89e0 100644 --- a/libs/mugen/glucose-syrup-4.1/mtl/Queue.h +++ b/vendors/mugen/glucose-syrup-4.1/mtl/Queue.h @@ -23,49 +23,79 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA #include "mtl/Vec.h" -namespace Glucose { +namespace Glucose +{ //================================================================================================= -template -class Queue { - vec buf; - int first; - int end; +template +class Queue +{ + vec buf; + int first; + int end; -public: + public: typedef T Key; Queue() : buf(1), first(0), end(0) {} - void clear (bool dealloc = false) { 
buf.clear(dealloc); buf.growTo(1); first = end = 0; } - int size () const { return (end >= first) ? end - first : end - first + buf.size(); } + void clear(bool dealloc = false) + { + buf.clear(dealloc); + buf.growTo(1); + first = end = 0; + } + int size() const + { + return (end >= first) ? end - first : end - first + buf.size(); + } + + const T& operator[](int index) const + { + assert(index >= 0); + assert(index < size()); + return buf[(first + index) % buf.size()]; + } + T& operator[](int index) + { + assert(index >= 0); + assert(index < size()); + return buf[(first + index) % buf.size()]; + } - - - const T& operator [] (int index) const { assert(index >= 0); assert(index < size()); return buf[(first + index) % buf.size()]; } - T& operator [] (int index) { assert(index >= 0); assert(index < size()); return buf[(first + index) % buf.size()]; } + T peek() const + { + assert(first != end); + return buf[first]; + } + void pop() + { + assert(first != end); + first++; + if (first == buf.size()) + first = 0; + } - T peek () const { assert(first != end); return buf[first]; } - void pop () { assert(first != end); first++; if (first == buf.size()) first = 0; } - - - void copyTo(Queue& copy) const { + void copyTo(Queue& copy) const + { copy.first = first; - copy.end = end; + copy.end = end; buf.memCopyTo(copy.buf); } - - - void insert(T elem) { // INVARIANT: buf[end] is always unused + + void insert(T elem) + { // INVARIANT: buf[end] is always unused buf[end++] = elem; - if (end == buf.size()) end = 0; - if (first == end){ // Resize: - vec tmp((buf.size()*3 + 1) >> 1); + if (end == buf.size()) + end = 0; + if (first == end) + { // Resize: + vec tmp((buf.size() * 3 + 1) >> 1); //**/printf("queue alloc: %d elems (%.1f MB)\n", tmp.size(), tmp.size() * sizeof(T) / 1000000.0); - int i = 0; + int i = 0; for (int j = first; j < buf.size(); j++) tmp[i++] = buf[j]; - for (int j = 0 ; j < end ; j++) tmp[i++] = buf[j]; + for (int j = 0; j < end; j++) tmp[i++] = buf[j]; first = 0; end 
= buf.size(); tmp.moveTo(buf); @@ -73,8 +103,7 @@ class Queue { } }; - //================================================================================================= -} +} // namespace Glucose #endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/Sort.h b/vendors/mugen/glucose-syrup-4.1/mtl/Sort.h similarity index 62% rename from libs/mugen/glucose-syrup-4.1/mtl/Sort.h rename to vendors/mugen/glucose-syrup-4.1/mtl/Sort.h index 50cb448449..4bb1f8d882 100644 --- a/libs/mugen/glucose-syrup-4.1/mtl/Sort.h +++ b/vendors/mugen/glucose-syrup-4.1/mtl/Sort.h @@ -26,32 +26,42 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA //================================================================================================= // Some sorting algorithms for vec's +namespace Glucose +{ -namespace Glucose { - -template -struct LessThan_default { - bool operator () (T x, T y) { return x < y; } +template +struct LessThan_default +{ + bool operator()(T x, T y) + { + return x < y; + } }; - template void selectionSort(T* array, int size, LessThan lt) { - int i, j, best_i; - T tmp; + int i, j, best_i; + T tmp; - for (i = 0; i < size-1; i++){ + for (i = 0; i < size - 1; i++) + { best_i = i; - for (j = i+1; j < size; j++){ + for (j = i + 1; j < size; j++) + { if (lt(array[j], array[best_i])) best_i = j; } - tmp = array[i]; array[i] = array[best_i]; array[best_i] = tmp; + tmp = array[i]; + array[i] = array[best_i]; + array[best_i] = tmp; } } -template static inline void selectionSort(T* array, int size) { - selectionSort(array, size, LessThan_default()); } +template +static inline void selectionSort(T* array, int size) +{ + selectionSort(array, size, LessThan_default()); +} template void sort(T* array, int size, LessThan lt) @@ -59,40 +69,53 @@ void sort(T* array, int size, LessThan lt) if (size <= 15) selectionSort(array, size, lt); - else{ - T pivot = array[size / 2]; - T tmp; - int i = -1; - int j = size; - - for(;;){ - do i++; while(lt(array[i], 
pivot)); - do j--; while(lt(pivot, array[j])); - - if (i >= j) break; - - tmp = array[i]; array[i] = array[j]; array[j] = tmp; + else + { + T pivot = array[size / 2]; + T tmp; + int i = -1; + int j = size; + + for (;;) + { + do i++; + while (lt(array[i], pivot)); + do j--; + while (lt(pivot, array[j])); + + if (i >= j) + break; + + tmp = array[i]; + array[i] = array[j]; + array[j] = tmp; } - sort(array , i , lt); - sort(&array[i], size-i, lt); + sort(array, i, lt); + sort(&array[i], size - i, lt); } } -template static inline void sort(T* array, int size) { - sort(array, size, LessThan_default()); } - +template +static inline void sort(T* array, int size) +{ + sort(array, size, LessThan_default()); +} //================================================================================================= // For 'vec's: - -template void sort(vec& v, LessThan lt) { - sort((T*)v, v.size(), lt); } -template void sort(vec& v) { - sort(v, LessThan_default()); } - +template +void sort(vec& v, LessThan lt) +{ + sort((T*)v, v.size(), lt); +} +template +void sort(vec& v) +{ + sort(v, LessThan_default()); +} //================================================================================================= -} +} // namespace Glucose #endif diff --git a/vendors/mugen/glucose-syrup-4.1/mtl/Vec.h b/vendors/mugen/glucose-syrup-4.1/mtl/Vec.h new file mode 100644 index 0000000000..29e4b5948e --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/mtl/Vec.h @@ -0,0 +1,251 @@ +/*******************************************************************************************[Vec.h] +Copyright (c) 2003-2007, Niklas Een, Niklas Sorensson +Copyright (c) 2007-2010, Niklas Sorensson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the 
Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +**************************************************************************************************/ + +#ifndef Glucose_Vec_h +#define Glucose_Vec_h + +#include "mtl/IntTypes.h" +#include "mtl/XAlloc.h" + +#include + +#include +#include + +namespace Glucose +{ + +//================================================================================================= +// Automatically resizable arrays +// +// NOTE! 
Don't use this vector on datatypes that cannot be re-located in memory (with realloc) + +template +class vec +{ + T* data; + int sz; + int cap; + + // Don't allow copying (error prone): + vec& operator=(vec& other) + { + assert(0); + return *this; + } + vec(vec& other) + { + assert(0); + } + + // Helpers for calculating next capacity: + static inline int imax(int x, int y) + { + int mask = (y - x) >> (sizeof(int) * 8 - 1); + return (x & mask) + (y & (~mask)); + } + // static inline void nextCap(int& cap){ cap += ((cap >> 1) + 2) & ~1; } + static inline void nextCap(int& cap) + { + cap += ((cap >> 1) + 2) & ~1; + } + + public: + // Constructors: + vec() : data(NULL), sz(0), cap(0) {} + explicit vec(int size) : data(NULL), sz(0), cap(0) + { + growTo(size); + } + vec(int size, const T& pad) : data(NULL), sz(0), cap(0) + { + growTo(size, pad); + } + ~vec() + { + clear(true); + } + + // Pointer to first element: + operator T*(void) + { + return data; + } + + // Size operations: + int size(void) const + { + return sz; + } + void shrink(int nelems) + { + assert(nelems <= sz); + for (int i = 0; i < nelems; i++) sz--, data[sz].~T(); + } + void shrink_(int nelems) + { + assert(nelems <= sz); + sz -= nelems; + } + int capacity(void) const + { + return cap; + } + void capacity(int min_cap); + void growTo(int size); + void growTo(int size, const T& pad); + void clear(bool dealloc = false); + + // Stack interface: + void push(void) + { + if (sz == cap) + capacity(sz + 1); + new (&data[sz]) T(); + sz++; + } + void push(const T& elem) + { + if (sz == cap) + capacity(sz + 1); + data[sz++] = elem; + } + void push_(const T& elem) + { + assert(sz < cap); + data[sz++] = elem; + } + void pop(void) + { + assert(sz > 0); + sz--, data[sz].~T(); + } + + void remove(const T& elem) + { + int tmp; + for (tmp = 0; tmp < sz; tmp++) + { + if (data[tmp] == elem) + break; + } + if (tmp < sz) + { + assert(data[tmp] == elem); + data[tmp] = data[sz - 1]; + sz = sz - 1; + } + } + + // NOTE: it seems 
possible that overflow can happen in the 'sz+1' expression of 'push()', but + // in fact it can not since it requires that 'cap' is equal to INT_MAX. This in turn can not + // happen given the way capacities are calculated (below). Essentially, all capacities are + // even, but INT_MAX is odd. + + const T& last(void) const + { + return data[sz - 1]; + } + T& last(void) + { + return data[sz - 1]; + } + + // Vector interface: + const T& operator[](int index) const + { + return data[index]; + } + T& operator[](int index) + { + return data[index]; + } + + // Duplicatation (preferred instead): + void copyTo(vec& copy) const + { + copy.clear(); + copy.growTo(sz); + for (int i = 0; i < sz; i++) copy[i] = data[i]; + } + void moveTo(vec& dest) + { + dest.clear(true); + dest.data = data; + dest.sz = sz; + dest.cap = cap; + data = NULL; + sz = 0; + cap = 0; + } + void memCopyTo(vec& copy) const + { + copy.capacity(cap); + copy.sz = sz; + memcpy(copy.data, data, sizeof(T) * cap); + } +}; + +template +void vec::capacity(int min_cap) +{ + if (cap >= min_cap) + return; + int add = imax((min_cap - cap + 1) & ~1, ((cap >> 1) + 2) & ~1); // NOTE: grow by approximately 3/2 + if (add > INT_MAX - cap || ((data = (T*)::realloc(data, (cap += add) * sizeof(T))) == NULL) && errno == ENOMEM) + throw OutOfMemoryException(); +} + +template +void vec::growTo(int size, const T& pad) +{ + if (sz >= size) + return; + capacity(size); + for (int i = sz; i < size; i++) data[i] = pad; + sz = size; +} + +template +void vec::growTo(int size) +{ + if (sz >= size) + return; + capacity(size); + for (int i = sz; i < size; i++) new (&data[i]) T(); + sz = size; +} + +template +void vec::clear(bool dealloc) +{ + if (data != NULL) + { + for (int i = 0; i < sz; i++) data[i].~T(); + sz = 0; + if (dealloc) + free(data), data = NULL, cap = 0; + } +} + +//================================================================================================= +} // namespace Glucose + +#endif diff --git 
a/vendors/mugen/glucose-syrup-4.1/mtl/VecThreads.h b/vendors/mugen/glucose-syrup-4.1/mtl/VecThreads.h new file mode 100644 index 0000000000..d7119bfc75 --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/mtl/VecThreads.h @@ -0,0 +1,327 @@ +/*******************************************************************************************[VecThreads.h] + * Threads safe version used in Glucose-Syrup, 2015, Gilles Audemard, Laurent Simon +Copyright (c) 2003-2007, Niklas Een, Niklas Sorensson +Copyright (c) 2007-2010, Niklas Sorensson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +**************************************************************************************************/ + +#ifndef Glucose_VecThreads_h +#define Glucose_VecThreads_h + +#include "mtl/IntTypes.h" +#include "mtl/XAlloc.h" + +#include + +#include +#include + +namespace Glucose +{ + +//================================================================================================= +// Automatically resizable arrays +// +// NOTE! 
Don't use this vector on datatypes that cannot be re-located in memory (with realloc) + +template +class vecThreads +{ + T* data; + int sz; + int cap; + bool lock; + int nbusers; + + // Don't allow copying (error prone): + vecThreads& operator=(vecThreads& other) + { + assert(0); + return *this; + } + vecThreads(vecThreads& other) + { + assert(0); + } + + // Helpers for calculating next capacity: + static inline int imax(int x, int y) + { + int mask = (y - x) >> (sizeof(int) * 8 - 1); + return (x & mask) + (y & (~mask)); + } + // static inline void nextCap(int& cap){ cap += ((cap >> 1) + 2) & ~1; } + static inline void nextCap(int& cap) + { + cap += ((cap >> 1) + 2) & ~1; + } + + public: + // Constructors: + vecThreads() : data(NULL), sz(0), cap(0), lock(false), nbusers(0) {} + explicit vecThreads(int size) : data(NULL), sz(0), cap(0), lock(false), nbusers(0) + { + growTo(size); + } + vecThreads(int size, const T& pad) : data(NULL), sz(0), cap(0), lock(false), nbusers(0) + { + growTo(size, pad); + } + ~vecThreads() + { + clear(true); + } + + // Pointer to first element: + operator T*(void) + { + return data; + } + + // Size operations: + int size(void) const + { + return sz; + } + void shrink(int nelems) + { + assert(nelems <= sz); + for (int i = 0; i < nelems; i++) sz--, data[sz].~T(); + } + void shrink_(int nelems) + { + assert(nelems <= sz); + sz -= nelems; + } + int capacity(void) const + { + return cap; + } + void capacity(int min_cap); + void capacityProtected(int min_cap); + void growTo(int size); + void growTo(int size, const T& pad); + void clear(bool dealloc = false); + + // Stack interface: + void push(void) + { + if (sz == cap) + capacity(sz + 1); + new (&data[sz]) T(); + sz++; + } + void push(const T& elem) + { + if (sz == cap) + capacity(sz + 1); + data[sz++] = elem; + } + void push_(const T& elem) + { + assert(sz < cap); + data[sz++] = elem; + } + void pop(void) + { + assert(sz > 0); + sz--, data[sz].~T(); + } + + void startMaintenance(); + void 
endMaintenance(); + void startLoop(); + void endLoop(); + + void remove(const T& elem) + { + int tmp; + for (tmp = 0; tmp < sz; tmp++) + { + if (data[tmp] == elem) + break; + } + if (tmp < sz) + { + assert(data[tmp] == elem); + data[tmp] = data[sz - 1]; + sz = sz - 1; + } + } + + // NOTE: it seems possible that overflow can happen in the 'sz+1' expression of 'push()', but + // in fact it can not since it requires that 'cap' is equal to INT_MAX. This in turn can not + // happen given the way capacities are calculated (below). Essentially, all capacities are + // even, but INT_MAX is odd. + + const T& last(void) const + { + return data[sz - 1]; + } + T& last(void) + { + return data[sz - 1]; + } + + // Vector interface: + const T& operator[](int index) const + { + return data[index]; + } + T& operator[](int index) + { + return data[index]; + } + + // Duplicatation (preferred instead): + void copyTo(vecThreads& copy) const + { + copy.clear(); + copy.growTo(sz); + startLoop(); + for (int i = 0; i < sz; i++) copy[i] = data[i]; + endLoop(); + } + void moveTo(vecThreads& dest) + { + assert(false); // This cannot be made thread safe from here. 
+ dest.clear(true); + startMaintenance(); + dest.data = data; + dest.sz = sz; + dest.cap = cap; + data = NULL; + sz = 0; + cap = 0; + endMaintenance(); + } + void memCopyTo(vecThreads& copy) const + { + copy.capacity(cap); + copy.sz = sz; + memcpy(copy.data, data, sizeof(T) * cap); + } +}; + +template +void vecThreads::startLoop() +{ + bool retry = true; + while (retry) + { + while (!__sync_bool_compare_and_swap(&lock, false, true)); + if (nbusers >= 0) + { + nbusers++; + retry = false; + } + lock = false; + } +} + +template +void vecThreads::endLoop() +{ + while (!__sync_bool_compare_and_swap(&lock, false, true)); + nbusers--; + lock = false; +} + +template +inline void vecThreads::startMaintenance() +{ + bool retry = true; + while (retry) + { + while (!__sync_bool_compare_and_swap(&lock, false, true)); + if (nbusers == 0) + { + nbusers--; + retry = false; + } + lock = false; + } +} + +template +inline void vecThreads::endMaintenance() +{ + while (!__sync_bool_compare_and_swap(&lock, false, true)); + nbusers++; + lock = false; +} +template +inline void vecThreads::capacityProtected(int min_cap) +{ + startMaintenance(); + capacity(min_cap); + endMaintenance(); +} + +template +void vecThreads::capacity(int min_cap) +{ + if (cap >= min_cap) + return; + + int add = imax((min_cap - cap + 1) & ~1, ((cap >> 1) + 2) & ~1); // NOTE: grow by approximately 3/2 + if (add > INT_MAX - cap || ((data = (T*)::realloc(data, (cap += add) * sizeof(T))) == NULL) && errno == ENOMEM) + throw OutOfMemoryException(); +} + +template +void vecThreads::growTo(int size, const T& pad) +{ + if (sz >= size) + return; + startMaintenance(); + capacity(size); + for (int i = sz; i < size; i++) data[i] = pad; + sz = size; + endMaintenance(); +} + +template +void vecThreads::growTo(int size) +{ + if (sz >= size) + return; + startMaintenance(); + capacity(size); + for (int i = sz; i < size; i++) new (&data[i]) T(); + sz = size; + endMaintenance(); +} + +template +void vecThreads::clear(bool dealloc) +{ 
+ if (data != NULL) + { + startMaintenance(); + for (int i = 0; i < sz; i++) data[i].~T(); + sz = 0; + if (dealloc) + free(data), data = NULL, cap = 0; + endMaintenance(); + } +} + +//================================================================================================= +} // namespace Glucose + +#endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/XAlloc.h b/vendors/mugen/glucose-syrup-4.1/mtl/XAlloc.h similarity index 89% rename from libs/mugen/glucose-syrup-4.1/mtl/XAlloc.h rename to vendors/mugen/glucose-syrup-4.1/mtl/XAlloc.h index f8ca4fec47..00e5520061 100644 --- a/libs/mugen/glucose-syrup-4.1/mtl/XAlloc.h +++ b/vendors/mugen/glucose-syrup-4.1/mtl/XAlloc.h @@ -17,31 +17,35 @@ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************************************/ - #ifndef Glucose_XAlloc_h #define Glucose_XAlloc_h #include -#include #include +#include -namespace Glucose { +namespace Glucose +{ //================================================================================================= // Simple layer on top of malloc/realloc to catch out-of-memory situtaions and provide some typing: -class OutOfMemoryException{}; -static inline void* xrealloc(void *ptr, size_t size) +class OutOfMemoryException +{}; +static inline void* xrealloc(void* ptr, size_t size) { void* mem = realloc(ptr, size); - if (mem == NULL && errno == ENOMEM){ + if (mem == NULL && errno == ENOMEM) + { throw OutOfMemoryException(); - }else { + } + else + { return mem; - } + } } //================================================================================================= -} +} // namespace Glucose #endif diff --git a/libs/mugen/glucose-syrup-4.1/mtl/config.mk b/vendors/mugen/glucose-syrup-4.1/mtl/config.mk similarity index 100% rename from libs/mugen/glucose-syrup-4.1/mtl/config.mk rename to 
vendors/mugen/glucose-syrup-4.1/mtl/config.mk diff --git a/libs/mugen/glucose-syrup-4.1/mtl/template.mk b/vendors/mugen/glucose-syrup-4.1/mtl/template.mk similarity index 95% rename from libs/mugen/glucose-syrup-4.1/mtl/template.mk rename to vendors/mugen/glucose-syrup-4.1/mtl/template.mk index db3327a193..091f0589d4 100644 --- a/libs/mugen/glucose-syrup-4.1/mtl/template.mk +++ b/vendors/mugen/glucose-syrup-4.1/mtl/template.mk @@ -8,7 +8,7 @@ PWD = $(shell pwd) EXEC ?= $(notdir $(PWD)) -CSRCS = $(wildcard $(PWD)/*.cc) +CSRCS = $(wildcard $(PWD)/*.cc) DSRCS = $(foreach dir, $(DEPDIR), $(filter-out $(MROOT)/$(dir)/Main.cc, $(wildcard $(MROOT)/$(dir)/*.cc))) CHDRS = $(wildcard $(PWD)/*.h) COBJS = $(CSRCS:.cc=.o) $(DSRCS:.cc=.o) @@ -19,14 +19,14 @@ RCOBJS = $(addsuffix r, $(COBJS)) CXX ?= g++ CFLAGS ?= -Wall -Wno-parentheses -std=c++11 -LFLAGS ?= -Wall -lpthread +LFLAGS ?= -Wall -lpthread COPTIMIZE ?= -O3 CFLAGS += -I$(MROOT) -D __STDC_LIMIT_MACROS -D __STDC_FORMAT_MACROS LFLAGS += -lz -.PHONY : s p d r rs clean +.PHONY : s p d r rs clean s: $(EXEC) p: $(EXEC)_profile @@ -87,11 +87,11 @@ libs libp libd libr: ## Clean rule allclean: clean - + @rm -f ../simp/*.o ../simp/*.or ../simp/*.od ../core/*.o ../core/*.or ../core/*.od clean: rm -f $(EXEC) $(EXEC)_profile $(EXEC)_debug $(EXEC)_release $(EXEC)_static \ - $(COBJS) $(PCOBJS) $(DCOBJS) $(RCOBJS) *.core depend.mk + $(COBJS) $(PCOBJS) $(DCOBJS) $(RCOBJS) *.core depend.mk ## Make dependencies depend.mk: $(CSRCS) $(CHDRS) diff --git a/libs/mugen/glucose-syrup-4.1/parallel/ClausesBuffer.cc b/vendors/mugen/glucose-syrup-4.1/parallel/ClausesBuffer.cc similarity index 54% rename from libs/mugen/glucose-syrup-4.1/parallel/ClausesBuffer.cc rename to vendors/mugen/glucose-syrup-4.1/parallel/ClausesBuffer.cc index 5b18767a00..eb9c129a59 100644 --- a/libs/mugen/glucose-syrup-4.1/parallel/ClausesBuffer.cc +++ b/vendors/mugen/glucose-syrup-4.1/parallel/ClausesBuffer.cc @@ -9,19 +9,19 @@ Labri - Univ. 
Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). @@ -62,7 +62,7 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA * + l1 l2 l3 are the literals of the clause * * ********************************************************************************************** - * **CAREFUL** This class is not thread-safe. In glucose-syrup, the SharedCompanion is + * **CAREFUL** This class is not thread-safe. 
In glucose-syrup, the SharedCompanion is * responsible for ensuring atomicity of main functions * ********************************************************************************************** * @@ -81,155 +81,182 @@ extern IntOption opt_fifoSizeByCore; // index + 1 : nbSeen // index + 2 : threadId // index + 3 : .. index + 3 + size : Lit of clause -ClausesBuffer::ClausesBuffer(int _nbThreads, unsigned int _maxsize) : first(0), last(_maxsize-1), - maxsize(_maxsize), queuesize(0), - removedClauses(0), - forcedRemovedClauses(0), nbThreads(_nbThreads), - whenFullRemoveOlder(opt_whenFullRemoveOlder), fifoSizeByCore(opt_fifoSizeByCore) { - lastOfThread.growTo(_nbThreads); - for(int i=0;i= maxsize) - return i - maxsize; + return i - maxsize; return i; } -void ClausesBuffer::removeLastClause() { +void ClausesBuffer::removeLastClause() +{ assert(queuesize > 0); - do { - unsigned int size = (unsigned int) elems[nextIndex(last)]; - unsigned int nextlast = addIndex(last, size+headerSize); - - for(int i=0;i 0); - queuesize --; - } - removedClauses ++; - assert(last >= 0); - assert(last < maxsize); - assert(last == nextlast); - } while (queuesize > 0 && (elems[addIndex(last,2)] == 0)); - + do + { + unsigned int size = (unsigned int)elems[nextIndex(last)]; + unsigned int nextlast = addIndex(last, size + headerSize); + + for (int i = 0; i < nbThreads; i++) + { + if (lastOfThread[i] == last) + lastOfThread[i] = nextlast; + } + + // printf("Removing clause starting at %d of size %d.\n",nextIndex(last), size); + for (unsigned int i = 0; i < size + headerSize; i++) + { + last = nextIndex(last); + assert(queuesize > 0); + queuesize--; + } + removedClauses++; + assert(last >= 0); + assert(last < maxsize); + assert(last == nextlast); + } while (queuesize > 0 && (elems[addIndex(last, 2)] == 0)); } - // Pushes a single uint to the fifo -inline void ClausesBuffer::noCheckPush(uint32_t x) { +inline void ClausesBuffer::noCheckPush(uint32_t x) +{ elems[first] = x; - first = nextIndex(first); 
+ first = nextIndex(first); } // Pops a single uint from the fifo -inline uint32_t ClausesBuffer::noCheckPop(uint32_t & index) { - index = nextIndex(index); +inline uint32_t ClausesBuffer::noCheckPop(uint32_t& index) +{ + index = nextIndex(index); uint32_t ret = elems[index]; return ret; } - - // Return true if the clause was succesfully added -bool ClausesBuffer::pushClause(int threadId, Clause & c) { +bool ClausesBuffer::pushClause(int threadId, Clause& c) +{ if (!whenFullRemoveOlder && (queuesize + c.size() + headerSize >= maxsize)) - return false; // We need to remove some old clauses - while (queuesize + c.size() + headerSize >= maxsize) { // We need to remove some old clauses - forcedRemovedClauses ++; - removeLastClause(); - assert(queuesize > 0); + return false; // We need to remove some old clauses + while (queuesize + c.size() + headerSize >= maxsize) + { // We need to remove some old clauses + forcedRemovedClauses++; + removeLastClause(); + assert(queuesize > 0); } noCheckPush(c.size()); - noCheckPush(nbThreads>1?nbThreads-1:1); + noCheckPush(nbThreads > 1 ? 
nbThreads - 1 : 1); noCheckPush(threadId); - for(int i=0;i (%d, %d)\n", first, last); } -bool ClausesBuffer::getClause(int threadId, int & threadOrigin, vec & resultClause, bool firstFound) { +bool ClausesBuffer::getClause(int threadId, int& threadOrigin, vec& resultClause, bool firstFound) +{ assert(lastOfThread.size() > threadId); unsigned int thislast = lastOfThread[threadId]; - assert(!firstFound || thislast == last); // FIXME: Gilles has this assertion on his cluster + assert(!firstFound || thislast == last); // FIXME: Gilles has this assertion on his cluster // Early exiting - if (nextIndex(thislast) == first) return false; - - if ( ( thislast < last && last < first) || - ( first < thislast && thislast < last ) || - ( last < first && first < thislast) ) { - // Special case where last has moved and lastOfThread[threadId] is no more valid (is behind) - thislast = last; + if (nextIndex(thislast) == first) + return false; + + if ((thislast < last && last < first) || (first < thislast && thislast < last) || + (last < first && first < thislast)) + { + // Special case where last has moved and lastOfThread[threadId] is no more valid (is behind) + thislast = last; } assert(!firstFound); // Go to next clause for this thread id - if (!firstFound) { - while (nextIndex(thislast) != first && elems[addIndex(thislast,3)] == ((unsigned int)threadId)) { // 3 = 2 + 1 - thislast = addIndex(thislast, elems[nextIndex(thislast)] + headerSize); // - assert(thislast >= 0); - assert(thislast < maxsize); - } - assert(nextIndex(thislast)==first || elems[addIndex(thislast,3)] != (unsigned int)threadId); + if (!firstFound) + { + while (nextIndex(thislast) != first && elems[addIndex(thislast, 3)] == ((unsigned int)threadId)) + { // 3 = 2 + 1 + thislast = addIndex(thislast, elems[nextIndex(thislast)] + headerSize); // + assert(thislast >= 0); + assert(thislast < maxsize); + } + assert(nextIndex(thislast) == first || elems[addIndex(thislast, 3)] != (unsigned int)threadId); } - if 
(nextIndex(thislast) == first) { - lastOfThread[threadId] = thislast; - return false; - } - assert(elems[addIndex(thislast,3)] != ((unsigned int) threadId)); + if (nextIndex(thislast) == first) + { + lastOfThread[threadId] = thislast; + return false; + } + assert(elems[addIndex(thislast, 3)] != ((unsigned int)threadId)); unsigned int previouslast = thislast; - bool removeAfter = false; - int csize = noCheckPop(thislast); - removeAfter = (--elems[addIndex(thislast,1)] == 0); // We are sure this is not one of our own clause - thislast = nextIndex(thislast); // Skips the removeAfter fieldr - threadOrigin = noCheckPop(thislast); + bool removeAfter = false; + int csize = noCheckPop(thislast); + removeAfter = (--elems[addIndex(thislast, 1)] == 0); // We are sure this is not one of our own clause + thislast = nextIndex(thislast); // Skips the removeAfter fieldr + threadOrigin = noCheckPop(thislast); assert(threadOrigin != threadId); resultClause.clear(); - for(int i=0;i elems; - unsigned int first; - unsigned int last; - unsigned int maxsize; - unsigned int queuesize; // Number of current elements (must be < maxsize !) 
- unsigned int removedClauses; - unsigned int forcedRemovedClauses; - static const int headerSize = 3; - int nbThreads; - bool whenFullRemoveOlder; - unsigned int fifoSizeByCore; - vec lastOfThread; // Last value for a thread - - public: - ClausesBuffer(int _nbThreads, unsigned int _maxsize); - ClausesBuffer(); - - void setNbThreads(int _nbThreads); - unsigned int nextIndex(unsigned int i); - unsigned int addIndex(unsigned int i, unsigned int a); - void removeLastClause(); - - void noCheckPush(uint32_t x); - uint32_t noCheckPop(unsigned int & index); - - // Return true if the clause was succesfully added - bool pushClause(int threadId, Clause & c); - bool getClause(int threadId, int & threadOrigin, vec & resultClause, bool firstFound = false); - - int maxSize() const {return maxsize;} - uint32_t getCap(); - void growTo(int size) { - assert(0); // Not implemented (essentially for efficiency reasons) - elems.growTo(size); - first=0; maxsize=size; queuesize = 0;last = 0; - for(int i=0;i elems; + unsigned int first; + unsigned int last; + unsigned int maxsize; + unsigned int queuesize; // Number of current elements (must be < maxsize !) 
+ unsigned int removedClauses; + unsigned int forcedRemovedClauses; + static const int headerSize = 3; + int nbThreads; + bool whenFullRemoveOlder; + unsigned int fifoSizeByCore; + vec lastOfThread; // Last value for a thread + + public: + ClausesBuffer(int _nbThreads, unsigned int _maxsize); + ClausesBuffer(); + + void setNbThreads(int _nbThreads); + unsigned int nextIndex(unsigned int i); + unsigned int addIndex(unsigned int i, unsigned int a); + void removeLastClause(); + + void noCheckPush(uint32_t x); + uint32_t noCheckPop(unsigned int& index); + + // Return true if the clause was succesfully added + bool pushClause(int threadId, Clause& c); + bool getClause(int threadId, int& threadOrigin, vec& resultClause, bool firstFound = false); + + int maxSize() const + { + return maxsize; + } + uint32_t getCap(); + void growTo(int size) + { + assert(0); // Not implemented (essentially for efficiency reasons) + elems.growTo(size); + first = 0; + maxsize = size; + queuesize = 0; + last = 0; + for (int i = 0; i < size; i++) elems[i] = 0; + } + + void fastclear() + { + first = 0; + last = 0; + queuesize = 0; + } + + int size(void) + { + return queuesize; + } + + void clear(bool dealloc = false) + { + elems.clear(dealloc); + first = 0; + maxsize = 0; + queuesize = 0; + } + inline int toInt(Lit p) + { + return p.x; + } +}; +} // namespace Glucose //================================================================================================= #endif diff --git a/libs/mugen/glucose-syrup-4.1/parallel/Main.cc b/vendors/mugen/glucose-syrup-4.1/parallel/Main.cc similarity index 52% rename from libs/mugen/glucose-syrup-4.1/parallel/Main.cc rename to vendors/mugen/glucose-syrup-4.1/parallel/Main.cc index a95192c420..52d2454352 100644 --- a/libs/mugen/glucose-syrup-4.1/parallel/Main.cc +++ b/vendors/mugen/glucose-syrup-4.1/parallel/Main.cc @@ -9,19 +9,19 @@ Labri - Univ. Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). 
Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). @@ -47,209 +47,248 @@ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**************************************************************************************************/ -#include - -#include -#include - - -#include "utils/System.h" -#include "utils/ParseUtils.h" -#include "utils/Options.h" #include "core/Dimacs.h" #include "core/SolverTypes.h" - -#include "simp/SimpSolver.h" -#include "parallel/ParallelSolver.h" #include "parallel/MultiSolvers.h" +#include "parallel/ParallelSolver.h" +#include "simp/SimpSolver.h" +#include "utils/Options.h" +#include "utils/ParseUtils.h" +#include "utils/System.h" -using namespace Glucose; - +#include +#include +#include +using namespace Glucose; static MultiSolvers* pmsolver; // Terminate by notifying the solver and back out gracefully. This is mainly to have a test-case // for this feature of the Solver as it may take longer than an immediate call to '_exit()'. -//static void SIGINT_interrupt(int signum) { pmsolver->interrupt(); } - +// static void SIGINT_interrupt(int signum) { pmsolver->interrupt(); } // Note that '_exit()' rather than 'exit()' has to be used. The reason is that 'exit()' calls // destructors and may cause deadlocks if a malloc/free function happens to be running (these // functions are guarded by locks for multithreaded use). 
-static void SIGINT_exit(int signum) { - printf("\n"); printf("*** INTERRUPTED ***\n"); - if (pmsolver->verbosity() > 0){ +static void SIGINT_exit(int signum) +{ + printf("\n"); + printf("*** INTERRUPTED ***\n"); + if (pmsolver->verbosity() > 0) + { pmsolver->printFinalStats(); - printf("\n"); printf("*** INTERRUPTED ***\n"); } - _exit(1); } - + printf("\n"); + printf("*** INTERRUPTED ***\n"); + } + _exit(1); +} //================================================================================================= // Main: - int main(int argc, char** argv) { double realTimeStart = realTime(); - printf("c\nc This is glucose-syrup 4.0 (glucose in many threads) -- based on MiniSAT (Many thanks to MiniSAT team)\nc\n"); - try { - setUsageHelp("c USAGE: %s [options] \n\n where input may be either in plain or gzipped DIMACS.\n"); + printf("c\nc This is glucose-syrup 4.0 (glucose in many threads) -- based on MiniSAT (Many thanks to MiniSAT " + "team)\nc\n"); + try + { + setUsageHelp("c USAGE: %s [options] \n\n where input may be either in plain " + "or gzipped DIMACS.\n"); // printf("This is MiniSat 2.0 beta\n"); - + // Extra options: // - IntOption verb ("MAIN", "verb", "Verbosity level (0=silent, 1=some, 2=more).", 1, IntRange(0, 2)); - BoolOption mod ("MAIN", "model", "show model.", false); - IntOption vv ("MAIN", "vv", "Verbosity every vv conflicts", 10000, IntRange(1,INT32_MAX)); - BoolOption pre ("MAIN", "pre", "Completely turn on/off any preprocessing.", true); - - IntOption cpu_lim("MAIN", "cpu-lim","Limit on CPU time allowed in seconds.\n", INT32_MAX, IntRange(0, INT32_MAX)); - IntOption mem_lim("MAIN", "mem-lim","Limit on memory usage in megabytes.\n", INT32_MAX, IntRange(0, INT32_MAX)); - + IntOption verb("MAIN", "verb", "Verbosity level (0=silent, 1=some, 2=more).", 1, IntRange(0, 2)); + BoolOption mod("MAIN", "model", "show model.", false); + IntOption vv("MAIN", "vv", "Verbosity every vv conflicts", 10000, IntRange(1, INT32_MAX)); + BoolOption pre("MAIN", "pre", 
"Completely turn on/off any preprocessing.", true); + + IntOption cpu_lim("MAIN", "cpu-lim", "Limit on CPU time allowed in seconds.\n", INT32_MAX, + IntRange(0, INT32_MAX)); + IntOption mem_lim("MAIN", "mem-lim", "Limit on memory usage in megabytes.\n", INT32_MAX, + IntRange(0, INT32_MAX)); + parseOptions(argc, argv, true); - MultiSolvers msolver; - pmsolver = & msolver; + MultiSolvers msolver; + pmsolver = &msolver; msolver.setVerbosity(verb); msolver.setVerbEveryConflicts(vv); msolver.setShowModel(mod); double initial_time = cpuTime(); - // Use signal handlers that forcibly quit until the solver will be able to respond to + // Use signal handlers that forcibly quit until the solver will be able to respond to // interrupts: - signal(SIGINT, SIGINT_exit); - signal(SIGXCPU,SIGINT_exit); + signal(SIGINT, SIGINT_exit); + signal(SIGXCPU, SIGINT_exit); // Set limit on CPU-time: - if (cpu_lim != INT32_MAX){ + if (cpu_lim != INT32_MAX) + { rlimit rl; getrlimit(RLIMIT_CPU, &rl); - if (rl.rlim_max == RLIM_INFINITY || (rlim_t)cpu_lim < rl.rlim_max){ + if (rl.rlim_max == RLIM_INFINITY || (rlim_t)cpu_lim < rl.rlim_max) + { rl.rlim_cur = cpu_lim; if (setrlimit(RLIMIT_CPU, &rl) == -1) printf("c WARNING! Could not set resource limit: CPU-time.\n"); - } } + } + } // Set limit on virtual memory: - if (mem_lim != INT32_MAX){ - rlim_t new_mem_lim = (rlim_t)mem_lim * 1024*1024; + if (mem_lim != INT32_MAX) + { + rlim_t new_mem_lim = (rlim_t)mem_lim * 1024 * 1024; rlimit rl; getrlimit(RLIMIT_AS, &rl); - if (rl.rlim_max == RLIM_INFINITY || new_mem_lim < rl.rlim_max){ + if (rl.rlim_max == RLIM_INFINITY || new_mem_lim < rl.rlim_max) + { rl.rlim_cur = new_mem_lim; if (setrlimit(RLIMIT_AS, &rl) == -1) printf("c WARNING! Could not set resource limit: Virtual memory.\n"); - } } - + } + } + if (argc == 1) printf("c Reading from standard input... Use '--help' for help.\n"); - + gzFile in = (argc == 1) ? gzdopen(0, "rb") : gzopen(argv[1], "rb"); if (in == NULL) printf("c ERROR! 
Could not open file: %s\n", argc == 1 ? "" : argv[1]), exit(1); - - if (msolver.verbosity() > 0){ - printf("c ========================================[ Problem Statistics ]===========================================\n"); - printf("c | |\n"); } - + + if (msolver.verbosity() > 0) + { + printf("c ========================================[ Problem Statistics " + "]===========================================\n"); + printf("c | " + " |\n"); + } + parse_DIMACS(in, msolver); gzclose(in); - - - FILE* res = (argc >= 3) ? fopen(argv[argc-1], "wb") : NULL; + FILE* res = (argc >= 3) ? fopen(argv[argc - 1], "wb") : NULL; + + if (msolver.verbosity() > 0) + { + printf( + "c | Number of variables: %12d |\n", + msolver.nVars()); + printf( + "c | Number of clauses: %12d |\n", + msolver.nClauses()); + } - if (msolver.verbosity() > 0){ - printf("c | Number of variables: %12d |\n", msolver.nVars()); - printf("c | Number of clauses: %12d |\n", msolver.nClauses()); } - double parsed_time = cpuTime(); - if (msolver.verbosity() > 0){ - printf("c | Parse time: %12.2f s |\n", parsed_time - initial_time); - printf("c | |\n"); } - + if (msolver.verbosity() > 0) + { + printf("c | Parse time: %12.2f s " + " |\n", + parsed_time - initial_time); + printf("c | " + " |\n"); + } + // Change to signal-handlers that will only notify the solver and allow it to terminate // voluntarily: - //signal(SIGINT, SIGINT_interrupt); - //signal(SIGXCPU,SIGINT_interrupt); - - - int ret2 = msolver.simplify(); - msolver.use_simplification = pre; - if(ret2) + // signal(SIGINT, SIGINT_interrupt); + // signal(SIGXCPU,SIGINT_interrupt); + + int ret2 = msolver.simplify(); + msolver.use_simplification = pre; + if (ret2) msolver.eliminate(); - if(pre) { + if (pre) + { double simplified_time = cpuTime(); - if (msolver.verbosity() > 0){ - printf("c | Simplification time: %12.2f s |\n", simplified_time - parsed_time); - printf("c | |\n"); } + if (msolver.verbosity() > 0) + { + printf("c | Simplification time: %12.2f s " + " 
|\n", + simplified_time - parsed_time); + printf("c | " + " |\n"); + } } - - if (!ret2 || !msolver.okay()){ - //if (S.certifiedOutput != NULL) fprintf(S.certifiedOutput, "0\n"), fclose(S.certifiedOutput); - if (res != NULL) fprintf(res, "UNSAT\n"), fclose(res); - if (msolver.verbosity() > 0){ - printf("c =========================================================================================================\n"); - printf("Solved by unit propagation\n"); - printf("c real time : %g s\n", realTime() - realTimeStart); - printf("c cpu time : %g s\n", cpuTime()); - printf("\n"); } + + if (!ret2 || !msolver.okay()) + { + // if (S.certifiedOutput != NULL) fprintf(S.certifiedOutput, "0\n"), fclose(S.certifiedOutput); + if (res != NULL) + fprintf(res, "UNSAT\n"), fclose(res); + if (msolver.verbosity() > 0) + { + printf("c " + "===============================================================================================" + "==========\n"); + printf("Solved by unit propagation\n"); + printf("c real time : %g s\n", realTime() - realTimeStart); + printf("c cpu time : %g s\n", cpuTime()); + printf("\n"); + } printf("s UNSATISFIABLE\n"); exit(20); } - // vec dummy; + // vec dummy; lbool ret = msolver.solve(); - - + printf("c\n"); - printf("c real time : %g s\n", realTime() - realTimeStart); - printf("c cpu time : %g s\n", cpuTime()); - if (msolver.verbosity() > 0){ + printf("c real time : %g s\n", realTime() - realTimeStart); + printf("c cpu time : %g s\n", cpuTime()); + if (msolver.verbosity() > 0) + { msolver.printFinalStats(); - printf("\n"); } - - //-------------- Result is put in a external file - /* I must admit I have to print the model of one thread... But which one? FIXME !! 
- if (res != NULL){ - if (ret == l_True){ - fprintf(res, "SAT\n"); - for (int i = 0; i < S.nVars(); i++) - if (S.model[i] != l_Undef) - fprintf(res, "%s%s%d", (i==0)?"":" ", (S.model[i]==l_True)?"":"-", i+1); - fprintf(res, " 0\n"); - }else if (ret == l_False) - fprintf(res, "UNSAT\n"); - else - fprintf(res, "INDET\n"); - fclose(res); - - //-------------- Want certified output - } else { - */ - printf(ret == l_True ? "s SATISFIABLE\n" : ret == l_False ? "s UNSATISFIABLE\n" : "s INDETERMINATE\n"); - - if(msolver.getShowModel() && ret==l_True) { - printf("v "); - for (int i = 0; i < msolver.model.size() ; i++) { + printf("\n"); + } + + //-------------- Result is put in a external file + /* I must admit I have to print the model of one thread... But which one? FIXME !! + if (res != NULL){ + if (ret == l_True){ + fprintf(res, "SAT\n"); + for (int i = 0; i < S.nVars(); i++) + if (S.model[i] != l_Undef) + fprintf(res, "%s%s%d", (i==0)?"":" ", (S.model[i]==l_True)?"":"-", i+1); + fprintf(res, " 0\n"); + }else if (ret == l_False) + fprintf(res, "UNSAT\n"); + else + fprintf(res, "INDET\n"); + fclose(res); + + //-------------- Want certified output + } else { + */ + printf(ret == l_True ? "s SATISFIABLE\n" : ret == l_False ? "s UNSATISFIABLE\n" : "s INDETERMINATE\n"); + + if (msolver.getShowModel() && ret == l_True) + { + printf("v "); + for (int i = 0; i < msolver.model.size(); i++) + { assert(msolver.model[i] != l_Undef); if (msolver.model[i] != l_Undef) - printf("%s%s%d", (i==0)?"":" ", (msolver.model[i]==l_True)?"":"-", i+1); + printf("%s%s%d", (i == 0) ? "" : " ", (msolver.model[i] == l_True) ? "" : "-", i + 1); } - printf(" 0\n"); - } + printf(" 0\n"); + } - - #ifdef NDEBUG - exit(ret == l_True ? 10 : ret == l_False ? 20 : 0); // (faster than "return", which will invoke the destructor for 'Solver') + exit(ret == l_True ? 10 : + ret == l_False ? 20 : + 0); // (faster than "return", which will invoke the destructor for 'Solver') #else return (ret == l_True ? 
10 : ret == l_False ? 20 : 0); #endif - } catch (OutOfMemoryException&){ - printf("c ===================================================================================================\n"); + } + catch (OutOfMemoryException&) + { + printf( + "c ===================================================================================================\n"); printf("INDETERMINATE\n"); exit(0); } diff --git a/libs/mugen/glucose-syrup-4.1/parallel/Makefile b/vendors/mugen/glucose-syrup-4.1/parallel/Makefile similarity index 100% rename from libs/mugen/glucose-syrup-4.1/parallel/Makefile rename to vendors/mugen/glucose-syrup-4.1/parallel/Makefile diff --git a/vendors/mugen/glucose-syrup-4.1/parallel/MultiSolvers.cc b/vendors/mugen/glucose-syrup-4.1/parallel/MultiSolvers.cc new file mode 100644 index 0000000000..2c30f54a4a --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/parallel/MultiSolvers.cc @@ -0,0 +1,754 @@ +/***************************************************************************************[MultiSolvers.cc] + Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + LRI - Univ. Paris Sud, France (2009-2013) + Labri - Univ. Bordeaux, France + + Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + Labri - Univ. Bordeaux, France + +Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +is based on. (see below). + +Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel +version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software +without restriction, including the rights to use, copy, modify, merge, publish, distribute, +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +- The above and below copyrights notices and this permission notice shall be included in all +copies or substantial portions of the Software; +- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot +be used in any competitive event (sat competitions/evaluations) without the express permission of +the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event +using Glucose Parallel as an embedded SAT engine (single core or not). + + +--------------- Original Minisat Copyrights + +Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson +Copyright (c) 2007-2010, Niklas Sorensson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + **************************************************************************************************/ + +#include "parallel/MultiSolvers.h" + +#include "mtl/Sort.h" +#include "parallel/SolverConfiguration.h" +#include "simp/SimpSolver.h" +#include "utils/System.h" + +#include +#include +#include + +using namespace Glucose; + +extern const char* _parallel; +extern const char* _cunstable; +// Options at the parallel solver level +static IntOption opt_nbsolversmultithreads(_parallel, "nthreads", "Number of core threads for syrup (0 for automatic)", + 0); +static IntOption opt_maxnbsolvers(_parallel, "maxnbthreads", + "Maximum number of core threads to ask for (when nbthreads=0)", 4); +static IntOption opt_maxmemory(_parallel, "maxmemory", "Maximum memory to use (in Mb, 0 for no software limit)", 20000); +static IntOption opt_statsInterval(_parallel, "statsinterval", "Seconds (real time) between two stats reports", 5); +// +// Shared with ClausesBuffer.cc +BoolOption opt_whenFullRemoveOlder(_parallel, "removeolder", + "When the FIFO for exchanging clauses between threads is full, remove older clauses", + false); +IntOption opt_fifoSizeByCore(_parallel, "fifosize", + "Size of the FIFO structure for exchanging clauses between threads, by threads", 100000); +// +// Shared options with Solver.cc +BoolOption opt_dontExportDirectReusedClauses(_cunstable, "reusedClauses", "Don't export directly reused clauses", + false); +BoolOption opt_plingeling(_cunstable, "plingeling", "plingeling strategy for sharing clauses (exploratory feature)", + false); + +#include +#include +#include + +static inline double cpuTime(void) +{ + struct rusage ru; + getrusage(RUSAGE_SELF, &ru); + return (double)ru.ru_utime.tv_sec + 
(double)ru.ru_utime.tv_usec / 1000000; +} + +void MultiSolvers::informEnd(lbool res) +{ + result = res; + pthread_cond_broadcast(&cfinished); +} + +MultiSolvers::MultiSolvers(ParallelSolver* s) : + use_simplification(true), + ok(true), + maxnbthreads(4), + nbthreads(opt_nbsolversmultithreads), + nbsolvers(opt_nbsolversmultithreads), + nbcompanions(4), + nbcompbysolver(2), + allClonesAreBuilt(0), + showModel(false), + winner(-1), + var_decay(1 / 0.95), + clause_decay(1 / 0.999), + cla_inc(1), + var_inc(1), + random_var_freq(0.02), + restart_first(100), + restart_inc(1.5), + learntsize_factor((double)1 / (double)3), + learntsize_inc(1.1), + expensive_ccmin(true), + polarity_mode(polarity_false), + maxmemory(opt_maxmemory), + maxnbsolvers(opt_maxnbsolvers), + verb(0), + verbEveryConflicts(10000), + numvar(0), + numclauses(0) +{ + result = l_Undef; + SharedCompanion* sc = new SharedCompanion(); + this->sharedcomp = sc; + + // Generate only solver 0. + // It loads the formula + // All others solvers are clone of this one + solvers.push(s); + s->verbosity = 0; // No reportf in solvers... All is done in MultiSolver + s->setThreadNumber(0); + // s->belongsto = this; + s->sharedcomp = sc; + sc->addSolver(s); + assert(solvers[0]->threadNumber() == 0); + + pthread_mutex_init(&m, NULL); // PTHREAD_MUTEX_INITIALIZER; + pthread_mutex_init(&mfinished, NULL); // PTHREAD_MUTEX_INITIALIZER; + pthread_cond_init(&cfinished, NULL); + + if (nbsolvers > 0) + fprintf(stdout, "c %d solvers engines and 1 companion as a blackboard created.\n", nbsolvers); +} + +MultiSolvers::MultiSolvers() : MultiSolvers(new ParallelSolver(-1)) {} + +MultiSolvers::~MultiSolvers() {} + +/** + * Generate All solvers + */ + +void MultiSolvers::generateAllSolvers() +{ + assert(solvers[0] != NULL); + assert(allClonesAreBuilt == 0); + + for (int i = 1; i < nbsolvers; i++) + { + ParallelSolver* s = (ParallelSolver*)solvers[0]->clone(); + solvers.push(s); + s->verbosity = 0; // No reportf in solvers... 
All is done in MultiSolver + s->setThreadNumber(i); + s->sharedcomp = this->sharedcomp; + this->sharedcomp->addSolver(s); + assert(solvers[i]->threadNumber() == i); + } + + adjustParameters(); + + allClonesAreBuilt = 1; +} + +/** + * Choose solver for threads i (if no given in command line see above) + */ + +ParallelSolver* MultiSolvers::retrieveSolver(int i) +{ + return new ParallelSolver(i); +} + +Var MultiSolvers::newVar(bool sign, bool dvar) +{ + assert(solvers[0] != NULL); + numvar++; + int v; + sharedcomp->newVar(sign); + if (!allClonesAreBuilt) + { // At the beginning we want to generate only solvers 0 + v = solvers[0]->newVar(sign, dvar); + assert(numvar == v + 1); // Just a useless check + } + else + { + for (int i = 0; i < nbsolvers; i++) + { + v = solvers[i]->newVar(sign, dvar); + } + } + return numvar; +} + +bool MultiSolvers::addClause_(vec& ps) +{ + assert(solvers[0] != NULL); // There is at least one solver. + // Check if clause is satisfied and remove false/duplicate literals: + if (!okay()) + return false; + + sort(ps); + Lit p; + int i, j; + for (i = j = 0, p = lit_Undef; i < ps.size(); i++) + if (solvers[0]->value(ps[i]) == l_True || ps[i] == ~p) + return true; + else if (solvers[0]->value(ps[i]) != l_False && ps[i] != p) + ps[j++] = p = ps[i]; + ps.shrink(i - j); + + if (ps.size() == 0) + { + return ok = false; + } + else if (ps.size() == 1) + { + assert(solvers[0]->value(ps[0]) == l_Undef); // TODO : Passes values to all threads + solvers[0]->uncheckedEnqueue(ps[0]); + if (!allClonesAreBuilt) + { + return ok = ((solvers[0]->propagate()) == + CRef_Undef); // checks only main solver here for propagation constradiction + } + + // Here, all clones are built. 
+ // Gives the unit clause to everybody + for (int i = 0; i < nbsolvers; i++) solvers[i]->uncheckedEnqueue(ps[0]); + return ok = ((solvers[0]->propagate()) == + CRef_Undef); // checks only main solver here for propagation constradiction + } + else + { + // printf("Adding clause %0xd for solver %d.\n",(void*)c, thn); + // At the beginning only solver 0 load the formula + solvers[0]->addClause(ps); + + if (!allClonesAreBuilt) + { + numclauses++; + return true; + } + // Clones are built, need to pass the clause to all the threads + for (int i = 1; i < nbsolvers; i++) + { + solvers[i]->addClause(ps); + } + numclauses++; + } + return true; +} + +bool MultiSolvers::simplify() +{ + assert(solvers[0] != NULL); // There is at least one solver. + + if (!okay()) + return false; + return ok = solvers[0]->simplify(); +} + +bool MultiSolvers::eliminate() +{ + + // TODO allow variable elimination when all threads are built! + assert(allClonesAreBuilt == false); + + SimpSolver* s = (SimpSolver*)getPrimarySolver(); + s->use_simplification = use_simplification; + if (!use_simplification) + return true; + + return s->eliminate(true); +} + +// TODO: Use a template here +void* localLaunch(void* arg) +{ + ParallelSolver* s = (ParallelSolver*)arg; + + (void)s->solve(); + + pthread_exit(NULL); +} + +#define MAXIMUM_SLEEP_DURATION 5 + +void MultiSolvers::printStats() +{ + static int nbprinted = 1; + double cpu_time = cpuTime(); + printf("c\n"); + + printf( + "c " + "|-------------------------------------------------------------------------------------------------------|\n"); + printf("c | id | starts | decisions | confls | Init T | learnts | exported | imported | promoted | %% " + "| \n"); + printf( + "c " + "|-------------------------------------------------------------------------------------------------------|\n"); + + // printf("%.0fs | ",cpu_time); + for (int i = 0; i < solvers.size(); i++) + { + solvers[i]->reportProgress(); + // printf(" %2d: %12ld confl. 
|", i, (long int) solvers[i]->conflicts); + } + long long int totalconf = 0; + long long int totalprop = 0; + for (int i = 0; i < solvers.size(); i++) + { + totalconf += (long int)solvers[i]->conflicts; + totalprop += solvers[i]->propagations; + } + printf("c \n"); + + printf("c synthesis %11lld conflicts %11lld propagations %8.0f conflicts/sec %8.0f propagations/sec\n", totalconf, + totalprop, (double)totalconf / cpu_time, (double)totalprop / cpu_time); + + nbprinted++; +} + +// Still a ugly function... To be rewritten with some statistics class some day +void MultiSolvers::printFinalStats() +{ + sharedcomp->printStats(); + printf("c\nc\n"); + printf("c\n"); + printf("c |---------------------------------------- FINAL STATS " + "--------------------------------------------------|\n"); + printf("c\n"); + + printf("c |---------------|-----------------"); + for (int i = 0; i < solvers.size(); i++) printf("|------------"); + printf("|\n"); + + printf("c | Threads | Total "); + for (int i = 0; i < solvers.size(); i++) + { + printf("| %10d ", i); + } + printf("|\n"); + + printf("c |---------------|-----------------"); + for (int i = 0; i < solvers.size(); i++) printf("|------------"); + printf("|\n"); + + //-- + printf("c | Conflicts "); + long long int totalconf = 0; + for (int i = 0; i < solvers.size(); i++) totalconf += solvers[i]->conflicts; + printf("| %15lld ", totalconf); + + for (int i = 0; i < solvers.size(); i++) printf("| %10" PRIu64 " ", solvers[i]->conflicts); + printf("|\n"); + + //-- + printf("c | Decisions "); + long long int totaldecs = 0; + for (int i = 0; i < solvers.size(); i++) totaldecs += solvers[i]->decisions; + printf("| %15lld ", totaldecs); + + for (int i = 0; i < solvers.size(); i++) printf("| %10" PRIu64 " ", solvers[i]->decisions); + printf("|\n"); + + //-- + printf("c | Propagations "); + long long int totalprops = 0; + for (int i = 0; i < solvers.size(); i++) totalprops += solvers[i]->propagations; + printf("| %15lld ", totalprops); + + 
for (int i = 0; i < solvers.size(); i++) printf("| %10" PRIu64 " ", solvers[i]->propagations); + printf("|\n"); + + printf("c | Avg_Trail "); + printf("| "); + for (int i = 0; i < solvers.size(); i++) + printf("| %10" PRIu64 " ", + solvers[i]->conflicts == 0 ? 0 : solvers[i]->stats[sumTrail] / solvers[i]->conflicts); + printf("|\n"); + + //-- + printf("c | Avg_DL "); + printf("| "); + for (int i = 0; i < solvers.size(); i++) + printf("| %10" PRIu64 " ", + solvers[i]->conflicts == 0 ? 0 : solvers[i]->stats[sumDecisionLevels] / solvers[i]->conflicts); + printf("|\n"); + + //-- + printf("c | Avg_Res "); + printf("| "); + for (int i = 0; i < solvers.size(); i++) + printf("| %10" PRIu64 " ", solvers[i]->conflicts == 0 ? 0 : solvers[i]->stats[sumRes] / solvers[i]->conflicts); + printf("|\n"); + + //-- + printf("c | Avg_Res_Seen "); + printf("| "); + for (int i = 0; i < solvers.size(); i++) + printf("| %10" PRIu64 " ", + solvers[i]->conflicts == 0 ? 0 : solvers[i]->stats[sumResSeen] / solvers[i]->conflicts); + printf("|\n"); + + //-- + + printf("c |---------------|-----------------"); + for (int i = 0; i < solvers.size(); i++) printf("|------------"); + printf("|\n"); + + printf("c | Exported "); + uint64_t exported = 0; + for (int i = 0; i < solvers.size(); i++) exported += solvers[i]->stats[nbexported]; + printf("| %15" PRIu64 " ", exported); + + for (int i = 0; i < solvers.size(); i++) printf("| %10" PRIu64 " ", solvers[i]->stats[nbexported]); + printf("|\n"); + //-- + printf("c | Imported "); + uint64_t imported = 0; + for (int i = 0; i < solvers.size(); i++) imported += solvers[i]->stats[nbimported]; + printf("| %15" PRIu64 " ", imported); + for (int i = 0; i < solvers.size(); i++) printf("| %10" PRIu64 " ", solvers[i]->stats[nbimported]); + printf("|\n"); + //-- + + printf("c | Good "); + uint64_t importedGood = 0; + for (int i = 0; i < solvers.size(); i++) importedGood += solvers[i]->stats[nbImportedGoodClauses]; + printf("| %15" PRIu64 " ", importedGood); + for 
(int i = 0; i < solvers.size(); i++) printf("| %10" PRIu64 " ", solvers[i]->stats[nbImportedGoodClauses]); + printf("|\n"); + //-- + + printf("c | Purge "); + uint64_t importedPurg = 0; + for (int i = 0; i < solvers.size(); i++) importedPurg += solvers[i]->stats[nbimportedInPurgatory]; + printf("| %15" PRIu64 " ", importedPurg); + for (int i = 0; i < solvers.size(); i++) printf("| %10" PRIu64 " ", solvers[i]->stats[nbimportedInPurgatory]); + printf("|\n"); + //-- + + printf("c | Promoted "); + uint64_t promoted = 0; + for (int i = 0; i < solvers.size(); i++) promoted += solvers[i]->stats[nbPromoted]; + printf("| %15" PRIu64 " ", promoted); + for (int i = 0; i < solvers.size(); i++) printf("| %10" PRIu64 " ", solvers[i]->stats[nbPromoted]); + printf("|\n"); + //-- + + printf("c | Remove_Imp "); + uint64_t removedimported = 0; + for (int i = 0; i < solvers.size(); i++) removedimported += solvers[i]->stats[nbRemovedUnaryWatchedClauses]; + printf("| %15" PRIu64 " ", removedimported); + for (int i = 0; i < solvers.size(); i++) + printf("| %10" PRIu64 " ", solvers[i]->stats[nbRemovedUnaryWatchedClauses]); + printf("|\n"); + //-- + + printf("c | Blocked_Reuse "); + uint64_t blockedreused = 0; + for (int i = 0; i < solvers.size(); i++) blockedreused += solvers[i]->nbNotExportedBecauseDirectlyReused; + printf("| %15" PRIu64 " ", blockedreused); + for (int i = 0; i < solvers.size(); i++) printf("| %10" PRIu64 " ", solvers[i]->nbNotExportedBecauseDirectlyReused); + printf("|\n"); + //-- + printf("c |---------------|-----------------"); + for (int i = 0; i < solvers.size(); i++) printf("|------------"); + printf("|\n"); + + printf("c | Unaries "); + printf("| "); + for (int i = 0; i < solvers.size(); i++) + { + printf("| %10" PRIu64 " ", solvers[i]->stats[nbUn]); + } + printf("|\n"); + //-- + + printf("c | Binaries "); + printf("| "); + for (int i = 0; i < solvers.size(); i++) + { + printf("| %10" PRIu64 " ", solvers[i]->stats[nbBin]); + } + printf("|\n"); + //-- + + printf("c 
| Glues "); + printf("| "); + for (int i = 0; i < solvers.size(); i++) + { + printf("| %10" PRIu64 " ", solvers[i]->stats[nbDL2]); + } + printf("|\n"); + //-- + + printf("c |---------------|-----------------"); + for (int i = 0; i < solvers.size(); i++) printf("|------------"); + printf("|\n"); + + printf("c | Orig_Seen "); + uint64_t origseen = 0; + + for (int i = 0; i < solvers.size(); i++) + { + origseen += solvers[i]->stats[originalClausesSeen]; + } + printf("| %13" PRIu64 " %% ", origseen * 100 / nClauses() / solvers.size()); + + for (int i = 0; i < solvers.size(); i++) + { + printf("| %10" PRIu64 " ", solvers[i]->stats[originalClausesSeen]); + } + + printf("|\n"); + + int winner = -1; + for (int i = 0; i < solvers.size(); i++) + { + if (sharedcomp->winner() == solvers[i]) + winner = i; + } + + //-- + if (winner != -1) + { + printf("c | Diff Orig seen"); + printf("| "); + + for (int i = 0; i < solvers.size(); i++) + { + if (i == winner) + { + printf("| X "); + continue; + } + if (solvers[i]->stats[originalClausesSeen] > solvers[winner]->stats[originalClausesSeen]) + printf("| %10" PRIu64 " ", + solvers[i]->stats[originalClausesSeen] - solvers[winner]->stats[originalClausesSeen]); + else + printf("| -%9" PRIu64 " ", + solvers[winner]->stats[originalClausesSeen] - solvers[i]->stats[originalClausesSeen]); + } + + printf("|\n"); + } + + //-- + + if (winner != -1) + { + int sum = 0; + printf("c | Hamming "); + for (int i = 0; i < solvers.size(); i++) + { + if (i == winner) + continue; + int nb = 0; + for (int j = 0; j < nVars(); j++) + { + if (solvers[i]->valuePhase(j) != solvers[winner]->valuePhase(j)) + nb++; + } + sum += nb; + } + sum = sum / (solvers.size() > 1 ? 
solvers.size() - 1 : 1); + + printf("| %13d %% ", sum * 100 / nVars()); + + for (int i = 0; i < solvers.size(); i++) + { + if (i == winner) + { + printf("| X "); + continue; + } + int nb = 0; + for (int j = 0; j < nVars(); j++) + { + if (solvers[i]->valuePhase(j) != solvers[winner]->valuePhase(j)) + nb++; + } + printf("| %10d ", nb); + sum += nb; + } + printf("|\n"); + } + + printf("c |---------------|-----------------"); + for (int i = 0; i < solvers.size(); i++) printf("|------------"); + printf("|\n"); +} + +// Well, all those parameteres are just naive guesses... No experimental evidences for this. +void MultiSolvers::adjustParameters() +{ + SolverConfiguration::configure(this, nbsolvers); +} + +void MultiSolvers::adjustNumberOfCores() +{ + float mem = memUsed(); + if (nbthreads == 0) + { // Automatic configuration + if (verb >= 1) + printf("c | Automatic Adjustement of the number of solvers. MaxMemory=%5d, MaxCores=%3d. " + " |\n", + maxmemory, maxnbsolvers); + unsigned int tmpnbsolvers = maxmemory * 4 / 10 / mem; + if (tmpnbsolvers > maxnbsolvers) + tmpnbsolvers = maxnbsolvers; + if (tmpnbsolvers < 1) + tmpnbsolvers = 1; + if (verb >= 1) + printf("c | One Solver is taking %.2fMb... Let's take %d solvers for this run (max 40%% of the " + "maxmemory). |\n", + mem, tmpnbsolvers); + nbsolvers = tmpnbsolvers; + nbthreads = nbsolvers; + } + else + { + assert(nbthreads == nbsolvers); + } +} + +lbool MultiSolvers::solve() +{ + pthread_attr_t thAttr; + int i; + + adjustNumberOfCores(); + sharedcomp->setNbThreads(nbsolvers); + if (verb >= 1) + printf("c | Generating clones " + " |\n"); + generateAllSolvers(); + if (verb >= 1) + { + printf("c |  all clones generated. Memory = %6.2fMb. 
" + " |\n", + memUsed()); + printf("c " + "=======================================================================================================" + "=|\n"); + } + + model.clear(); + + /* Initialize and set thread detached attribute */ + pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + + // Launching all solvers + for (i = 0; i < nbsolvers; i++) + { + pthread_t* pt = (pthread_t*)malloc(sizeof(pthread_t)); + threads.push(pt); + solvers[i]->pmfinished = &mfinished; + solvers[i]->pcfinished = &cfinished; + pthread_create(threads[i], &thAttr, &localLaunch, (void*)solvers[i]); + } + + bool done = false; + bool adjustedlimitonce = false; + + (void)pthread_mutex_lock(&m); + while (!done) + { + struct timespec timeout; + time(&timeout.tv_sec); + timeout.tv_sec += MAXIMUM_SLEEP_DURATION; + timeout.tv_nsec = 0; + if (pthread_cond_timedwait(&cfinished, &mfinished, &timeout) != ETIMEDOUT) + done = true; + else + printStats(); + + float mem = memUsed(); + if (verb >= 1) + printf("c Total Memory so far : %.2fMb\n", mem); + if ((maxmemory > 0) && (mem > maxmemory) && !sharedcomp->panicMode) + printf("c ** reduceDB switching to Panic Mode due to memory limitations !\n"), sharedcomp->panicMode = true; + + if (!done && !adjustedlimitonce) + { + uint64_t sumconf = 0; + uint64_t sumimported = 0; + for (int i = 0; i < nbsolvers; i++) + { + sumconf += solvers[i]->conflicts; + sumimported += solvers[i]->stats[nbimported]; + } + if (sumconf > 10000000 && sumimported > 4 * sumconf) + { // too many many imported clauses (after a while) + for (int i = 0; i < nbsolvers; i++) + { // we have like 32 threads, so we need to export just very good clauses + solvers[i]->goodlimitlbd -= 2; + solvers[i]->goodlimitsize -= 4; + } + adjustedlimitonce = true; + printf("c adjusting (once) the limits to send fewer clauses.\n"); + } + } + } + + (void)pthread_mutex_unlock(&m); + + for (i = 0; i < nbsolvers; i++) + { // Wait for all threads to finish + 
pthread_join(*threads[i], NULL); + } + + assert(sharedcomp != NULL); + result = sharedcomp->jobStatus; + if (result == l_True) + { + sharedcomp->jobFinishedBy->extendModel(); + int n = sharedcomp->jobFinishedBy->nVars(); + model.growTo(n); + for (int i = 0; i < n; i++) + { + model[i] = sharedcomp->jobFinishedBy->model[i]; + assert(model[i] != l_Undef); + } + } + + return result; + /* + for(int i=0;i& ps); // Add a clause to the solver. NOTE! 'ps' may be shrunk by this method! + bool addClause_(vec& ps); + + bool simplify(); // Removes already satisfied clauses. + + int nVars() const; // The current number of variables. + int nClauses() const; // The current number of variables. + ParallelSolver* getPrimarySolver(); + + void generateAllSolvers(); + + // Solving: + // + lbool solve(); // Search without assumptions. + bool eliminate(); // Perform variable elimination + void adjustParameters(); + void adjustNumberOfCores(); + void interrupt() {} + vec model; // If problem is satisfiable, this vector contains the model (if any). 
+ inline bool okay() + { + if (!ok) + return ok; + for (int i = 0; i < solvers.size(); i++) + { + if (!((SimpSolver*)solvers[i])->okay()) + { + ok = false; + return false; + } + } + return true; + } + + bool use_simplification; + + protected: + friend class ParallelSolver; + friend class SolverCompanion; + + struct Stats + { + uint64_t min, max, avg, std, med; + Stats(uint64_t _min = 0, uint64_t _max = 0, uint64_t _avg = 0, uint64_t _std = 0, uint64_t _med = 0) : + min(_min), + max(_max), + avg(_avg), + std(_std), + med(_med) + {} + }; + + void printStats(); + int ok; + lbool result; + int maxnbthreads; // Maximal number of threads + int nbthreads; // Current number of threads + int nbsolvers; // Number of CDCL solvers + int nbcompanions; // Number of companions + int nbcompbysolver; // Number of companions by solvers + bool immediateSharingGlue; + int allClonesAreBuilt; + bool showModel; // show model on/off + + int winner; + + vec add_tmp; + + double var_decay; // Inverse of the variable activity decay factor. (default 1 / 0.95) + double clause_decay; // Inverse of the clause activity decay factor. (1 / 0.999) + double cla_inc; // Amount to bump next clause with. + double var_inc; // Amount to bump next variable with. + double random_var_freq; // The frequency with which the decision heuristic tries to choose a random variable. + // (default 0.02) + int restart_first; // The initial restart limit. (default 100) + double restart_inc; // The factor with which the restart limit is multiplied in each restart. (default 1.5) + double learntsize_factor; // The intitial limit for learnt clauses is a factor of the original clauses. (default 1 + // / 3) + double learntsize_inc; // The limit for learnt clauses is multiplied with this factor each restart. (default 1.1) + bool expensive_ccmin; // Controls conflict clause minimization. (default TRUE) + int polarity_mode; // Controls which polarity the decision heuristic chooses. See enum below for allowed modes. 
+ // (default polarity_false) + unsigned int maxmemory; + unsigned int maxnbsolvers; + int verb; + int verbEveryConflicts; + int numvar; // Number of variables + int numclauses; // Number of clauses + + enum + { + polarity_true = 0, + polarity_false = 1, + polarity_user = 2, + polarity_rnd = 3 + }; + + // ClauseAllocator ca; + SharedCompanion* sharedcomp; + + void informEnd(lbool res); + ParallelSolver* retrieveSolver(int i); + + pthread_mutex_t m; // mutex for any high level sync between all threads (like reportf) + pthread_mutex_t + mfinished; // mutex on which main process may wait for... As soon as one process finishes it release the mutex + pthread_cond_t cfinished; // condition variable that says that a thread has finished + + vec solvers; // set of plain solvers + vec solvercompanions; // set of companion solvers + vec threads; // all threads of this process + vec threadIndexOfSolver; // threadIndexOfSolver[solvers[i]] is the index in threads[] of the solver i + vec threadIndexOfSolverCompanion; // threadIndexOfSolverCompanion[solvercompanions[i]] is the index in + // threads[] of the solvercompanion i +}; + +inline bool MultiSolvers::addClause(const vec& ps) +{ + ps.copyTo(add_tmp); + return addClause_(add_tmp); +} + +inline void MultiSolvers::setVerbosity(int i) +{ + verb = i; +} +inline void MultiSolvers::setVerbEveryConflicts(int i) +{ + verbEveryConflicts = i; +} +inline int MultiSolvers::nVars() const +{ + return numvar; +} +inline int MultiSolvers::nClauses() const +{ + return numclauses; +} +inline int MultiSolvers::verbosity() +{ + return verb; +} +inline ParallelSolver* MultiSolvers::getPrimarySolver() +{ + return solvers[0]; +} + +} // namespace Glucose +#endif diff --git a/libs/mugen/glucose-syrup-4.1/parallel/ParallelSolver.cc b/vendors/mugen/glucose-syrup-4.1/parallel/ParallelSolver.cc similarity index 55% rename from libs/mugen/glucose-syrup-4.1/parallel/ParallelSolver.cc rename to vendors/mugen/glucose-syrup-4.1/parallel/ParallelSolver.cc 
index 260dcec99b..769421a75a 100644 --- a/libs/mugen/glucose-syrup-4.1/parallel/ParallelSolver.cc +++ b/vendors/mugen/glucose-syrup-4.1/parallel/ParallelSolver.cc @@ -9,19 +9,19 @@ Labri - Univ. Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). 
@@ -48,6 +48,7 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA **************************************************************************************************/ #include "parallel/ParallelSolver.h" + #include "mtl/Sort.h" using namespace Glucose; @@ -56,167 +57,205 @@ using namespace Glucose; // == Options const char* _cunstable = "CORE/PARALLEL -- UNSTABLE FEATURES"; -const char* _parallel = "PARALLEL"; +const char* _parallel = "PARALLEL"; -extern BoolOption opt_dontExportDirectReusedClauses; // (_cunstable, "reusedClauses", "Don't export directly reused clauses", false); -extern BoolOption opt_plingeling; // (_cunstable, "plingeling", "plingeling strategy for sharing clauses (exploratory feature)", false); +extern BoolOption opt_dontExportDirectReusedClauses; // (_cunstable, "reusedClauses", "Don't export directly reused + // clauses", false); +extern BoolOption opt_plingeling; // (_cunstable, "plingeling", "plingeling strategy for sharing clauses + // (exploratory feature)", false); //===================================================================== //===================================================================== - ParallelSolver::ParallelSolver(int threadId) : - SimpSolver() -, thn(threadId) // The thread number of this solver -, goodlimitlbd(7) -, goodlimitsize(25) -, purgatory(true) -, shareAfterProbation(!opt_plingeling) // only share clauses after probation -, plingeling(opt_plingeling) -, nbTimesSeenBeforeExport(2) -, firstSharing(5000) // Strong limit : do not share anything (except unary clauses) before this number of conflicts -, limitSharingByGoodLBD(true) // Moving limit of what a good LBD is (median value of last learnt clauses set) -, limitSharingByFixedLimitLBD(0) // No fixed bound (like 8 in plingeling) -, limitSharingByFixedLimitSize(0) // No fixed boud (like 40 in plingeling) -, dontExportDirectReusedClauses(opt_dontExportDirectReusedClauses) -, nbNotExportedBecauseDirectlyReused(0) + 
SimpSolver(), + thn(threadId) // The thread number of this solver + , + goodlimitlbd(7), + goodlimitsize(25), + purgatory(true), + shareAfterProbation(!opt_plingeling) // only share clauses after probation + , + plingeling(opt_plingeling), + nbTimesSeenBeforeExport(2), + firstSharing( + 5000) // Strong limit : do not share anything (except unary clauses) before this number of conflicts + , + limitSharingByGoodLBD(true) // Moving limit of what a good LBD is (median value of last learnt clauses set) + , + limitSharingByFixedLimitLBD(0) // No fixed bound (like 8 in plingeling) + , + limitSharingByFixedLimitSize(0) // No fixed boud (like 40 in plingeling) + , + dontExportDirectReusedClauses(opt_dontExportDirectReusedClauses), + nbNotExportedBecauseDirectlyReused(0) { - useUnaryWatched = true; // We want to use promoted clauses here ! - stats.growTo(parallelStatsSize,0); + useUnaryWatched = true; // We want to use promoted clauses here ! + stats.growTo(parallelStatsSize, 0); } - - - -ParallelSolver::~ParallelSolver() { +ParallelSolver::~ParallelSolver() +{ printf("c Solver of thread %d ended.\n", thn); fflush(stdout); } -ParallelSolver::ParallelSolver(const ParallelSolver &s) : - SimpSolver(s) - , sharedcomp(s.sharedcomp) -, goodlimitlbd(s.goodlimitlbd) -, goodlimitsize(s.goodlimitsize) -, purgatory(s.purgatory) -, shareAfterProbation(s.shareAfterProbation) // only share clauses after probation -, plingeling(s.plingeling) -,nbTimesSeenBeforeExport(2) -, firstSharing(s.firstSharing) // Strong limit : do not share anything (except unary clauses) before this number of conflicts -, limitSharingByGoodLBD(s.limitSharingByGoodLBD) // Moving limit of what a good LBD is (median value of last learnt clauses set) -, limitSharingByFixedLimitLBD(s.limitSharingByFixedLimitLBD) // No fixed bound (like 8 in plingeling) -, limitSharingByFixedLimitSize(s.limitSharingByFixedLimitSize) // No fixed boud (like 40 in plingeling) -, 
dontExportDirectReusedClauses(s.dontExportDirectReusedClauses) -, nbNotExportedBecauseDirectlyReused(s.nbNotExportedBecauseDirectlyReused) +ParallelSolver::ParallelSolver(const ParallelSolver& s) : + SimpSolver(s), + sharedcomp(s.sharedcomp), + goodlimitlbd(s.goodlimitlbd), + goodlimitsize(s.goodlimitsize), + purgatory(s.purgatory), + shareAfterProbation(s.shareAfterProbation) // only share clauses after probation + , + plingeling(s.plingeling), + nbTimesSeenBeforeExport(2), + firstSharing(s.firstSharing) // Strong limit : do not share anything (except unary clauses) before this number + // of conflicts + , + limitSharingByGoodLBD( + s.limitSharingByGoodLBD) // Moving limit of what a good LBD is (median value of last learnt clauses set) + , + limitSharingByFixedLimitLBD(s.limitSharingByFixedLimitLBD) // No fixed bound (like 8 in plingeling) + , + limitSharingByFixedLimitSize(s.limitSharingByFixedLimitSize) // No fixed boud (like 40 in plingeling) + , + dontExportDirectReusedClauses(s.dontExportDirectReusedClauses), + nbNotExportedBecauseDirectlyReused(s.nbNotExportedBecauseDirectlyReused) { - s.goodImportsFromThreads.memCopyTo(goodImportsFromThreads); + s.goodImportsFromThreads.memCopyTo(goodImportsFromThreads); useUnaryWatched = s.useUnaryWatched; s.stats.copyTo(stats); - s.elimclauses.copyTo(elimclauses); // This should be done more efficiently some day + s.elimclauses.copyTo(elimclauses); // This should be done more efficiently some day } - // Strategy to reduce unary watches list -struct reduceDB_oneWatched_lt { +struct reduceDB_oneWatched_lt +{ ClauseAllocator& ca; - reduceDB_oneWatched_lt(ClauseAllocator& ca_) : ca(ca_) { - } + reduceDB_oneWatched_lt(ClauseAllocator& ca_) : ca(ca_) {} - bool operator()(CRef x, CRef y) { + bool operator()(CRef x, CRef y) + { // Main criteria... 
Like in MiniSat we keep all binary clauses - if (ca[x].size() > 2 && ca[y].size() == 2) return 1; + if (ca[x].size() > 2 && ca[y].size() == 2) + return 1; - if (ca[y].size() > 2 && ca[x].size() == 2) return 0; - if (ca[x].size() == 2 && ca[y].size() == 2) return 0; + if (ca[y].size() > 2 && ca[x].size() == 2) + return 0; + if (ca[x].size() == 2 && ca[y].size() == 2) + return 0; // Second one based on literal block distance - if (ca[x].size() > ca[y].size()) return 1; - if (ca[x].size() < ca[y].size()) return 0; + if (ca[x].size() > ca[y].size()) + return 1; + if (ca[x].size() < ca[y].size()) + return 0; - if (ca[x].lbd() > ca[y].lbd()) return 1; - if (ca[x].lbd() < ca[y].lbd()) return 0; + if (ca[x].lbd() > ca[y].lbd()) + return 1; + if (ca[x].lbd() < ca[y].lbd()) + return 0; // Finally we can use old activity or size, we choose the last one return ca[x].activity() < ca[y].activity(); - //return x->size() < y->size(); + // return x->size() < y->size(); - //return ca[x].size() > 2 && (ca[y].size() == 2 || ca[x].activity() < ca[y].activity()); } + // return ca[x].size() > 2 && (ca[y].size() == 2 || ca[x].activity() < ca[y].activity()); } } }; // @overide -void ParallelSolver::reduceDB() { +void ParallelSolver::reduceDB() +{ int i, j; stats[nbReduceDB]++; - + int limit; - if (chanseokStrategy) - sort(learnts, reduceDBAct_lt(ca)); - else - sort(learnts, reduceDB_lt(ca)); + if (chanseokStrategy) + sort(learnts, reduceDBAct_lt(ca)); + else + sort(learnts, reduceDB_lt(ca)); - if (!chanseokStrategy && !panicModeIsEnabled()) { + if (!chanseokStrategy && !panicModeIsEnabled()) + { // We have a lot of "good" clauses, it is difficult to compare them. Keep more ! 
- if (ca[learnts[learnts.size() / RATIOREMOVECLAUSES]].lbd() <= 3) nbclausesbeforereduce += specialIncReduceDB; + if (ca[learnts[learnts.size() / RATIOREMOVECLAUSES]].lbd() <= 3) + nbclausesbeforereduce += specialIncReduceDB; // Useless :-) - if (ca[learnts.last()].lbd() <= 5) nbclausesbeforereduce += specialIncReduceDB; - } - // Don't delete binary or locked clauses. From the rest, delete clauses from the first half - // Keep clauses which seem to be usefull (their lbd was reduce during this sequence) + if (ca[learnts.last()].lbd() <= 5) + nbclausesbeforereduce += specialIncReduceDB; + } + // Don't delete binary or locked clauses. From the rest, delete clauses from the first half + // Keep clauses which seem to be usefull (their lbd was reduce during this sequence) - if (!panicModeIsEnabled()) { + if (!panicModeIsEnabled()) + { limit = learnts.size() / 2; - } else { + } + else + { limit = panicModeLastRemoved; } panicModeLastRemoved = 0; uint64_t sumsize = 0; - for (i = j = 0; i < learnts.size(); i++) { + for (i = j = 0; i < learnts.size(); i++) + { Clause& c = ca[learnts[i]]; if (i == learnts.size() / 2) goodlimitlbd = c.lbd(); sumsize += c.size(); - if (c.lbd() > 2 && c.size() > 2 && c.canBeDel() && !locked(c) && (i < limit)) { + if (c.lbd() > 2 && c.size() > 2 && c.canBeDel() && !locked(c) && (i < limit)) + { removeClause(learnts[i]); stats[nbRemovedClauses]++; panicModeLastRemoved++; - } else { - if (!c.canBeDel()) limit++; //we keep c, so we can delete an other clause - c.setCanBeDel(true); // At the next step, c can be delete + } + else + { + if (!c.canBeDel()) + limit++; // we keep c, so we can delete an other clause + c.setCanBeDel(true); // At the next step, c can be delete learnts[j++] = learnts[i]; } } learnts.shrink(i - j); if (learnts.size() > 0) - goodlimitsize = 1 + (double) sumsize / (double) learnts.size(); + goodlimitsize = 1 + (double)sumsize / (double)learnts.size(); // Special treatment for imported clauses if (!panicModeIsEnabled()) - limit = 
unaryWatchedClauses.size() - (learnts.size() * (chanseokStrategy?4:2)); + limit = unaryWatchedClauses.size() - (learnts.size() * (chanseokStrategy ? 4 : 2)); else limit = panicModeLastRemovedShared; panicModeLastRemovedShared = 0; - if ((unaryWatchedClauses.size() > 100) && (limit > 0)) { + if ((unaryWatchedClauses.size() > 100) && (limit > 0)) + { sort(unaryWatchedClauses, reduceDB_oneWatched_lt(ca)); - for (i = j = 0; i < unaryWatchedClauses.size(); i++) { + for (i = j = 0; i < unaryWatchedClauses.size(); i++) + { Clause& c = ca[unaryWatchedClauses[i]]; - if (c.lbd() > 2 && c.size() > 2 && c.canBeDel() && !locked(c) && (i < limit)) { - removeClause(unaryWatchedClauses[i], c.getOneWatched()); // remove from the purgatory (or not) + if (c.lbd() > 2 && c.size() > 2 && c.canBeDel() && !locked(c) && (i < limit)) + { + removeClause(unaryWatchedClauses[i], c.getOneWatched()); // remove from the purgatory (or not) stats[nbRemovedUnaryWatchedClauses]++; panicModeLastRemovedShared++; - } else { - if (!c.canBeDel()) limit++; //we keep c, so we can delete an other clause - c.setCanBeDel(true); // At the next step, c can be delete + } + else + { + if (!c.canBeDel()) + limit++; // we keep c, so we can delete an other clause + c.setCanBeDel(true); // At the next step, c can be delete unaryWatchedClauses[j++] = unaryWatchedClauses[i]; } } @@ -226,68 +265,74 @@ void ParallelSolver::reduceDB() { checkGarbage(); } - /*_________________________________________________________________________________________________ | | parallelImportClauseDuringConflictAnalysis : (Clause &c,CRef confl) -> [void] -| +| | Description: | Verify if the clause using during conflict analysis is good for export | @see : analyze | Output: |________________________________________________________________________________________________@*/ - -void ParallelSolver::parallelImportClauseDuringConflictAnalysis(Clause &c,CRef confl) { - if (dontExportDirectReusedClauses && (confl == lastLearntClause) && 
(c.getExported() < nbTimesSeenBeforeExport)) { // Experimental stuff +void ParallelSolver::parallelImportClauseDuringConflictAnalysis(Clause& c, CRef confl) +{ + if (dontExportDirectReusedClauses && (confl == lastLearntClause) && (c.getExported() < nbTimesSeenBeforeExport)) + { // Experimental stuff c.setExported(nbTimesSeenBeforeExport); nbNotExportedBecauseDirectlyReused++; - } else if (shareAfterProbation && c.getExported() != nbTimesSeenBeforeExport && conflicts > firstSharing) { + } + else if (shareAfterProbation && c.getExported() != nbTimesSeenBeforeExport && conflicts > firstSharing) + { c.setExported(c.getExported() + 1); - if (!c.wasImported() && c.getExported() == nbTimesSeenBeforeExport) { // It's a new interesting clause: - if (c.lbd() == 2 || (c.size() < goodlimitsize && c.lbd() <= goodlimitlbd)) { + if (!c.wasImported() && c.getExported() == nbTimesSeenBeforeExport) + { // It's a new interesting clause: + if (c.lbd() == 2 || (c.size() < goodlimitsize && c.lbd() <= goodlimitlbd)) + { shareClause(c); } } } - } - - // These Two functions are useless here !! 
-void ParallelSolver::reportProgress() { - printf("c | %2d | %6d | %10d | %10d | %8d | %8d | %8d | %8d | %8d | %6.3f |\n",(int)thn,(int)starts,(int)decisions,(int)conflicts,(int)stats[originalClausesSeen],(int)learnts.size(),(int)stats[nbexported],(int)stats[nbimported],(int)stats[nbPromoted],progressEstimate()*100); +void ParallelSolver::reportProgress() +{ + printf("c | %2d | %6d | %10d | %10d | %8d | %8d | %8d | %8d | %8d | %6.3f |\n", (int)thn, (int)starts, + (int)decisions, (int)conflicts, (int)stats[originalClausesSeen], (int)learnts.size(), (int)stats[nbexported], + (int)stats[nbimported], (int)stats[nbPromoted], progressEstimate() * 100); - //printf("c thread=%d confl=%lld starts=%llu reduceDB=%llu learnts=%d broadcast=%llu blockedReuse=%lld imported=%llu promoted=%llu limitlbd=%llu limitsize=%llu\n", thn, conflicts, starts, nbReduceDB, learnts.size(), nbexported, nbNotExportedBecauseDirectlyReused, nbimported, nbPromoted, goodlimitlbd, goodlimitsize); + // printf("c thread=%d confl=%lld starts=%llu reduceDB=%llu learnts=%d broadcast=%llu blockedReuse=%lld + // imported=%llu promoted=%llu limitlbd=%llu limitsize=%llu\n", thn, conflicts, starts, nbReduceDB, learnts.size(), + // nbexported, nbNotExportedBecauseDirectlyReused, nbimported, nbPromoted, goodlimitlbd, goodlimitsize); } -void ParallelSolver::reportProgressArrayImports(vec &totalColumns) { - return ; // TODO : does not currently work +void ParallelSolver::reportProgressArrayImports(vec& totalColumns) +{ + return; // TODO : does not currently work unsigned int totalImports = 0; printf("c %3d | ", thn); - for (int i = 0; i < sharedcomp->nbThreads; i++) { + for (int i = 0; i < sharedcomp->nbThreads; i++) + { totalImports += goodImportsFromThreads[i]; totalColumns[i] += goodImportsFromThreads[i]; printf(" %8d", goodImportsFromThreads[i]); } printf(" | %8d\n", totalImports); - } - - /*_________________________________________________________________________________________________ | | shareClause : 
(Clause &c) -> [bool] -| +| | Description: -| share a clause to other cores +| share a clause to other cores | @see : analyze | Output: true if the clause is indeed sent |________________________________________________________________________________________________@*/ -bool ParallelSolver::shareClause(Clause & c) { +bool ParallelSolver::shareClause(Clause& c) +{ bool sent = sharedcomp->addLearnt(this, c); if (sent) stats[nbexported]++; @@ -297,27 +342,31 @@ bool ParallelSolver::shareClause(Clause & c) { /*_________________________________________________________________________________________________ | | panicModeIsEnabled : () -> [bool] -| +| | Description: | is panic mode (save memory) is enabled ? |________________________________________________________________________________________________@*/ -bool ParallelSolver::panicModeIsEnabled() { +bool ParallelSolver::panicModeIsEnabled() +{ return sharedcomp->panicMode; } /*_________________________________________________________________________________________________ | | parallelImportUnaryClauses : () -> [void] -| +| | Description: | import all unary clauses from other cores |________________________________________________________________________________________________@*/ -void ParallelSolver::parallelImportUnaryClauses() { +void ParallelSolver::parallelImportUnaryClauses() +{ Lit l; - while ((l = sharedcomp->getUnary(this)) != lit_Undef) { - if (value(var(l)) == l_Undef) { + while ((l = sharedcomp->getUnary(this)) != lit_Undef) + { + if (value(var(l)) == l_Undef) + { uncheckedEnqueue(l); stats[nbimportedunit]++; } @@ -327,17 +376,19 @@ void ParallelSolver::parallelImportUnaryClauses() { /*_________________________________________________________________________________________________ | | parallelImportClauses : () -> [bool] -| +| | Description: | import all clauses from other cores | Output : if there is a final conflict 
|________________________________________________________________________________________________@*/ -bool ParallelSolver::parallelImportClauses() { +bool ParallelSolver::parallelImportClauses() +{ assert(decisionLevel() == 0); int importedFromThread; - while (sharedcomp->getNewClause(this, importedFromThread, importedClause)) { + while (sharedcomp->getNewClause(this, importedFromThread, importedClause)) + { assert(importedFromThread <= sharedcomp->nbThreads); assert(importedFromThread >= 0); @@ -346,29 +397,40 @@ bool ParallelSolver::parallelImportClauses() { if (importedClause.size() == 0) return true; - //printf("Thread %d imports clause from thread %d\n", threadNumber(), importedFromThread); + // printf("Thread %d imports clause from thread %d\n", threadNumber(), importedFromThread); CRef cr = ca.alloc(importedClause, true, true); ca[cr].setLBD(importedClause.size()); - if (plingeling) // 0 means a broadcasted clause (good clause), 1 means a survivor clause, broadcasted - ca[cr].setExported(2); // A broadcasted clause (or a survivor clause) do not share it anymore - else { - ca[cr].setExported(1); // next time we see it in analyze, we share it (follow route / broadcast depending on the global strategy, part of an ongoing experimental stuff: a clause in one Watched will be set to exported 2 when promotted. + if (plingeling) // 0 means a broadcasted clause (good clause), 1 means a survivor clause, broadcasted + ca[cr].setExported(2); // A broadcasted clause (or a survivor clause) do not share it anymore + else + { + ca[cr].setExported(1); // next time we see it in analyze, we share it (follow route / broadcast depending + // on the global strategy, part of an ongoing experimental stuff: a clause in one + // Watched will be set to exported 2 when promotted. 
} ca[cr].setImportedFrom(importedFromThread); - if(useUnaryWatched) + if (useUnaryWatched) unaryWatchedClauses.push(cr); - else + else learnts.push(cr); - - if (plingeling || ca[cr].size() <= 2) {//|| importedRoute == 0) { // importedRoute == 0 means a glue clause in another thread (or any very good clause) - ca[cr].setOneWatched(false); // Warning: those clauses will never be promoted by a conflict clause (or rarely: they are propagated!) + + if (plingeling || ca[cr].size() <= 2) + { //|| importedRoute == 0) { // importedRoute == 0 means a glue clause in another thread (or any very good + //clause) + ca[cr].setOneWatched(false); // Warning: those clauses will never be promoted by a conflict clause (or + // rarely: they are propagated!) attachClause(cr); stats[nbImportedGoodClauses]++; - } else { - if(useUnaryWatched) { - attachClausePurgatory(cr); // + } + else + { + if (useUnaryWatched) + { + attachClausePurgatory(cr); // ca[cr].setOneWatched(true); - } else { + } + else + { attachClause(cr); ca[cr].setOneWatched(false); } @@ -380,132 +442,143 @@ bool ParallelSolver::parallelImportClauses() { return false; } - /*_________________________________________________________________________________________________ | | parallelExportUnaryClause : (Lit p) -> [void] -| +| | Description: | export unary clauses to other cores |________________________________________________________________________________________________@*/ -void ParallelSolver::parallelExportUnaryClause(Lit p) { +void ParallelSolver::parallelExportUnaryClause(Lit p) +{ // Multithread - sharedcomp->addLearnt(this,p ); // TODO: there can be a contradiction here (two theads proving a and -a) + sharedcomp->addLearnt(this, p); // TODO: there can be a contradiction here (two theads proving a and -a) stats[nbexportedunit]++; } - /*_________________________________________________________________________________________________ | | parallelExportClauseDuringSearch : (Clause &c) -> [void] -| +| | Description: | 
Verify if a new learnt clause is useful for export | @see search -| +| |________________________________________________________________________________________________@*/ -void ParallelSolver::parallelExportClauseDuringSearch(Clause &c) { +void ParallelSolver::parallelExportClauseDuringSearch(Clause& c) +{ // // Multithread // Now I'm sharing the clause if seen in at least two conflicts analysis shareClause(ca[cr]); - if ((plingeling && !shareAfterProbation && c.lbd() < 8 && c.size() < 40) || - (c.lbd() <= 2)) { // For this class of clauses, I'm sharing them asap (they are Glue CLauses, no probation for them) + if ((plingeling && !shareAfterProbation && c.lbd() < 8 && c.size() < 40) || (c.lbd() <= 2)) + { // For this class of clauses, I'm sharing them asap (they are Glue CLauses, no probation for them) shareClause(c); c.setExported(2); } - } - /*_________________________________________________________________________________________________ | | parallelJobIsFinished : () -> [bool] -| +| | Description: | Is a core already finish the search -| +| |________________________________________________________________________________________________@*/ -bool ParallelSolver::parallelJobIsFinished() { +bool ParallelSolver::parallelJobIsFinished() +{ // Parallel: another job has finished let's quit return (sharedcomp->jobFinished()); } // @overide -lbool ParallelSolver::solve_(bool do_simp, bool turn_off_simp) { - vec extra_frozen; +lbool ParallelSolver::solve_(bool do_simp, bool turn_off_simp) +{ + vec extra_frozen; lbool result = l_True; do_simp &= use_simplification; - if (do_simp){ + if (do_simp) + { // Assumptions must be temporarily frozen to run variable elimination: - for (int i = 0; i < assumptions.size(); i++){ + for (int i = 0; i < assumptions.size(); i++) + { Var v = var(assumptions[i]); // If an assumption has been eliminated, remember it. assert(!isEliminated(v)); - if (!frozen[v]){ + if (!frozen[v]) + { // Freeze and store. 
setFrozen(v, true); extra_frozen.push(v); - } } + } + } result = lbool(eliminate(turn_off_simp)); } model.clear(); conflict.clear(); - if (!ok) return l_False; + if (!ok) + return l_False; solves++; - lbool status = l_Undef; // Search: int curr_restarts = 0; - while (status == l_Undef && !sharedcomp->jobFinished()) { - status = search(luby_restart?luby(restart_inc, curr_restarts)*luby_restart_factor:0); // the parameter is useless in glucose, kept to allow modifications - if (!withinBudget()) break; + while (status == l_Undef && !sharedcomp->jobFinished()) + { + status = search(luby_restart ? luby(restart_inc, curr_restarts) * luby_restart_factor : + 0); // the parameter is useless in glucose, kept to allow modifications + if (!withinBudget()) + break; curr_restarts++; } if (verbosity >= 1) - printf("c =========================================================================================================\n"); + printf("c " + "=======================================================================================================" + "==\n"); + + /* + if (do_simp) + // Unfreeze the assumptions that were frozen: + for (int i = 0; i < extra_frozen.size(); i++) + setFrozen(extra_frozen[i], false); + */ -/* - if (do_simp) - // Unfreeze the assumptions that were frozen: - for (int i = 0; i < extra_frozen.size(); i++) - setFrozen(extra_frozen[i], false); -*/ - bool firstToFinish = false; if (status != l_Undef) firstToFinish = sharedcomp->IFinished(this); - if (firstToFinish) { - printf("c Thread %d is 100%% pure glucose! First thread to finish! (%s answer).\n", threadNumber(), status == l_True ? "SAT" : status == l_False ? "UNSAT" : "UNKOWN"); + if (firstToFinish) + { + printf("c Thread %d is 100%% pure glucose! First thread to finish! (%s answer).\n", threadNumber(), + status == l_True ? "SAT" : + status == l_False ? 
"UNSAT" : + "UNKOWN"); sharedcomp->jobStatus = status; } - - if (firstToFinish && status == l_True) { - extendModel(); + if (firstToFinish && status == l_True) + { + extendModel(); // Extend & copy model: model.growTo(nVars()); for (int i = 0; i < nVars(); i++) model[i] = value(i); - } else if (status == l_False && conflict.size() == 0) + } + else if (status == l_False && conflict.size() == 0) ok = false; - pthread_cond_signal(pcfinished); - //cancelUntil(0); - + // cancelUntil(0); return status; - } diff --git a/libs/mugen/glucose-syrup-4.1/parallel/ParallelSolver.h b/vendors/mugen/glucose-syrup-4.1/parallel/ParallelSolver.h similarity index 55% rename from libs/mugen/glucose-syrup-4.1/parallel/ParallelSolver.h rename to vendors/mugen/glucose-syrup-4.1/parallel/ParallelSolver.h index 38e826d915..d126aa3c3c 100644 --- a/libs/mugen/glucose-syrup-4.1/parallel/ParallelSolver.h +++ b/vendors/mugen/glucose-syrup-4.1/parallel/ParallelSolver.h @@ -9,19 +9,19 @@ Labri - Univ. Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). 
@@ -48,105 +48,110 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA **************************************************************************************************/ #ifndef PARALLELSOLVER_H -#define PARALLELSOLVER_H +#define PARALLELSOLVER_H -#include "core/SolverTypes.h" #include "core/Solver.h" -#include "simp/SimpSolver.h" +#include "core/SolverTypes.h" #include "parallel/SharedCompanion.h" -namespace Glucose { - - enum ParallelStats{ - nbexported=coreStatsSize, - nbimported, - nbexportedunit, - nbimportedunit, - nbimportedInPurgatory, - nbImportedGoodClauses - } ; +#include "simp/SimpSolver.h" +namespace Glucose +{ + +enum ParallelStats +{ + nbexported = coreStatsSize, + nbimported, + nbexportedunit, + nbimportedunit, + nbimportedInPurgatory, + nbImportedGoodClauses +}; #define parallelStatsSize (coreStatsSize + 6) - + //================================================================================================= - //class MultiSolvers; - //class SolverCompanion; - // class MultiSolvers; - -class ParallelSolver : public SimpSolver { +class MultiSolvers; +class SolverCompanion; +class SharedCompanion; + +class ParallelSolver : public SimpSolver +{ friend class MultiSolvers; friend class SolverCompanion; friend class SharedCompanion; -// friend class ReasoningCompanion; -// friend class SolverConfiguration; - -protected : - // Multithread : - int thn; // internal thread number - //MultiSolvers* belongsto; // Not working (due to incomplete types) - SharedCompanion *sharedcomp; - bool coreFUIP; // true if one core is specialized for branching on all FUIP - bool ImTheSolverFUIP; - pthread_mutex_t *pmfinished; // mutex on which main process may wait for... 
As soon as one process finishes it release the mutex - pthread_cond_t *pcfinished; // condition variable that says that a thread as finished - -public: + // friend class ReasoningCompanion; + // friend class SolverConfiguration; + + protected: + // Multithread : + int thn; // internal thread number + // MultiSolvers* belongsto; // Not working (due to incomplete types) + SharedCompanion* sharedcomp; + bool coreFUIP; // true if one core is specialized for branching on all FUIP + bool ImTheSolverFUIP; + pthread_mutex_t* + pmfinished; // mutex on which main process may wait for... As soon as one process finishes it release the mutex + pthread_cond_t* pcfinished; // condition variable that says that a thread as finished + + public: // Constructor/Destructor: // ParallelSolver(int threadId); - ParallelSolver(const ParallelSolver &s); + ParallelSolver(const ParallelSolver& s); ~ParallelSolver(); - + /** * Clone function */ - virtual Clone* clone() const { - return new ParallelSolver(*this); - } - - int threadNumber () const; - void setThreadNumber (int i); - void reportProgress(); - void reportProgressArrayImports(vec &totalColumns); - virtual void reduceDB(); - virtual lbool solve_ (bool do_simp = true, bool turn_off_simp = false); - - vec importedClause; // Temporary clause used to copy each imported clause - unsigned int goodlimitlbd; // LBD score of the "good" clauses, locally - int goodlimitsize; - bool purgatory; // mode of operation - bool shareAfterProbation; // Share any none glue clause only after probation (seen 2 times in conflict analysis) - bool plingeling; // plingeling strategy for sharing clauses (experimental) - int nbTimesSeenBeforeExport; + virtual Clone* clone() const + { + return new ParallelSolver(*this); + } + + int threadNumber() const; + void setThreadNumber(int i); + void reportProgress(); + void reportProgressArrayImports(vec& totalColumns); + virtual void reduceDB(); + virtual lbool solve_(bool do_simp = true, bool turn_off_simp = false); + + 
vec importedClause; // Temporary clause used to copy each imported clause + unsigned int goodlimitlbd; // LBD score of the "good" clauses, locally + int goodlimitsize; + bool purgatory; // mode of operation + bool shareAfterProbation; // Share any none glue clause only after probation (seen 2 times in conflict analysis) + bool plingeling; // plingeling strategy for sharing clauses (experimental) + int nbTimesSeenBeforeExport; // Stats front end -// uint64_t getNbExported() { return nbexported;} - // uint64_t getNbImported() { return nbimported;} - // uint64_t getNbExportedUnit() {return nbexportedunit;} - - uint32_t firstSharing, limitSharingByGoodLBD, limitSharingByFixedLimitLBD, limitSharingByFixedLimitSize; - uint32_t probationByFollowingRoads, probationByFriend; - uint32_t survivorLayers; // Number of layers for a common clause to survive - bool dontExportDirectReusedClauses ; // When true, directly reused clauses are not exported + // uint64_t getNbExported() { return nbexported;} + // uint64_t getNbImported() { return nbimported;} + // uint64_t getNbExportedUnit() {return nbexportedunit;} + + uint32_t firstSharing, limitSharingByGoodLBD, limitSharingByFixedLimitLBD, limitSharingByFixedLimitSize; + uint32_t probationByFollowingRoads, probationByFriend; + uint32_t survivorLayers; // Number of layers for a common clause to survive + bool dontExportDirectReusedClauses; // When true, directly reused clauses are not exported uint64_t nbNotExportedBecauseDirectlyReused; - - - vec goodImportsFromThreads; // Stats of good importations from other threads - virtual void parallelImportClauseDuringConflictAnalysis(Clause &c,CRef confl); - virtual bool parallelImportClauses(); // true if the empty clause was received + vec goodImportsFromThreads; // Stats of good importations from other threads + + virtual void parallelImportClauseDuringConflictAnalysis(Clause& c, CRef confl); + virtual bool parallelImportClauses(); // true if the empty clause was received virtual void 
parallelImportUnaryClauses(); virtual void parallelExportUnaryClause(Lit p); - virtual void parallelExportClauseDuringSearch(Clause &c); + virtual void parallelExportClauseDuringSearch(Clause& c); virtual bool parallelJobIsFinished(); virtual bool panicModeIsEnabled(); - - bool shareClause(Clause & c); // true if the clause was succesfully sent - - + bool shareClause(Clause& c); // true if the clause was succesfully sent }; - - inline int ParallelSolver::threadNumber () const {return thn;} - inline void ParallelSolver::setThreadNumber (int i) {thn = i;} +inline int ParallelSolver::threadNumber() const +{ + return thn; } -#endif /* PARALLELSOLVER_H */ - +inline void ParallelSolver::setThreadNumber(int i) +{ + thn = i; +} +} // namespace Glucose +#endif /* PARALLELSOLVER_H */ diff --git a/libs/mugen/glucose-syrup-4.1/parallel/SharedCompanion.cc b/vendors/mugen/glucose-syrup-4.1/parallel/SharedCompanion.cc similarity index 57% rename from libs/mugen/glucose-syrup-4.1/parallel/SharedCompanion.cc rename to vendors/mugen/glucose-syrup-4.1/parallel/SharedCompanion.cc index dadc6724b6..ba5aeb1ed7 100644 --- a/libs/mugen/glucose-syrup-4.1/parallel/SharedCompanion.cc +++ b/vendors/mugen/glucose-syrup-4.1/parallel/SharedCompanion.cc @@ -9,19 +9,19 @@ Labri - Univ. Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). @@ -47,103 +47,114 @@ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**************************************************************************************************/ +#include "parallel/SharedCompanion.h" + #include "core/Solver.h" -#include "parallel/ParallelSolver.h" #include "core/SolverTypes.h" #include "parallel/ClausesBuffer.h" -#include "parallel/SharedCompanion.h" - +#include "parallel/ParallelSolver.h" using namespace Glucose; SharedCompanion::SharedCompanion(int _nbThreads) : - nbThreads(_nbThreads), - bjobFinished(false), - jobFinishedBy(NULL), - panicMode(false), // The bug in the SAT2014 competition :) - jobStatus(l_Undef), - random_seed(9164825) { - - pthread_mutex_init(&mutexSharedClauseCompanion,NULL); // This is the shared companion lock - pthread_mutex_init(&mutexSharedUnitCompanion,NULL); // This is the shared companion lock - pthread_mutex_init(&mutexSharedCompanion,NULL); // This is the shared companion lock - pthread_mutex_init(&mutexJobFinished,NULL); // This is the shared companion lock - if (_nbThreads> 0) { - setNbThreads(_nbThreads); - fprintf(stdout,"c Shared companion initialized: handling of clauses of %d threads.\nc %d ints for the sharing clause buffer (not expandable) .\n", _nbThreads, clausesBuffer.maxSize()); - } - + nbThreads(_nbThreads), + bjobFinished(false), + jobFinishedBy(NULL), + panicMode(false), // The bug in the SAT2014 competition :) + jobStatus(l_Undef), + random_seed(9164825) +{ + + pthread_mutex_init(&mutexSharedClauseCompanion, NULL); // This is the shared companion lock + pthread_mutex_init(&mutexSharedUnitCompanion, NULL); // This is the shared companion lock + pthread_mutex_init(&mutexSharedCompanion, NULL); // This is the shared companion lock + pthread_mutex_init(&mutexJobFinished, NULL); // This is the shared companion lock + if (_nbThreads > 0) + { + setNbThreads(_nbThreads); + fprintf(stdout, + "c Shared companion initialized: handling of clauses of %d threads.\nc %d ints for the sharing clause " + "buffer (not expandable) .\n", + _nbThreads, clausesBuffer.maxSize()); + } 
} -void SharedCompanion::setNbThreads(int _nbThreads) { - nbThreads = _nbThreads; - clausesBuffer.setNbThreads(_nbThreads); +void SharedCompanion::setNbThreads(int _nbThreads) +{ + nbThreads = _nbThreads; + clausesBuffer.setNbThreads(_nbThreads); } -void SharedCompanion::printStats() { -} +void SharedCompanion::printStats() {} // No multithread safe -bool SharedCompanion::addSolver(ParallelSolver* s) { - watchedSolvers.push(s); - pthread_mutex_t* mu = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t)); - pthread_mutex_init(mu,NULL); - assert(s->thn == watchedSolvers.size()-1); // all solvers must have been registered in the good order - nextUnit.push(0); - - return true; +bool SharedCompanion::addSolver(ParallelSolver* s) +{ + watchedSolvers.push(s); + pthread_mutex_t* mu = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t)); + pthread_mutex_init(mu, NULL); + assert(s->thn == watchedSolvers.size() - 1); // all solvers must have been registered in the good order + nextUnit.push(0); + + return true; } -void SharedCompanion::newVar(bool sign) { - isUnary .push(l_Undef); +void SharedCompanion::newVar(bool sign) +{ + isUnary.push(l_Undef); } -void SharedCompanion::addLearnt(ParallelSolver *s,Lit unary) { - pthread_mutex_lock(&mutexSharedUnitCompanion); - if (isUnary[var(unary)]==l_Undef) { - unitLit.push(unary); - isUnary[var(unary)] = sign(unary)?l_False:l_True; - } - pthread_mutex_unlock(&mutexSharedUnitCompanion); +void SharedCompanion::addLearnt(ParallelSolver* s, Lit unary) +{ + pthread_mutex_lock(&mutexSharedUnitCompanion); + if (isUnary[var(unary)] == l_Undef) + { + unitLit.push(unary); + isUnary[var(unary)] = sign(unary) ? 
l_False : l_True; + } + pthread_mutex_unlock(&mutexSharedUnitCompanion); } -Lit SharedCompanion::getUnary(ParallelSolver *s) { - int sn = s->thn; - Lit ret = lit_Undef; +Lit SharedCompanion::getUnary(ParallelSolver* s) +{ + int sn = s->thn; + Lit ret = lit_Undef; - pthread_mutex_lock(&mutexSharedUnitCompanion); - if (nextUnit[sn] < unitLit.size()) - ret = unitLit[nextUnit[sn]++]; - pthread_mutex_unlock(&mutexSharedUnitCompanion); - return ret; + pthread_mutex_lock(&mutexSharedUnitCompanion); + if (nextUnit[sn] < unitLit.size()) + ret = unitLit[nextUnit[sn]++]; + pthread_mutex_unlock(&mutexSharedUnitCompanion); + return ret; } // Specialized functions for this companion // must be multithread safe // Add a clause to the threads-wide clause database (all clauses, through) -bool SharedCompanion::addLearnt(ParallelSolver *s, Clause & c) { - int sn = s->thn; // thread number of the solver - bool ret = false; - assert(watchedSolvers.size()>sn); - - pthread_mutex_lock(&mutexSharedClauseCompanion); - ret = clausesBuffer.pushClause(sn, c); - pthread_mutex_unlock(&mutexSharedClauseCompanion); - return ret; +bool SharedCompanion::addLearnt(ParallelSolver* s, Clause& c) +{ + int sn = s->thn; // thread number of the solver + bool ret = false; + assert(watchedSolvers.size() > sn); + + pthread_mutex_lock(&mutexSharedClauseCompanion); + ret = clausesBuffer.pushClause(sn, c); + pthread_mutex_unlock(&mutexSharedClauseCompanion); + return ret; } +bool SharedCompanion::getNewClause(ParallelSolver* s, int& threadOrigin, vec& newclause) +{ // gets a new interesting clause for solver s + int sn = s->thn; -bool SharedCompanion::getNewClause(ParallelSolver *s, int & threadOrigin, vec& newclause) { // gets a new interesting clause for solver s - int sn = s->thn; - // First, let's get the clauses on the big blackboard pthread_mutex_lock(&mutexSharedClauseCompanion); bool b = clausesBuffer.getClause(sn, threadOrigin, newclause); pthread_mutex_unlock(&mutexSharedClauseCompanion); - - return b; 
+ + return b; } -bool SharedCompanion::jobFinished() { +bool SharedCompanion::jobFinished() +{ bool ret = false; pthread_mutex_lock(&mutexJobFinished); ret = bjobFinished; @@ -151,17 +162,16 @@ bool SharedCompanion::jobFinished() { return ret; } -bool SharedCompanion::IFinished(ParallelSolver *s) { +bool SharedCompanion::IFinished(ParallelSolver* s) +{ bool ret = false; pthread_mutex_lock(&mutexJobFinished); - if (!bjobFinished) { - ret = true; - bjobFinished = true; - jobFinishedBy = s; + if (!bjobFinished) + { + ret = true; + bjobFinished = true; + jobFinishedBy = s; } pthread_mutex_unlock(&mutexJobFinished); return ret; } - - - diff --git a/libs/mugen/glucose-syrup-4.1/parallel/SharedCompanion.h b/vendors/mugen/glucose-syrup-4.1/parallel/SharedCompanion.h similarity index 54% rename from libs/mugen/glucose-syrup-4.1/parallel/SharedCompanion.h rename to vendors/mugen/glucose-syrup-4.1/parallel/SharedCompanion.h index 9ab34614b0..6d702c71a0 100644 --- a/libs/mugen/glucose-syrup-4.1/parallel/SharedCompanion.h +++ b/vendors/mugen/glucose-syrup-4.1/parallel/SharedCompanion.h @@ -9,19 +9,19 @@ Labri - Univ. Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). 
@@ -53,70 +53,77 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA * Only one sharedCompanion is created for all the solvers */ - #ifndef SharedCompanion_h #define SharedCompanion_h #include "core/SolverTypes.h" +#include "parallel/ClausesBuffer.h" #include "parallel/ParallelSolver.h" #include "parallel/SolverCompanion.h" -#include "parallel/ClausesBuffer.h" -namespace Glucose { +namespace Glucose +{ - -class SharedCompanion : public SolverCompanion { +class ParallelSolver; +class SharedCompanion : public SolverCompanion +{ friend class MultiSolvers; friend class ParallelSolver; -public: - SharedCompanion(int nbThreads=0); - void setNbThreads(int _nbThreads); // Sets the number of threads (cannot by changed once the solver is running) - void newVar(bool sign); // Adds a var (used to keep track of unary variables) - void printStats(); // Printing statistics of all solvers - - bool jobFinished(); // True if the job is over - bool IFinished(ParallelSolver *s); // returns true if you are the first solver to finish - bool addSolver(ParallelSolver*); // attach a solver to accompany - void addLearnt(ParallelSolver *s,Lit unary); // Add a unary clause to share - bool addLearnt(ParallelSolver *s, Clause & c); // Add a clause to the shared companion, as a database manager - - bool getNewClause(ParallelSolver *s, int &th, vec & nc); // gets a new interesting clause for solver s - Lit getUnary(ParallelSolver *s); // Gets a new unary literal - inline ParallelSolver* winner(){return jobFinishedBy;} // Gets the first solver that called IFinished() - - protected: - - ClausesBuffer clausesBuffer; // A big blackboard for all threads sharing non unary clauses - int nbThreads; // Number of threads - - // A set of mutex variables - pthread_mutex_t mutexSharedCompanion; // mutex for any high level sync between all threads (like reportf) - pthread_mutex_t mutexSharedClauseCompanion; // mutex for reading/writing clauses on the blackboard - pthread_mutex_t 
mutexSharedUnitCompanion; // mutex for reading/writing unit clauses on the blackboard - pthread_mutex_t mutexJobFinished; - - bool bjobFinished; - ParallelSolver *jobFinishedBy; - bool panicMode; // panicMode means no more increasing space needed - lbool jobStatus; // globale status of the job - - // Shared clauses are a queue of lits... - // friend class wholearnt; - vec nextUnit; // indice of next unit clause to retrieve for solver number i - vec unitLit; // Set of unit literals found so far - vec isUnary; // sign of the unary var (if proved, or l_Undef if not) - double random_seed; - - // Returns a random float 0 <= x < 1. Seed must never be 0. - static inline double drand(double& seed) { - seed *= 1389796; - int q = (int)(seed / 2147483647); - seed -= (double)q * 2147483647; - return seed / 2147483647; } - - // Returns a random integer 0 <= x < size. Seed must never be 0. - static inline int irand(double& seed, int size) { - return (int)(drand(seed) * size); } + public: + SharedCompanion(int nbThreads = 0); + void setNbThreads(int _nbThreads); // Sets the number of threads (cannot by changed once the solver is running) + void newVar(bool sign); // Adds a var (used to keep track of unary variables) + void printStats(); // Printing statistics of all solvers + + bool jobFinished(); // True if the job is over + bool IFinished(ParallelSolver* s); // returns true if you are the first solver to finish + bool addSolver(ParallelSolver*); // attach a solver to accompany + void addLearnt(ParallelSolver* s, Lit unary); // Add a unary clause to share + bool addLearnt(ParallelSolver* s, Clause& c); // Add a clause to the shared companion, as a database manager + + bool getNewClause(ParallelSolver* s, int& th, vec& nc); // gets a new interesting clause for solver s + Lit getUnary(ParallelSolver* s); // Gets a new unary literal + inline ParallelSolver* winner() + { + return jobFinishedBy; + } // Gets the first solver that called IFinished() + + protected: + ClausesBuffer 
clausesBuffer; // A big blackboard for all threads sharing non unary clauses + int nbThreads; // Number of threads + + // A set of mutex variables + pthread_mutex_t mutexSharedCompanion; // mutex for any high level sync between all threads (like reportf) + pthread_mutex_t mutexSharedClauseCompanion; // mutex for reading/writing clauses on the blackboard + pthread_mutex_t mutexSharedUnitCompanion; // mutex for reading/writing unit clauses on the blackboard + pthread_mutex_t mutexJobFinished; + + bool bjobFinished; + ParallelSolver* jobFinishedBy; + bool panicMode; // panicMode means no more increasing space needed + lbool jobStatus; // globale status of the job + + // Shared clauses are a queue of lits... + // friend class wholearnt; + vec nextUnit; // indice of next unit clause to retrieve for solver number i + vec unitLit; // Set of unit literals found so far + vec isUnary; // sign of the unary var (if proved, or l_Undef if not) + double random_seed; + + // Returns a random float 0 <= x < 1. Seed must never be 0. + static inline double drand(double& seed) + { + seed *= 1389796; + int q = (int)(seed / 2147483647); + seed -= (double)q * 2147483647; + return seed / 2147483647; + } + + // Returns a random integer 0 <= x < size. Seed must never be 0. + static inline int irand(double& seed, int size) + { + return (int)(drand(seed) * size); + } }; -} +} // namespace Glucose #endif diff --git a/libs/mugen/glucose-syrup-4.1/parallel/SolverCompanion.cc b/vendors/mugen/glucose-syrup-4.1/parallel/SolverCompanion.cc similarity index 83% rename from libs/mugen/glucose-syrup-4.1/parallel/SolverCompanion.cc rename to vendors/mugen/glucose-syrup-4.1/parallel/SolverCompanion.cc index 8cd5c51997..2fb6c645cb 100644 --- a/libs/mugen/glucose-syrup-4.1/parallel/SolverCompanion.cc +++ b/vendors/mugen/glucose-syrup-4.1/parallel/SolverCompanion.cc @@ -9,19 +9,19 @@ Labri - Univ. Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). 
Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). 
@@ -59,29 +59,29 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA using namespace Glucose; -SolverCompanion::SolverCompanion() -{} +SolverCompanion::SolverCompanion() {} -SolverCompanion::~SolverCompanion() -{} +SolverCompanion::~SolverCompanion() {} - -bool SolverCompanion::addSolver(ParallelSolver* s) { - watchedSolvers.push(s); - return true; +bool SolverCompanion::addSolver(ParallelSolver* s) +{ + watchedSolvers.push(s); + return true; } -int SolverCompanion::runOnceCompanion() { - int errcode = 0; - for(int indexSolver = 0; indexSolver watchedSolvers; +namespace Glucose +{ + +class ParallelSolver; + +class SolverCompanion +{ + public: + SolverCompanion(); + ~SolverCompanion(); + + bool addSolver(ParallelSolver* s); // attach a solver to accompany + + int runOnceCompanion(); // run it as a thread, but run it just once... + + protected: + int runOnceCompanion(ParallelSolver* s); // run it only on this watched solver + friend class ParallelSolver; + vec watchedSolvers; }; -} +} // namespace Glucose #endif - diff --git a/vendors/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.cc b/vendors/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.cc new file mode 100644 index 0000000000..d83147c696 --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.cc @@ -0,0 +1,193 @@ +/***************************************************************************************[SolverConfiguration.cc] + Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + LRI - Univ. Paris Sud, France (2009-2013) + Labri - Univ. Bordeaux, France + + Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + Labri - Univ. Bordeaux, France + +Glucose sources are based on MiniSat (see below MiniSat copyrights). 
Permissions and copyrights of +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +is based on. (see below). + +Glucose-Syrup sources are based on another copyright. Permissions and copyrights for the parallel +version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software +without restriction, including the rights to use, copy, modify, merge, publish, distribute, +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +- The above and below copyrights notices and this permission notice shall be included in all +copies or substantial portions of the Software; +- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot +be used in any competitive event (sat competitions/evaluations) without the express permission of +the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event +using Glucose Parallel as an embedded SAT engine (single core or not). + + +--------------- Original Minisat Copyrights + +Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson +Copyright (c) 2007-2010, Niklas Sorensson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + **************************************************************************************************/ + +#include "core/Solver.h" +#include "parallel/MultiSolvers.h" +// #include "parallel/ParallelSolver.h" +#include "parallel/SolverConfiguration.h" + +using namespace Glucose; + +void SolverConfiguration::configure(MultiSolvers* ms, int nbsolvers) +{ + for (int i = 1; i < nbsolvers; i++) + { // Configuration for the sat race 2015 + ms->solvers[i]->randomizeFirstDescent = true; + ms->solvers[i]->adaptStrategies = (i % 2 == 0); // Just half of the cores are in adaptive mode + ms->solvers[i]->forceUnsatOnNewDescent = (i % 4 == 0); // Just half of adaptive cores have the unsat force + } + if (nbsolvers > 8) + { // configuration for the second phase of the sat race 2015 + for (int i = 0; i < nbsolvers; i++) + { // we have like 32 threads, so we need to export just very good clauses + ms->solvers[i]->goodlimitlbd = 5; + ms->solvers[i]->goodlimitsize = 15; + } + } +} + +void SolverConfiguration::configureSAT15Adapt(MultiSolvers* ms, int nbsolvers) +{ + for (int i = 1; i < nbsolvers; i++) + { // Configuration for the sat race 2015 + ms->solvers[i]->randomizeFirstDescent = true; + ms->solvers[i]->adaptStrategies = (i % 2 == 0); // Just half of the cores are in adaptive mode + } + if (nbsolvers > 8) + { // configuration for the second phase of the sat race 2015 + for (int i = 0; i < nbsolvers; i++) + { // we have like 32 threads, so we need to export just very good clauses + ms->solvers[i]->goodlimitlbd = 5; + 
ms->solvers[i]->goodlimitsize = 15; + } + } +} + +void SolverConfiguration::configureSAT15Default(MultiSolvers* ms, int nbsolvers) +{ + for (int i = 1; i < nbsolvers; i++) ms->solvers[i]->randomizeFirstDescent = true; + + if (nbsolvers > 8) + { // configuration for the second phase of the sat race 2015 + for (int i = 0; i < nbsolvers; i++) + { + ms->solvers[i]->goodlimitlbd = 5; + ms->solvers[i]->goodlimitsize = 15; + } + } +} + +void SolverConfiguration::configureSAT14(MultiSolvers* ms, int nbsolvers) +{ + + if (nbsolvers < 2) + return; + + ms->solvers[1]->var_decay = 0.94; + ms->solvers[1]->max_var_decay = 0.96; + ms->solvers[1]->firstReduceDB = 600; + + if (nbsolvers < 3) + return; + + ms->solvers[2]->var_decay = 0.90; + ms->solvers[2]->max_var_decay = 0.97; + ms->solvers[2]->firstReduceDB = 500; + + if (nbsolvers < 4) + return; + + ms->solvers[3]->var_decay = 0.85; + ms->solvers[3]->max_var_decay = 0.93; + ms->solvers[3]->firstReduceDB = 400; + + if (nbsolvers < 5) + return; + + // Glucose 2.0 (+ blocked restarts) + ms->solvers[4]->var_decay = 0.95; + ms->solvers[4]->max_var_decay = 0.95; + ms->solvers[4]->firstReduceDB = 4000; + ms->solvers[4]->lbdQueue.growTo(100); + ms->solvers[4]->sizeLBDQueue = 100; + ms->solvers[4]->K = 0.7; + ms->solvers[4]->incReduceDB = 500; + + if (nbsolvers < 6) + return; + + ms->solvers[5]->var_decay = 0.93; + ms->solvers[5]->max_var_decay = 0.96; + ms->solvers[5]->firstReduceDB = 100; + ms->solvers[5]->incReduceDB = 500; + + if (nbsolvers < 7) + return; + + ms->solvers[6]->var_decay = 0.75; + ms->solvers[6]->max_var_decay = 0.94; + ms->solvers[6]->firstReduceDB = 2000; + + if (nbsolvers < 8) + return; + + ms->solvers[7]->var_decay = 0.94; + ms->solvers[7]->max_var_decay = 0.96; + ms->solvers[7]->firstReduceDB = 800; + + if (nbsolvers < 9) + return; + + // ms->solvers[8]->reduceOnSize = true; // NOT USED ANYMORE + + if (nbsolvers < 10) + return; + + // ms->solvers[9]->reduceOnSize = true; // NOT USED ANYMORE + // 
ms->solvers[9]->reduceOnSizeSize = 14; + + if (nbsolvers < 11) + return; + + double noisevar_decay = 0.005; + int noiseReduceDB = 50; + for (int i = 10; i < nbsolvers; i++) + { + ms->solvers[i]->var_decay = ms->solvers[i % 8]->var_decay; + ms->solvers[i]->max_var_decay = ms->solvers[i % 8]->max_var_decay; + ms->solvers[i]->firstReduceDB = ms->solvers[i % 8]->firstReduceDB; + ms->solvers[i]->var_decay += noisevar_decay; + ms->solvers[i]->firstReduceDB += noiseReduceDB; + if ((i + 1) % 8 == 0) + { + noisevar_decay += 0.006; + noiseReduceDB += 25; + } + } +} diff --git a/libs/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.h b/vendors/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.h similarity index 87% rename from libs/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.h rename to vendors/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.h index 94118562c4..09fa62bf71 100644 --- a/libs/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.h +++ b/vendors/mugen/glucose-syrup-4.1/parallel/SolverConfiguration.h @@ -9,19 +9,19 @@ Labri - Univ. Bordeaux, France Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of -Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it is based on. (see below). Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). @@ -47,27 +47,25 @@ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**************************************************************************************************/ - #ifndef SolverConfiguration_h #define SolverConfiguration_h - - -namespace Glucose { +namespace Glucose +{ class MultiSolvers; -class SolverConfiguration { +class SolverConfiguration +{ -public : - static void configure(MultiSolvers *ms, int nbsolvers); + public: + static void configure(MultiSolvers* ms, int nbsolvers); // Special configurations - static void configureSAT14(MultiSolvers *ms, int nbsolvers); - void configureSAT15Adapt(MultiSolvers *ms, int nbsolvers); - void configureSAT15Default(MultiSolvers *ms, int nbsolvers); - + static void configureSAT14(MultiSolvers* ms, int nbsolvers); + void configureSAT15Adapt(MultiSolvers* ms, int nbsolvers); + void configureSAT15Default(MultiSolvers* ms, int nbsolvers); }; -} +} // namespace Glucose #endif diff --git a/vendors/mugen/glucose-syrup-4.1/simp/Main.cc b/vendors/mugen/glucose-syrup-4.1/simp/Main.cc new file mode 100644 index 0000000000..eb90bb4076 --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/simp/Main.cc @@ -0,0 +1,364 @@ +/***************************************************************************************[Main.cc] + Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + LRI - Univ. Paris Sud, France (2009-2013) + Labri - Univ. Bordeaux, France + + Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + Labri - Univ. Bordeaux, France + +Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +is based on. (see below). + +Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel +version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software +without restriction, including the rights to use, copy, modify, merge, publish, distribute, +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +- The above and below copyrights notices and this permission notice shall be included in all +copies or substantial portions of the Software; +- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot +be used in any competitive event (sat competitions/evaluations) without the express permission of +the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event +using Glucose Parallel as an embedded SAT engine (single core or not). + + +--------------- Original Minisat Copyrights + +Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson +Copyright (c) 2007-2010, Niklas Sorensson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + **************************************************************************************************/ + +#include "core/Dimacs.h" +#include "simp/SimpSolver.h" +#include "utils/Options.h" +#include "utils/ParseUtils.h" +#include "utils/System.h" + +#include +#include +#include +#include + +using namespace Glucose; + +//================================================================================================= + +static const char* _certified = "CORE -- CERTIFIED UNSAT"; + +void printStats(Solver& solver) +{ + double cpu_time = cpuTime(); + double mem_used = 0; // memUsedPeak(); + printf("c restarts : %" PRIu64 " (%" PRIu64 " conflicts in avg)\n", solver.starts, + (solver.starts > 0 ? solver.conflicts / solver.starts : 0)); + printf("c blocked restarts : %" PRIu64 " (multiple: %" PRIu64 ") \n", solver.stats[nbstopsrestarts], + solver.stats[nbstopsrestartssame]); + printf("c last block at restart : %" PRIu64 "\n", solver.stats[lastblockatrestart]); + printf("c nb ReduceDB : %" PRIu64 "\n", solver.stats[nbReduceDB]); + printf("c nb removed Clauses : %" PRIu64 "\n", solver.stats[nbRemovedClauses]); + printf("c nb learnts DL2 : %" PRIu64 "\n", solver.stats[nbDL2]); + printf("c nb learnts size 2 : %" PRIu64 "\n", solver.stats[nbBin]); + printf("c nb learnts size 1 : %" PRIu64 "\n", solver.stats[nbUn]); + if (solver.chanseokStrategy) + printf("c nb permanent learnts : %" PRIu64 "\n", solver.stats[nbPermanentLearnts]); + + printf("c conflicts : %-12" PRIu64 " (%.0f /sec)\n", solver.conflicts, solver.conflicts / cpu_time); + printf("c decisions : %-12" PRIu64 " (%4.2f %% random) (%.0f /sec)\n", solver.decisions, + (float)solver.stats[rnd_decisions] * 100 / (float)solver.decisions, solver.decisions / cpu_time); + 
printf("c propagations : %-12" PRIu64 " (%.0f /sec)\n", solver.propagations, + solver.propagations / cpu_time); + // printf("c conflict literals : %-12" PRIu64" (%4.2f %% deleted)\n", solver.stats[tot_literals], + // (solver.stats[max_literals] - solver.stats[tot_literals])*100 / (double)solver.stats[max_literals]); printf("c + // Average resolutions : %-12" PRIu64" (%.0f seen + // ones)\n",solver.stats[sumRes]/solver.conflicts,((double)solver.stats[sumResSeen])/solver.conflicts); + printf("c nb reduced Clauses : %" PRIu64 "\n", solver.stats[nbReducedClauses]); + + if (mem_used != 0) + printf("Memory used : %.2f MB\n", mem_used); + printf("c CPU time : %g s\n", cpu_time); +} + +static Solver* solver; +// Terminate by notifying the solver and back out gracefully. This is mainly to have a test-case +// for this feature of the Solver as it may take longer than an immediate call to '_exit()'. +static void SIGINT_interrupt(int signum) +{ + solver->interrupt(); +} + +// Note that '_exit()' rather than 'exit()' has to be used. The reason is that 'exit()' calls +// destructors and may cause deadlocks if a malloc/free function happens to be running (these +// functions are guarded by locks for multithreaded use). 
+static void SIGINT_exit(int signum) +{ + printf("\n"); + printf("*** INTERRUPTED ***\n"); + if (solver->verbosity > 0) + { + printStats(*solver); + printf("\n"); + printf("*** INTERRUPTED ***\n"); + } + _exit(1); +} + +//================================================================================================= +// Main: + +int main(int argc, char** argv) +{ + try + { + printf("c\nc This is glucose 4.0 -- based on MiniSAT (Many thanks to MiniSAT team)\nc\n"); + + setUsageHelp("c USAGE: %s [options] \n\n where input may be either in plain " + "or gzipped DIMACS.\n"); + + // Extra options: + // + IntOption verb("MAIN", "verb", "Verbosity level (0=silent, 1=some, 2=more).", 1, IntRange(0, 2)); + BoolOption mod("MAIN", "model", "show model.", false); + IntOption vv("MAIN", "vv", "Verbosity every vv conflicts", 10000, IntRange(1, INT32_MAX)); + BoolOption pre("MAIN", "pre", "Completely turn on/off any preprocessing.", true); + StringOption dimacs("MAIN", "dimacs", "If given, stop after preprocessing and write the result to this file."); + IntOption cpu_lim("MAIN", "cpu-lim", "Limit on CPU time allowed in seconds.\n", INT32_MAX, + IntRange(0, INT32_MAX)); + IntOption mem_lim("MAIN", "mem-lim", "Limit on memory usage in megabytes.\n", INT32_MAX, + IntRange(0, INT32_MAX)); + // BoolOption opt_incremental ("MAIN","incremental", "Use incremental SAT solving",false); + + BoolOption opt_certified(_certified, "certified", "Certified UNSAT using DRUP format", false); + StringOption opt_certified_file(_certified, "certified-output", "Certified UNSAT output file", "NULL"); + BoolOption opt_vbyte(_certified, "vbyte", "Emit proof in variable-byte encoding", false); + + parseOptions(argc, argv, true); + + SimpSolver S; + double initial_time = cpuTime(); + + S.parsing = 1; + S.use_simplification = pre; + + // if (!pre) S.eliminate(true); + + S.verbosity = verb; + S.verbEveryConflicts = vv; + S.showModel = mod; + + S.certifiedUNSAT = opt_certified; + S.vbyte = opt_vbyte; + if 
(S.certifiedUNSAT) + { + if (!strcmp(opt_certified_file, "NULL")) + { + S.vbyte = false; // Cannot write binary to stdout + S.certifiedOutput = fopen("/dev/stdout", "wb"); + if (S.verbosity >= 1) + printf("c\nc Write unsat proof on stdout using text format\nc\n"); + } + else + S.certifiedOutput = fopen(opt_certified_file, "wb"); + const char* name = opt_certified_file; + if (S.verbosity >= 1) + printf("c\nc Write unsat proof on %s using %s format\nc\n", name, S.vbyte ? "binary" : "text"); + } + + solver = &S; + // Use signal handlers that forcibly quit until the solver will be able to respond to + // interrupts: + signal(SIGINT, SIGINT_exit); + signal(SIGXCPU, SIGINT_exit); + + // Set limit on CPU-time: + if (cpu_lim != INT32_MAX) + { + rlimit rl; + getrlimit(RLIMIT_CPU, &rl); + if (rl.rlim_max == RLIM_INFINITY || (rlim_t)cpu_lim < rl.rlim_max) + { + rl.rlim_cur = cpu_lim; + if (setrlimit(RLIMIT_CPU, &rl) == -1) + printf("c WARNING! Could not set resource limit: CPU-time.\n"); + } + } + + // Set limit on virtual memory: + if (mem_lim != INT32_MAX) + { + rlim_t new_mem_lim = (rlim_t)mem_lim * 1024 * 1024; + rlimit rl; + getrlimit(RLIMIT_AS, &rl); + if (rl.rlim_max == RLIM_INFINITY || new_mem_lim < rl.rlim_max) + { + rl.rlim_cur = new_mem_lim; + if (setrlimit(RLIMIT_AS, &rl) == -1) + printf("c WARNING! Could not set resource limit: Virtual memory.\n"); + } + } + + if (argc == 1) + printf("c Reading from standard input... Use '--help' for help.\n"); + + gzFile in = (argc == 1) ? gzdopen(0, "rb") : gzopen(argv[1], "rb"); + if (in == NULL) + printf("ERROR! Could not open file: %s\n", argc == 1 ? "" : argv[1]), exit(1); + + if (S.verbosity > 0) + { + printf("c ========================================[ Problem Statistics " + "]===========================================\n"); + printf("c | " + " |\n"); + } + + FILE* res = (argc >= 3) ? 
fopen(argv[argc - 1], "wb") : NULL; + parse_DIMACS(in, S); + gzclose(in); + + if (S.verbosity > 0) + { + printf( + "c | Number of variables: %12d |\n", + S.nVars()); + printf( + "c | Number of clauses: %12d |\n", + S.nClauses()); + } + + double parsed_time = cpuTime(); + if (S.verbosity > 0) + { + printf("c | Parse time: %12.2f s " + " |\n", + parsed_time - initial_time); + printf("c | " + " |\n"); + } + + // Change to signal-handlers that will only notify the solver and allow it to terminate + // voluntarily: + signal(SIGINT, SIGINT_interrupt); + signal(SIGXCPU, SIGINT_interrupt); + + S.parsing = 0; + if (pre /* && !S.isIncremental()*/) + { + printf("c | Preprocesing is fully done\n"); + S.eliminate(true); + double simplified_time = cpuTime(); + if (S.verbosity > 0) + { + printf("c | Simplification time: %12.2f s " + " |\n", + simplified_time - parsed_time); + } + } + printf("c | " + " |\n"); + if (!S.okay()) + { + if (S.certifiedUNSAT) + fprintf(S.certifiedOutput, "0\n"), fclose(S.certifiedOutput); + if (res != NULL) + fprintf(res, "UNSAT\n"), fclose(res); + if (S.verbosity > 0) + { + printf("c " + "===============================================================================================" + "==========\n"); + printf("Solved by simplification\n"); + printStats(S); + printf("\n"); + } + printf("s UNSATISFIABLE\n"); + exit(20); + } + + if (dimacs) + { + if (S.verbosity > 0) + printf("c =======================================[ Writing DIMACS " + "]===============================================\n"); + S.toDimacs((const char*)dimacs); + if (S.verbosity > 0) + printStats(S); + exit(0); + } + + vec dummy; + lbool ret = S.solveLimited(dummy); + + if (S.verbosity > 0) + { + printStats(S); + printf("\n"); + } + printf(ret == l_True ? "s SATISFIABLE\n" : ret == l_False ? 
"s UNSATISFIABLE\n" : "s INDETERMINATE\n"); + + if (res != NULL) + { + if (ret == l_True) + { + printf("SAT\n"); + for (int i = 0; i < S.nVars(); i++) + if (S.model[i] != l_Undef) + fprintf(res, "%s%s%d", (i == 0) ? "" : " ", (S.model[i] == l_True) ? "" : "-", i + 1); + fprintf(res, " 0\n"); + } + else + { + if (ret == l_False) + { + fprintf(res, "UNSAT\n"); + } + } + fclose(res); + } + else + { + if (S.showModel && ret == l_True) + { + printf("v "); + for (int i = 0; i < S.nVars(); i++) + if (S.model[i] != l_Undef) + printf("%s%s%d", (i == 0) ? "" : " ", (S.model[i] == l_True) ? "" : "-", i + 1); + printf(" 0\n"); + } + } + +#ifdef NDEBUG + exit(ret == l_True ? 10 : + ret == l_False ? 20 : + 0); // (faster than "return", which will invoke the destructor for 'Solver') +#else + return (ret == l_True ? 10 : ret == l_False ? 20 : 0); +#endif + } + catch (OutOfMemoryException&) + { + printf("c " + "=======================================================================================================" + "==\n"); + printf("INDETERMINATE\n"); + exit(0); + } +} diff --git a/libs/mugen/glucose-syrup-4.1/simp/Makefile b/vendors/mugen/glucose-syrup-4.1/simp/Makefile similarity index 100% rename from libs/mugen/glucose-syrup-4.1/simp/Makefile rename to vendors/mugen/glucose-syrup-4.1/simp/Makefile diff --git a/libs/mugen/glucose-syrup-4.1/simp/SimpSolver.cc b/vendors/mugen/glucose-syrup-4.1/simp/SimpSolver.cc similarity index 65% rename from libs/mugen/glucose-syrup-4.1/simp/SimpSolver.cc rename to vendors/mugen/glucose-syrup-4.1/simp/SimpSolver.cc index 7c36bd026b..d0d6efd23a 100644 --- a/libs/mugen/glucose-syrup-4.1/simp/SimpSolver.cc +++ b/vendors/mugen/glucose-syrup-4.1/simp/SimpSolver.cc @@ -15,13 +15,13 @@ is based on. (see below). Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software without restriction, including the rights to use, copy, modify, merge, publish, distribute, -sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above and below copyrights notices and this permission notice shall be included in all copies or substantial portions of the Software; - The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot -be used in any competitive event (sat competitions/evaluations) without the express permission of +be used in any competitive event (sat competitions/evaluations) without the express permission of the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event using Glucose Parallel as an embedded SAT engine (single core or not). @@ -47,8 +47,9 @@ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************************************/ -#include "mtl/Sort.h" #include "simp/SimpSolver.h" + +#include "mtl/Sort.h" #include "utils/System.h" using namespace Glucose; @@ -56,79 +57,80 @@ using namespace Glucose; //================================================================================================= // Options: - static const char* _cat = "SIMP"; -static BoolOption opt_use_asymm (_cat, "asymm", "Shrink clauses by asymmetric branching.", false); -static BoolOption opt_use_rcheck (_cat, "rcheck", "Check if a clause is already implied. 
(costly)", false); -static BoolOption opt_use_elim (_cat, "elim", "Perform variable elimination.", true); -static IntOption opt_grow (_cat, "grow", "Allow a variable elimination step to grow by a number of clauses.", 0); -static IntOption opt_clause_lim (_cat, "cl-lim", "Variables are not eliminated if it produces a resolvent with a length above this limit. -1 means no limit", 20, IntRange(-1, INT32_MAX)); -static IntOption opt_subsumption_lim (_cat, "sub-lim", "Do not check if subsumption against a clause larger than this. -1 means no limit.", 1000, IntRange(-1, INT32_MAX)); -static DoubleOption opt_simp_garbage_frac(_cat, "simp-gc-frac", "The fraction of wasted memory allowed before a garbage collection is triggered during simplification.", 0.5, DoubleRange(0, false, HUGE_VAL, false)); - +static BoolOption opt_use_asymm(_cat, "asymm", "Shrink clauses by asymmetric branching.", false); +static BoolOption opt_use_rcheck(_cat, "rcheck", "Check if a clause is already implied. (costly)", false); +static BoolOption opt_use_elim(_cat, "elim", "Perform variable elimination.", true); +static IntOption opt_grow(_cat, "grow", "Allow a variable elimination step to grow by a number of clauses.", 0); +static IntOption opt_clause_lim( + _cat, "cl-lim", + "Variables are not eliminated if it produces a resolvent with a length above this limit. -1 means no limit", 20, + IntRange(-1, INT32_MAX)); +static IntOption + opt_subsumption_lim(_cat, "sub-lim", + "Do not check if subsumption against a clause larger than this. 
-1 means no limit.", 1000, + IntRange(-1, INT32_MAX)); +static DoubleOption opt_simp_garbage_frac( + _cat, "simp-gc-frac", + "The fraction of wasted memory allowed before a garbage collection is triggered during simplification.", 0.5, + DoubleRange(0, false, HUGE_VAL, false)); //================================================================================================= // Constructor/Destructor: - SimpSolver::SimpSolver() : - Solver() - , grow (opt_grow) - , clause_lim (opt_clause_lim) - , subsumption_lim (opt_subsumption_lim) - , simp_garbage_frac (opt_simp_garbage_frac) - , use_asymm (opt_use_asymm) - , use_rcheck (opt_use_rcheck) - , use_elim (opt_use_elim) - , merges (0) - , asymm_lits (0) - , eliminated_vars (0) - , use_simplification (true) - , elimorder (1) - , occurs (ClauseDeleted(ca)) - , elim_heap (ElimLt(n_occ)) - , bwdsub_assigns (0) - , n_touched (0) + Solver(), + grow(opt_grow), + clause_lim(opt_clause_lim), + subsumption_lim(opt_subsumption_lim), + simp_garbage_frac(opt_simp_garbage_frac), + use_asymm(opt_use_asymm), + use_rcheck(opt_use_rcheck), + use_elim(opt_use_elim), + merges(0), + asymm_lits(0), + eliminated_vars(0), + use_simplification(true), + elimorder(1), + occurs(ClauseDeleted(ca)), + elim_heap(ElimLt(n_occ)), + bwdsub_assigns(0), + n_touched(0) { - vec dummy(1,lit_Undef); - ca.extra_clause_field = true; // NOTE: must happen before allocating the dummy clause below. + vec dummy(1, lit_Undef); + ca.extra_clause_field = true; // NOTE: must happen before allocating the dummy clause below. 
bwdsub_tmpunit = ca.alloc(dummy); remove_satisfied = false; } - -SimpSolver::~SimpSolver() -{ -} - - - -SimpSolver::SimpSolver(const SimpSolver &s) : Solver(s) - , grow (s.grow) - , clause_lim (s.clause_lim) - , subsumption_lim (s.subsumption_lim) - , simp_garbage_frac (s.simp_garbage_frac) - , use_asymm (s.use_asymm) - , use_rcheck (s.use_rcheck) - , use_elim (s.use_elim) - , merges (s.merges) - , asymm_lits (s.asymm_lits) - , eliminated_vars (s.eliminated_vars) - , use_simplification (s.use_simplification) - , elimorder (s.elimorder) - , occurs (ClauseDeleted(ca)) - , elim_heap (ElimLt(n_occ)) - , bwdsub_assigns (s.bwdsub_assigns) - , n_touched (s.n_touched) +SimpSolver::~SimpSolver() {} + +SimpSolver::SimpSolver(const SimpSolver& s) : + Solver(s), + grow(s.grow), + clause_lim(s.clause_lim), + subsumption_lim(s.subsumption_lim), + simp_garbage_frac(s.simp_garbage_frac), + use_asymm(s.use_asymm), + use_rcheck(s.use_rcheck), + use_elim(s.use_elim), + merges(s.merges), + asymm_lits(s.asymm_lits), + eliminated_vars(s.eliminated_vars), + use_simplification(s.use_simplification), + elimorder(s.elimorder), + occurs(ClauseDeleted(ca)), + elim_heap(ElimLt(n_occ)), + bwdsub_assigns(s.bwdsub_assigns), + n_touched(s.n_touched) { // TODO: Copy dummy... what is it??? - vec dummy(1,lit_Undef); - ca.extra_clause_field = true; // NOTE: must happen before allocating the dummy clause below. + vec dummy(1, lit_Undef); + ca.extra_clause_field = true; // NOTE: must happen before allocating the dummy clause below. 
bwdsub_tmpunit = ca.alloc(dummy); remove_satisfied = false; - //End TODO - + // End TODO s.elimclauses.memCopyTo(elimclauses); s.touched.memCopyTo(touched); @@ -140,28 +142,29 @@ SimpSolver::SimpSolver(const SimpSolver &s) : Solver(s) s.eliminated.memCopyTo(eliminated); use_simplification = s.use_simplification; - bwdsub_assigns = s.bwdsub_assigns; - n_touched = s.n_touched; - bwdsub_tmpunit = s.bwdsub_tmpunit; - qhead = s.qhead; - ok = s.ok; + bwdsub_assigns = s.bwdsub_assigns; + n_touched = s.n_touched; + bwdsub_tmpunit = s.bwdsub_tmpunit; + qhead = s.qhead; + ok = s.ok; } - - -Var SimpSolver::newVar(bool sign, bool dvar) { +Var SimpSolver::newVar(bool sign, bool dvar) +{ Var v = Solver::newVar(sign, dvar); - frozen .push((char)false); + frozen.push((char)false); eliminated.push((char)false); - if (use_simplification){ - n_occ .push(0); - n_occ .push(0); - occurs .init(v); - touched .push(0); - elim_heap .insert(v); + if (use_simplification) + { + n_occ.push(0); + n_occ.push(0); + occurs.init(v); + touched.push(0); + elim_heap.insert(v); } - return v; } + return v; +} lbool SimpSolver::solve_(bool do_simp, bool turn_off_simp) { @@ -169,19 +172,23 @@ lbool SimpSolver::solve_(bool do_simp, bool turn_off_simp) lbool result = l_True; do_simp &= use_simplification; - if (do_simp){ + if (do_simp) + { // Assumptions must be temporarily frozen to run variable elimination: - for (int i = 0; i < assumptions.size(); i++){ + for (int i = 0; i < assumptions.size(); i++) + { Var v = var(assumptions[i]); // If an assumption has been eliminated, remember it. assert(!isEliminated(v)); - if (!frozen[v]){ + if (!frozen[v]) + { // Freeze and store. 
setFrozen(v, true); extra_frozen.push(v); - } } + } + } result = lbool(eliminate(turn_off_simp)); } @@ -196,20 +203,15 @@ lbool SimpSolver::solve_(bool do_simp, bool turn_off_simp) if (do_simp) // Unfreeze the assumptions that were frozen: - for (int i = 0; i < extra_frozen.size(); i++) - setFrozen(extra_frozen[i], false); - + for (int i = 0; i < extra_frozen.size(); i++) setFrozen(extra_frozen[i], false); return result; } - - bool SimpSolver::addClause_(vec& ps) { #ifndef NDEBUG - for (int i = 0; i < ps.size(); i++) - assert(!isEliminated(var(ps[i]))); + for (int i = 0; i < ps.size(); i++) assert(!isEliminated(var(ps[i]))); #endif int nclauses = clauses.size(); @@ -219,21 +221,24 @@ bool SimpSolver::addClause_(vec& ps) if (!Solver::addClause_(ps)) return false; - if(!parsing && certifiedUNSAT) { - if (vbyte) { + if (!parsing && certifiedUNSAT) + { + if (vbyte) + { write_char('a'); - for (int i = 0; i < ps.size(); i++) - write_lit(2*(var(ps[i])+1) + sign(ps[i])); + for (int i = 0; i < ps.size(); i++) write_lit(2 * (var(ps[i]) + 1) + sign(ps[i])); write_lit(0); } - else { + else + { for (int i = 0; i < ps.size(); i++) - fprintf(certifiedOutput, "%i " , (var(ps[i]) + 1) * (-2 * sign(ps[i]) + 1) ); + fprintf(certifiedOutput, "%i ", (var(ps[i]) + 1) * (-2 * sign(ps[i]) + 1)); fprintf(certifiedOutput, "0\n"); } } - if (use_simplification && clauses.size() == nclauses + 1){ + if (use_simplification && clauses.size() == nclauses + 1) + { CRef cr = clauses.last(); const Clause& c = ca[cr]; @@ -244,7 +249,8 @@ bool SimpSolver::addClause_(vec& ps) // consequence of how backward subsumption is used to mimic // forward subsumption. 
subsumption_queue.insert(cr); - for (int i = 0; i < c.size(); i++){ + for (int i = 0; i < c.size(); i++) + { occurs[var(c[i])].push(cr); n_occ[toInt(c[i])]++; touched[var(c[i])] = 1; @@ -257,23 +263,21 @@ bool SimpSolver::addClause_(vec& ps) return true; } - - -void SimpSolver::removeClause(CRef cr,bool inPurgatory) +void SimpSolver::removeClause(CRef cr, bool inPurgatory) { const Clause& c = ca[cr]; if (use_simplification) - for (int i = 0; i < c.size(); i++){ + for (int i = 0; i < c.size(); i++) + { n_occ[toInt(c[i])]--; updateElimHeap(var(c[i])); occurs.smudge(var(c[i])); } - Solver::removeClause(cr,inPurgatory); + Solver::removeClause(cr, inPurgatory); } - bool SimpSolver::strengthenClause(CRef cr, Lit l) { Clause& c = ca[cr]; @@ -284,35 +288,45 @@ bool SimpSolver::strengthenClause(CRef cr, Lit l) // if (!find(subsumption_queue, &c)) subsumption_queue.insert(cr); - if (certifiedUNSAT) { - if (vbyte) { + if (certifiedUNSAT) + { + if (vbyte) + { write_char('a'); for (int i = 0; i < c.size(); i++) - if (c[i] != l) write_lit(2*(var(c[i])+1) + sign(c[i])); + if (c[i] != l) + write_lit(2 * (var(c[i]) + 1) + sign(c[i])); write_lit(0); } - else { + else + { for (int i = 0; i < c.size(); i++) - if (c[i] != l) fprintf(certifiedOutput, "%i " , (var(c[i]) + 1) * (-2 * sign(c[i]) + 1) ); + if (c[i] != l) + fprintf(certifiedOutput, "%i ", (var(c[i]) + 1) * (-2 * sign(c[i]) + 1)); fprintf(certifiedOutput, "0\n"); } } - if (c.size() == 2){ + if (c.size() == 2) + { removeClause(cr); c.strengthen(l); - }else{ - if (certifiedUNSAT) { - if (vbyte) { + } + else + { + if (certifiedUNSAT) + { + if (vbyte) + { write_char('d'); - for (int i = 0; i < c.size(); i++) - write_lit(2*(var(c[i])+1) + sign(c[i])); + for (int i = 0; i < c.size(); i++) write_lit(2 * (var(c[i]) + 1) + sign(c[i])); write_lit(0); } - else { + else + { fprintf(certifiedOutput, "d "); for (int i = 0; i < c.size(); i++) - fprintf(certifiedOutput, "%i " , (var(c[i]) + 1) * (-2 * sign(c[i]) + 1) ); + 
fprintf(certifiedOutput, "%i ", (var(c[i]) + 1) * (-2 * sign(c[i]) + 1)); fprintf(certifiedOutput, "0\n"); } } @@ -328,19 +342,20 @@ bool SimpSolver::strengthenClause(CRef cr, Lit l) return c.size() == 1 ? enqueue(c[0]) && propagate() == CRef_Undef : true; } - // Returns FALSE if clause is always satisfied ('out_clause' should not be used). bool SimpSolver::merge(const Clause& _ps, const Clause& _qs, Var v, vec& out_clause) { merges++; out_clause.clear(); - bool ps_smallest = _ps.size() < _qs.size(); - const Clause& ps = ps_smallest ? _qs : _ps; - const Clause& qs = ps_smallest ? _ps : _qs; + bool ps_smallest = _ps.size() < _qs.size(); + const Clause& ps = ps_smallest ? _qs : _ps; + const Clause& qs = ps_smallest ? _ps : _qs; - for (int i = 0; i < qs.size(); i++){ - if (var(qs[i]) != v){ + for (int i = 0; i < qs.size(); i++) + { + if (var(qs[i]) != v) + { for (int j = 0; j < ps.size(); j++) if (var(ps[j]) == var(qs[i])) if (ps[j] == ~qs[i]) @@ -349,7 +364,7 @@ bool SimpSolver::merge(const Clause& _ps, const Clause& _qs, Var v, vec& ou goto next; out_clause.push(qs[i]); } - next:; + next:; } for (int i = 0; i < ps.size(); i++) @@ -359,22 +374,23 @@ bool SimpSolver::merge(const Clause& _ps, const Clause& _qs, Var v, vec& ou return true; } - // Returns FALSE if clause is always satisfied. bool SimpSolver::merge(const Clause& _ps, const Clause& _qs, Var v, int& size) { merges++; - bool ps_smallest = _ps.size() < _qs.size(); - const Clause& ps = ps_smallest ? _qs : _ps; - const Clause& qs = ps_smallest ? _ps : _qs; - const Lit* __ps = (const Lit*)ps; - const Lit* __qs = (const Lit*)qs; + bool ps_smallest = _ps.size() < _qs.size(); + const Clause& ps = ps_smallest ? _qs : _ps; + const Clause& qs = ps_smallest ? 
_ps : _qs; + const Lit* __ps = (const Lit*)ps; + const Lit* __qs = (const Lit*)qs; - size = ps.size()-1; + size = ps.size() - 1; - for (int i = 0; i < qs.size(); i++){ - if (var(__qs[i]) != v){ + for (int i = 0; i < qs.size(); i++) + { + if (var(__qs[i]) != v) + { for (int j = 0; j < ps.size(); j++) if (var(__ps[j]) == var(__qs[i])) if (__ps[j] == ~__qs[i]) @@ -383,27 +399,29 @@ bool SimpSolver::merge(const Clause& _ps, const Clause& _qs, Var v, int& size) goto next; size++; } - next:; + next:; } return true; } - void SimpSolver::gatherTouchedClauses() { - if (n_touched == 0) return; + if (n_touched == 0) + return; - int i,j; + int i, j; for (i = j = 0; i < subsumption_queue.size(); i++) if (ca[subsumption_queue[i]].mark() == 0) ca[subsumption_queue[i]].mark(2); for (i = 0; i < touched.size(); i++) - if (touched[i]){ + if (touched[i]) + { const vec& cs = occurs.lookup(i); for (j = 0; j < cs.size(); j++) - if (ca[cs[j]].mark() == 0){ + if (ca[cs[j]].mark() == 0) + { subsumption_queue.insert(cs[j]); ca[cs[j]].mark(2); } @@ -417,17 +435,19 @@ void SimpSolver::gatherTouchedClauses() n_touched = 0; } - bool SimpSolver::implied(const vec& c) { assert(decisionLevel() == 0); trail_lim.push(trail.size()); for (int i = 0; i < c.size(); i++) - if (value(c[i]) == l_True){ + if (value(c[i]) == l_True) + { cancelUntil(0); return false; - }else if (value(c[i]) != l_False){ + } + else if (value(c[i]) != l_False) + { assert(value(c[i]) == l_Undef); uncheckedEnqueue(~c[i]); } @@ -437,39 +457,46 @@ bool SimpSolver::implied(const vec& c) return result; } - // Backward subsumption + backward subsumption resolution bool SimpSolver::backwardSubsumptionCheck(bool verbose) { - int cnt = 0; - int subsumed = 0; + int cnt = 0; + int subsumed = 0; int deleted_literals = 0; assert(decisionLevel() == 0); - while (subsumption_queue.size() > 0 || bwdsub_assigns < trail.size()){ + while (subsumption_queue.size() > 0 || bwdsub_assigns < trail.size()) + { // Empty subsumption queue and return 
immediately on user-interrupt: - if (asynch_interrupt){ + if (asynch_interrupt) + { subsumption_queue.clear(); bwdsub_assigns = trail.size(); - break; } + break; + } // Check top-level assignments by creating a dummy clause and placing it in the queue: - if (subsumption_queue.size() == 0 && bwdsub_assigns < trail.size()){ - Lit l = trail[bwdsub_assigns++]; + if (subsumption_queue.size() == 0 && bwdsub_assigns < trail.size()) + { + Lit l = trail[bwdsub_assigns++]; ca[bwdsub_tmpunit][0] = l; ca[bwdsub_tmpunit].calcAbstraction(); - subsumption_queue.insert(bwdsub_tmpunit); } + subsumption_queue.insert(bwdsub_tmpunit); + } - CRef cr = subsumption_queue.peek(); subsumption_queue.pop(); - Clause& c = ca[cr]; + CRef cr = subsumption_queue.peek(); + subsumption_queue.pop(); + Clause& c = ca[cr]; - if (c.mark()) continue; + if (c.mark()) + continue; if (verbose && verbosity >= 2 && cnt++ % 1000 == 0) - printf("subsumption left: %10d (%10d subsumed, %10d deleted literals)\r", subsumption_queue.size(), subsumed, deleted_literals); + printf("subsumption left: %10d (%10d subsumed, %10d deleted literals)\r", subsumption_queue.size(), + subsumed, deleted_literals); - assert(c.size() > 1 || value(c[0]) == l_True); // Unit-clauses should have been propagated before this point. + assert(c.size() > 1 || value(c[0]) == l_True); // Unit-clauses should have been propagated before this point. 
// Find best variable to scan: Var best = var(c[0]); @@ -479,17 +506,19 @@ bool SimpSolver::backwardSubsumptionCheck(bool verbose) // Search all candidates: vec& _cs = occurs.lookup(best); - CRef* cs = (CRef*)_cs; + CRef* cs = (CRef*)_cs; for (int j = 0; j < _cs.size(); j++) if (c.mark()) break; - else if (!ca[cs[j]].mark() && cs[j] != cr && (subsumption_lim == -1 || ca[cs[j]].size() < subsumption_lim)){ + else if (!ca[cs[j]].mark() && cs[j] != cr && (subsumption_lim == -1 || ca[cs[j]].size() < subsumption_lim)) + { Lit l = c.subsumes(ca[cs[j]]); if (l == lit_Undef) subsumed++, removeClause(cs[j]); - else if (l != lit_Error){ + else if (l != lit_Error) + { deleted_literals++; if (!strengthenClause(cs[j], ~l)) @@ -505,13 +534,13 @@ bool SimpSolver::backwardSubsumptionCheck(bool verbose) return true; } - bool SimpSolver::asymm(Var v, CRef cr) { Clause& c = ca[cr]; assert(decisionLevel() == 0); - if (c.mark() || satisfied(c)) return true; + if (c.mark() || satisfied(c)) + return true; trail_lim.push(trail.size()); Lit l = lit_Undef; @@ -521,18 +550,19 @@ bool SimpSolver::asymm(Var v, CRef cr) else l = c[i]; - if (propagate() != CRef_Undef){ + if (propagate() != CRef_Undef) + { cancelUntil(0); asymm_lits++; if (!strengthenClause(cr, l)) return false; - }else + } + else cancelUntil(0); return true; } - bool SimpSolver::asymmVar(Var v) { assert(use_simplification); @@ -549,14 +579,12 @@ bool SimpSolver::asymmVar(Var v) return backwardSubsumptionCheck(); } - static void mkElimClause(vec& elimclauses, Lit x) { elimclauses.push(toInt(x)); elimclauses.push(1); } - static void mkElimClause(vec& elimclauses, Var v, Clause& c) { int first = elimclauses.size(); @@ -564,7 +592,8 @@ static void mkElimClause(vec& elimclauses, Var v, Clause& c) // Copy clause to elimclauses-vector. 
Remember position where the // variable 'v' occurs: - for (int i = 0; i < c.size(); i++){ + for (int i = 0; i < c.size(); i++) + { elimclauses.push(toInt(c[i])); if (var(c[i]) == v) v_pos = i + first; @@ -573,7 +602,7 @@ static void mkElimClause(vec& elimclauses, Var v, Clause& c) // Swap the first literal with the 'v' literal, so that the literal // containing 'v' will occur first in the clause: - uint32_t tmp = elimclauses[v_pos]; + uint32_t tmp = elimclauses[v_pos]; elimclauses[v_pos] = elimclauses[first]; elimclauses[first] = tmp; @@ -581,8 +610,6 @@ static void mkElimClause(vec& elimclauses, Var v, Clause& c) elimclauses.push(c.size()); } - - bool SimpSolver::eliminateVar(Var v) { assert(!frozen[v]); @@ -593,8 +620,7 @@ bool SimpSolver::eliminateVar(Var v) // const vec& cls = occurs.lookup(v); vec pos, neg; - for (int i = 0; i < cls.size(); i++) - (find(ca[cls[i]], mkLit(v)) ? pos : neg).push(cls[i]); + for (int i = 0; i < cls.size(); i++) (find(ca[cls[i]], mkLit(v)) ? pos : neg).push(cls[i]); // Check wether the increase in number of clauses stays within the allowed ('grow'). 
Moreover, no // clause must exceed the limit on the maximal clause size (if it is set): @@ -604,7 +630,7 @@ bool SimpSolver::eliminateVar(Var v) for (int i = 0; i < pos.size(); i++) for (int j = 0; j < neg.size(); j++) - if (merge(ca[pos[i]], ca[neg[j]], v, clause_size) && + if (merge(ca[pos[i]], ca[neg[j]], v, clause_size) && (++cnt > cls.size() + grow || (clause_lim != -1 && clause_size > clause_lim))) return true; @@ -613,17 +639,17 @@ bool SimpSolver::eliminateVar(Var v) setDecisionVar(v, false); eliminated_vars++; - if (pos.size() > neg.size()){ - for (int i = 0; i < neg.size(); i++) - mkElimClause(elimclauses, v, ca[neg[i]]); + if (pos.size() > neg.size()) + { + for (int i = 0; i < neg.size(); i++) mkElimClause(elimclauses, v, ca[neg[i]]); mkElimClause(elimclauses, mkLit(v)); - }else{ - for (int i = 0; i < pos.size(); i++) - mkElimClause(elimclauses, v, ca[pos[i]]); + } + else + { + for (int i = 0; i < pos.size(); i++) mkElimClause(elimclauses, v, ca[pos[i]]); mkElimClause(elimclauses, ~mkLit(v)); } - // Produce clauses in cross product: vec& resolvent = add_tmp; for (int i = 0; i < pos.size(); i++) @@ -631,76 +657,78 @@ bool SimpSolver::eliminateVar(Var v) if (merge(ca[pos[i]], ca[neg[j]], v, resolvent) && !addClause_(resolvent)) return false; - for (int i = 0; i < cls.size(); i++) - removeClause(cls[i]); + for (int i = 0; i < cls.size(); i++) removeClause(cls[i]); // Free occurs list for this variable: occurs[v].clear(true); // Free watchers lists for this variable, if possible: - if (watches[ mkLit(v)].size() == 0) watches[ mkLit(v)].clear(true); - if (watches[~mkLit(v)].size() == 0) watches[~mkLit(v)].clear(true); + if (watches[mkLit(v)].size() == 0) + watches[mkLit(v)].clear(true); + if (watches[~mkLit(v)].size() == 0) + watches[~mkLit(v)].clear(true); return backwardSubsumptionCheck(); } - bool SimpSolver::substitute(Var v, Lit x) { assert(!frozen[v]); assert(!isEliminated(v)); assert(value(v) == l_Undef); - if (!ok) return false; + if (!ok) + return 
false; eliminated[v] = true; setDecisionVar(v, false); const vec& cls = occurs.lookup(v); - + vec& subst_clause = add_tmp; - for (int i = 0; i < cls.size(); i++){ + for (int i = 0; i < cls.size(); i++) + { Clause& c = ca[cls[i]]; subst_clause.clear(); - for (int j = 0; j < c.size(); j++){ + for (int j = 0; j < c.size(); j++) + { Lit p = c[j]; subst_clause.push(var(p) == v ? x ^ sign(p) : p); } - if (!addClause_(subst_clause)) return ok = false; - removeClause(cls[i]); - - } + removeClause(cls[i]); + } return true; } - void SimpSolver::extendModel() { int i, j; Lit x; - if(model.size()==0) model.growTo(nVars()); + if (model.size() == 0) + model.growTo(nVars()); - for (i = elimclauses.size()-1; i > 0; i -= j){ + for (i = elimclauses.size() - 1; i > 0; i -= j) + { for (j = elimclauses[i--]; j > 1; j--, i--) if (modelValue(toLit(elimclauses[i])) != l_False) goto next; - x = toLit(elimclauses[i]); + x = toLit(elimclauses[i]); model[var(x)] = lbool(!sign(x)); next:; } } - bool SimpSolver::eliminate(bool turn_off_elim) { - if (!simplify()) { + if (!simplify()) + { ok = false; return false; } @@ -710,64 +738,83 @@ bool SimpSolver::eliminate(bool turn_off_elim) // Main simplification loop: // - int toPerform = clauses.size()<=4800000; - - if(!toPerform) { - printf("c Too many clauses... No preprocessing\n"); + int toPerform = clauses.size() <= 4800000; + + if (!toPerform) + { + printf("c Too many clauses... 
No preprocessing\n"); } - while (toPerform && (n_touched > 0 || bwdsub_assigns < trail.size() || elim_heap.size() > 0)){ + while (toPerform && (n_touched > 0 || bwdsub_assigns < trail.size() || elim_heap.size() > 0)) + { gatherTouchedClauses(); - // printf(" ## (time = %6.2f s) BWD-SUB: queue = %d, trail = %d\n", cpuTime(), subsumption_queue.size(), trail.size() - bwdsub_assigns); - if ((subsumption_queue.size() > 0 || bwdsub_assigns < trail.size()) && - !backwardSubsumptionCheck(true)){ - ok = false; goto cleanup; } + // printf(" ## (time = %6.2f s) BWD-SUB: queue = %d, trail = %d\n", cpuTime(), subsumption_queue.size(), + // trail.size() - bwdsub_assigns); + if ((subsumption_queue.size() > 0 || bwdsub_assigns < trail.size()) && !backwardSubsumptionCheck(true)) + { + ok = false; + goto cleanup; + } // Empty elim_heap and return immediately on user-interrupt: - if (asynch_interrupt){ + if (asynch_interrupt) + { assert(bwdsub_assigns == trail.size()); assert(subsumption_queue.size() == 0); assert(n_touched == 0); elim_heap.clear(); - goto cleanup; } + goto cleanup; + } // printf(" ## (time = %6.2f s) ELIM: vars = %d\n", cpuTime(), elim_heap.size()); - for (int cnt = 0; !elim_heap.empty(); cnt++){ + for (int cnt = 0; !elim_heap.empty(); cnt++) + { Var elim = elim_heap.removeMin(); - - if (asynch_interrupt) break; - if (isEliminated(elim) || value(elim) != l_Undef) continue; + if (asynch_interrupt) + break; + + if (isEliminated(elim) || value(elim) != l_Undef) + continue; if (verbosity >= 2 && cnt % 100 == 0) printf("elimination left: %10d\r", elim_heap.size()); - if (use_asymm){ + if (use_asymm) + { // Temporarily freeze variable. 
Otherwise, it would immediately end up on the queue again: bool was_frozen = frozen[elim]; - frozen[elim] = true; - if (!asymmVar(elim)){ - ok = false; goto cleanup; } - frozen[elim] = was_frozen; } + frozen[elim] = true; + if (!asymmVar(elim)) + { + ok = false; + goto cleanup; + } + frozen[elim] = was_frozen; + } // At this point, the variable may have been set by assymetric branching, so check it // again. Also, don't eliminate frozen variables: - if (use_elim && value(elim) == l_Undef && !frozen[elim] && !eliminateVar(elim)){ - ok = false; goto cleanup; } + if (use_elim && value(elim) == l_Undef && !frozen[elim] && !eliminateVar(elim)) + { + ok = false; + goto cleanup; + } checkGarbage(simp_garbage_frac); } assert(subsumption_queue.size() == 0); } - cleanup: +cleanup: // If no more simplification is needed, free all simplification-related data structures: - if (turn_off_elim){ - touched .clear(true); - occurs .clear(true); - n_occ .clear(true); + if (turn_off_elim) + { + touched.clear(true); + occurs.clear(true); + n_occ.clear(true); elim_heap.clear(true); subsumption_queue.clear(true); @@ -778,73 +825,69 @@ bool SimpSolver::eliminate(bool turn_off_elim) // Force full cleanup (this is safe and desirable since it only happens once): rebuildOrderHeap(); garbageCollect(); - }else{ + } + else + { // Cheaper cleanup: - cleanUpClauses(); // TODO: can we make 'cleanUpClauses()' not be linear in the problem size somehow? + cleanUpClauses(); // TODO: can we make 'cleanUpClauses()' not be linear in the problem size somehow? 
checkGarbage(); } if (verbosity >= 0 && elimclauses.size() > 0) - printf("c | Eliminated clauses: %10.2f Mb |\n", - double(elimclauses.size() * sizeof(uint32_t)) / (1024*1024)); + printf( + "c | Eliminated clauses: %10.2f Mb |\n", + double(elimclauses.size() * sizeof(uint32_t)) / (1024 * 1024)); - return ok; - - } - void SimpSolver::cleanUpClauses() { occurs.cleanAll(); - int i,j; + int i, j; for (i = j = 0; i < clauses.size(); i++) if (ca[clauses[i]].mark() == 0) clauses[j++] = clauses[i]; clauses.shrink(i - j); } - //================================================================================================= // Garbage Collection methods: - void SimpSolver::relocAll(ClauseAllocator& to) { - if (!use_simplification) return; + if (!use_simplification) + return; // All occurs lists: // - for (int i = 0; i < nVars(); i++){ + for (int i = 0; i < nVars(); i++) + { vec& cs = occurs[i]; - for (int j = 0; j < cs.size(); j++) - ca.reloc(cs[j], to); + for (int j = 0; j < cs.size(); j++) ca.reloc(cs[j], to); } // Subsumption queue: // - for (int i = 0; i < subsumption_queue.size(); i++) - ca.reloc(subsumption_queue[i], to); + for (int i = 0; i < subsumption_queue.size(); i++) ca.reloc(subsumption_queue[i], to); // Temporary clause: // ca.reloc(bwdsub_tmpunit, to); } - void SimpSolver::garbageCollect() { // Initialize the next region to a size corresponding to the estimated utilization degree. This // is not precise but should avoid some unnecessary reallocations for the new region: - ClauseAllocator to(ca.size() - ca.wasted()); + ClauseAllocator to(ca.size() - ca.wasted()); cleanUpClauses(); - to.extra_clause_field = ca.extra_clause_field; // NOTE: this is important to keep (or lose) the extra fields. + to.extra_clause_field = ca.extra_clause_field; // NOTE: this is important to keep (or lose) the extra fields. 
relocAll(to); Solver::relocAll(to); if (verbosity >= 2) - printf("| Garbage collection: %12d bytes => %12d bytes |\n", - ca.size()*ClauseAllocator::Unit_Size, to.size()*ClauseAllocator::Unit_Size); + printf("| Garbage collection: %12d bytes => %12d bytes |\n", + ca.size() * ClauseAllocator::Unit_Size, to.size() * ClauseAllocator::Unit_Size); to.moveTo(ca); } diff --git a/vendors/mugen/glucose-syrup-4.1/simp/SimpSolver.h b/vendors/mugen/glucose-syrup-4.1/simp/SimpSolver.h new file mode 100644 index 0000000000..9611b32bf0 --- /dev/null +++ b/vendors/mugen/glucose-syrup-4.1/simp/SimpSolver.h @@ -0,0 +1,316 @@ +/***************************************************************************************[SimpSolver.h] + Glucose -- Copyright (c) 2009-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + LRI - Univ. Paris Sud, France (2009-2013) + Labri - Univ. Bordeaux, France + + Syrup (Glucose Parallel) -- Copyright (c) 2013-2014, Gilles Audemard, Laurent Simon + CRIL - Univ. Artois, France + Labri - Univ. Bordeaux, France + +Glucose sources are based on MiniSat (see below MiniSat copyrights). Permissions and copyrights of +Glucose (sources until 2013, Glucose 3.0, single core) are exactly the same as Minisat on which it +is based on. (see below). + +Glucose-Syrup sources are based on another copyright. 
Permissions and copyrights for the parallel +version of Glucose-Syrup (the "Software") are granted, free of charge, to deal with the Software +without restriction, including the rights to use, copy, modify, merge, publish, distribute, +sublicence, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +- The above and below copyrights notices and this permission notice shall be included in all +copies or substantial portions of the Software; +- The parallel version of Glucose (all files modified since Glucose 3.0 releases, 2013) cannot +be used in any competitive event (sat competitions/evaluations) without the express permission of +the authors (Gilles Audemard / Laurent Simon). This is also the case for any competitive event +using Glucose Parallel as an embedded SAT engine (single core or not). + + +--------------- Original Minisat Copyrights + +Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson +Copyright (c) 2007-2010, Niklas Sorensson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + **************************************************************************************************/ + +#ifndef Glucose_SimpSolver_h +#define Glucose_SimpSolver_h + +#include "core/Solver.h" +#include "mtl/Clone.h" +#include "mtl/Queue.h" + +namespace Glucose +{ + +//================================================================================================= + +class SimpSolver : public Solver +{ + public: + // Constructor/Destructor: + // + SimpSolver(); + ~SimpSolver(); + + SimpSolver(const SimpSolver& s); + + /** + * Clone function + */ + virtual Clone* clone() const + { + return new SimpSolver(*this); + } + + // Problem specification: + // + virtual Var newVar(bool polarity = true, + bool dvar = true); // Add a new variable with parameters specifying variable mode. + bool addClause(const vec& ps); + bool addEmptyClause(); // Add the empty clause to the solver. + bool addClause(Lit p); // Add a unit clause to the solver. + bool addClause(Lit p, Lit q); // Add a binary clause to the solver. + bool addClause(Lit p, Lit q, Lit r); // Add a ternary clause to the solver. + virtual bool addClause_(vec& ps); + bool substitute(Var v, Lit x); // Replace all occurences of v with x (may cause a contradiction). + + // Variable mode: + // + void setFrozen(Var v, bool b); // If a variable is frozen it will not be eliminated. 
+ bool isEliminated(Var v) const; + + // Solving: + // + bool solve(const vec& assumps, bool do_simp = true, bool turn_off_simp = false); + lbool solveLimited(const vec& assumps, bool do_simp = true, bool turn_off_simp = false); + bool solve(bool do_simp = true, bool turn_off_simp = false); + bool solve(Lit p, bool do_simp = true, bool turn_off_simp = false); + bool solve(Lit p, Lit q, bool do_simp = true, bool turn_off_simp = false); + bool solve(Lit p, Lit q, Lit r, bool do_simp = true, bool turn_off_simp = false); + bool eliminate(bool turn_off_elim = false); // Perform variable elimination based simplification. + + // Memory managment: + // + virtual void garbageCollect(); + + // Generate a (possibly simplified) DIMACS file: + // +#if 0 + void toDimacs (const char* file, const vec& assumps); + void toDimacs (const char* file); + void toDimacs (const char* file, Lit p); + void toDimacs (const char* file, Lit p, Lit q); + void toDimacs (const char* file, Lit p, Lit q, Lit r); +#endif + + // Mode of operation: + // + int parsing; + int grow; // Allow a variable elimination step to grow by a number of clauses (default to zero). + int clause_lim; // Variables are not eliminated if it produces a resolvent with a length above this limit. + // -1 means no limit. + int subsumption_lim; // Do not check if subsumption against a clause larger than this. -1 means no limit. + double + simp_garbage_frac; // A different limit for when to issue a GC during simplification (Also see 'garbage_frac'). + + bool use_asymm; // Shrink clauses by asymmetric branching. + bool use_rcheck; // Check if a clause is already implied. Prett costly, and subsumes subsumptions :) + bool use_elim; // Perform variable elimination. 
+ // Statistics: + // + int merges; + int asymm_lits; + int eliminated_vars; + bool use_simplification; + + protected: + // Helper structures: + // + struct ElimLt + { + const vec& n_occ; + explicit ElimLt(const vec& no) : n_occ(no) {} + + // TODO: are 64-bit operations here noticably bad on 32-bit platforms? Could use a saturating + // 32-bit implementation instead then, but this will have to do for now. + uint64_t cost(Var x) const + { + return (uint64_t)n_occ[toInt(mkLit(x))] * (uint64_t)n_occ[toInt(~mkLit(x))]; + } + bool operator()(Var x, Var y) const + { + return cost(x) < cost(y); + } + + // TODO: investigate this order alternative more. + // bool operator()(Var x, Var y) const { + // int c_x = cost(x); + // int c_y = cost(y); + // return c_x < c_y || c_x == c_y && x < y; } + }; + + struct ClauseDeleted + { + const ClauseAllocator& ca; + explicit ClauseDeleted(const ClauseAllocator& _ca) : ca(_ca) {} + bool operator()(const CRef& cr) const + { + return ca[cr].mark() == 1; + } + }; + + // Solver state: + // + int elimorder; + vec elimclauses; + vec touched; + OccLists, ClauseDeleted> occurs; + vec n_occ; + Heap elim_heap; + Queue subsumption_queue; + vec frozen; + vec eliminated; + int bwdsub_assigns; + int n_touched; + + // Temporaries: + // + CRef bwdsub_tmpunit; + + // Main internal methods: + // + virtual lbool solve_(bool do_simp = true, bool turn_off_simp = false); + bool asymm(Var v, CRef cr); + bool asymmVar(Var v); + void updateElimHeap(Var v); + void gatherTouchedClauses(); + bool merge(const Clause& _ps, const Clause& _qs, Var v, vec& out_clause); + bool merge(const Clause& _ps, const Clause& _qs, Var v, int& size); + bool backwardSubsumptionCheck(bool verbose = false); + bool eliminateVar(Var v); + void extendModel(); + + void removeClause(CRef cr, bool inPurgatory = false); + bool strengthenClause(CRef cr, Lit l); + void cleanUpClauses(); + bool implied(const vec& c); + virtual void relocAll(ClauseAllocator& to); +}; + 
+//================================================================================================= +// Implementation of inline methods: + +inline bool SimpSolver::isEliminated(Var v) const +{ + return eliminated[v]; +} +inline void SimpSolver::updateElimHeap(Var v) +{ + assert(use_simplification); + // if (!frozen[v] && !isEliminated(v) && value(v) == l_Undef) + if (elim_heap.inHeap(v) || (!frozen[v] && !isEliminated(v) && value(v) == l_Undef)) + elim_heap.update(v); +} + +inline bool SimpSolver::addClause(const vec& ps) +{ + ps.copyTo(add_tmp); + return addClause_(add_tmp); +} +inline bool SimpSolver::addEmptyClause() +{ + add_tmp.clear(); + return addClause_(add_tmp); +} +inline bool SimpSolver::addClause(Lit p) +{ + add_tmp.clear(); + add_tmp.push(p); + return addClause_(add_tmp); +} +inline bool SimpSolver::addClause(Lit p, Lit q) +{ + add_tmp.clear(); + add_tmp.push(p); + add_tmp.push(q); + return addClause_(add_tmp); +} +inline bool SimpSolver::addClause(Lit p, Lit q, Lit r) +{ + add_tmp.clear(); + add_tmp.push(p); + add_tmp.push(q); + add_tmp.push(r); + return addClause_(add_tmp); +} +inline void SimpSolver::setFrozen(Var v, bool b) +{ + frozen[v] = (char)b; + if (use_simplification && !b) + { + updateElimHeap(v); + } +} + +inline bool SimpSolver::solve(bool do_simp, bool turn_off_simp) +{ + budgetOff(); + assumptions.clear(); + return solve_(do_simp, turn_off_simp) == l_True; +} +inline bool SimpSolver::solve(Lit p, bool do_simp, bool turn_off_simp) +{ + budgetOff(); + assumptions.clear(); + assumptions.push(p); + return solve_(do_simp, turn_off_simp) == l_True; +} +inline bool SimpSolver::solve(Lit p, Lit q, bool do_simp, bool turn_off_simp) +{ + budgetOff(); + assumptions.clear(); + assumptions.push(p); + assumptions.push(q); + return solve_(do_simp, turn_off_simp) == l_True; +} +inline bool SimpSolver::solve(Lit p, Lit q, Lit r, bool do_simp, bool turn_off_simp) +{ + budgetOff(); + assumptions.clear(); + assumptions.push(p); + assumptions.push(q); + 
assumptions.push(r); + return solve_(do_simp, turn_off_simp) == l_True; +} +inline bool SimpSolver::solve(const vec& assumps, bool do_simp, bool turn_off_simp) +{ + budgetOff(); + assumps.copyTo(assumptions); + return solve_(do_simp, turn_off_simp) == l_True; +} + +inline lbool SimpSolver::solveLimited(const vec& assumps, bool do_simp, bool turn_off_simp) +{ + assumps.copyTo(assumptions); + return solve_(do_simp, turn_off_simp); +} + +//================================================================================================= +} // namespace Glucose + +#endif diff --git a/libs/mugen/glucose-syrup-4.1/utils/Makefile b/vendors/mugen/glucose-syrup-4.1/utils/Makefile similarity index 100% rename from libs/mugen/glucose-syrup-4.1/utils/Makefile rename to vendors/mugen/glucose-syrup-4.1/utils/Makefile diff --git a/libs/mugen/glucose-syrup-4.1/utils/Options.cc b/vendors/mugen/glucose-syrup-4.1/utils/Options.cc similarity index 80% rename from libs/mugen/glucose-syrup-4.1/utils/Options.cc rename to vendors/mugen/glucose-syrup-4.1/utils/Options.cc index 73d7f5805c..e6a60d0bc5 100644 --- a/libs/mugen/glucose-syrup-4.1/utils/Options.cc +++ b/vendors/mugen/glucose-syrup-4.1/utils/Options.cc @@ -17,8 +17,9 @@ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**************************************************************************************************/ -#include "mtl/Sort.h" #include "utils/Options.h" + +#include "mtl/Sort.h" #include "utils/ParseUtils.h" using namespace Glucose; @@ -26,25 +27,33 @@ using namespace Glucose; void Glucose::parseOptions(int& argc, char** argv, bool strict) { int i, j; - for (i = j = 1; i < argc; i++){ + for (i = j = 1; i < argc; i++) + { const char* str = argv[i]; - if (match(str, "--") && match(str, Option::getHelpPrefixString()) && match(str, "help")){ + if (match(str, "--") && match(str, Option::getHelpPrefixString()) && match(str, "help")) + { if (*str == '\0') printUsageAndExit(argc, argv); else if (match(str, "-verb")) printUsageAndExit(argc, argv, true); - } else { + } + else + { bool parsed_ok = false; - - for (int k = 0; !parsed_ok && k < Option::getOptionList().size(); k++){ + + for (int k = 0; !parsed_ok && k < Option::getOptionList().size(); k++) + { parsed_ok = Option::getOptionList()[k]->parse(argv[i]); - // fprintf(stderr, "checking %d: %s against flag <%s> (%s)\n", i, argv[i], Option::getOptionList()[k]->name, parsed_ok ? "ok" : "skip"); + // fprintf(stderr, "checking %d: %s against flag <%s> (%s)\n", i, argv[i], + // Option::getOptionList()[k]->name, parsed_ok ? "ok" : "skip"); } if (!parsed_ok) if (strict && match(argv[i], "-")) - fprintf(stderr, "ERROR! Unknown flag \"%s\". Use '--%shelp' for help.\n", argv[i], Option::getHelpPrefixString()), exit(1); + fprintf(stderr, "ERROR! Unknown flag \"%s\". 
Use '--%shelp' for help.\n", argv[i], + Option::getHelpPrefixString()), + exit(1); else argv[j++] = argv[i]; } @@ -53,21 +62,27 @@ void Glucose::parseOptions(int& argc, char** argv, bool strict) argc -= (i - j); } - -void Glucose::setUsageHelp (const char* str){ Option::getUsageString() = str; } -void Glucose::setHelpPrefixStr (const char* str){ Option::getHelpPrefixString() = str; } -void Glucose::printUsageAndExit (int argc, char** argv, bool verbose) +void Glucose::setUsageHelp(const char* str) +{ + Option::getUsageString() = str; +} +void Glucose::setHelpPrefixStr(const char* str) +{ + Option::getHelpPrefixString() = str; +} +void Glucose::printUsageAndExit(int argc, char** argv, bool verbose) { const char* usage = Option::getUsageString(); if (usage != NULL) fprintf(stderr, usage, argv[0]); - sort(Option::getOptionList(), Option::OptionLt()); + sort(Option::getOptionList(), Option::OptionLt()); const char* prev_cat = NULL; const char* prev_type = NULL; - for (int i = 0; i < Option::getOptionList().size(); i++){ + for (int i = 0; i < Option::getOptionList().size(); i++) + { const char* cat = Option::getOptionList()[i]->category; const char* type = Option::getOptionList()[i]->type_name; @@ -88,4 +103,3 @@ void Glucose::printUsageAndExit (int argc, char** argv, bool verbose) fprintf(stderr, "\n"); exit(0); } - diff --git a/libs/mugen/glucose-syrup-4.1/utils/Options.h b/vendors/mugen/glucose-syrup-4.1/utils/Options.h similarity index 59% rename from libs/mugen/glucose-syrup-4.1/utils/Options.h rename to vendors/mugen/glucose-syrup-4.1/utils/Options.h index a86e4c7e47..ae3efbccf4 100644 --- a/libs/mugen/glucose-syrup-4.1/utils/Options.h +++ b/vendors/mugen/glucose-syrup-4.1/utils/Options.h @@ -20,122 +20,146 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA #ifndef Glucose_Options_h #define Glucose_Options_h -#include -#include -#include -#include - #include "mtl/IntTypes.h" #include "mtl/Vec.h" #include "utils/ParseUtils.h" 
-namespace Glucose { +#include +#include +#include +#include + +namespace Glucose +{ //================================================================================================== // Top-level option parse/help functions: - -extern void parseOptions (int& argc, char** argv, bool strict = false); -extern void printUsageAndExit(int argc, char** argv, bool verbose = false); -extern void setUsageHelp (const char* str); -extern void setHelpPrefixStr (const char* str); - +extern void parseOptions(int& argc, char** argv, bool strict = false); +extern void printUsageAndExit(int argc, char** argv, bool verbose = false); +extern void setUsageHelp(const char* str); +extern void setHelpPrefixStr(const char* str); //================================================================================================== // Options is an abstract class that gives the interface for all types options: - class Option { - protected: + protected: const char* name; const char* description; const char* category; const char* type_name; - static vec& getOptionList () { static vec options; return options; } - static const char*& getUsageString() { static const char* usage_str; return usage_str; } - static const char*& getHelpPrefixString() { static const char* help_prefix_str = ""; return help_prefix_str; } + static vec& getOptionList() + { + static vec options; + return options; + } + static const char*& getUsageString() + { + static const char* usage_str; + return usage_str; + } + static const char*& getHelpPrefixString() + { + static const char* help_prefix_str = ""; + return help_prefix_str; + } - struct OptionLt { - bool operator()(const Option* x, const Option* y) { + struct OptionLt + { + bool operator()(const Option* x, const Option* y) + { int test1 = strcmp(x->category, y->category); return test1 < 0 || test1 == 0 && strcmp(x->type_name, y->type_name) < 0; } }; - Option(const char* name_, - const char* desc_, - const char* cate_, - const char* type_) : - name (name_) - , 
description(desc_) - , category (cate_) - , type_name (type_) - { - getOptionList().push(this); + Option(const char* name_, const char* desc_, const char* cate_, const char* type_) : + name(name_), + description(desc_), + category(cate_), + type_name(type_) + { + getOptionList().push(this); } - public: + public: virtual ~Option() {} - virtual bool parse (const char* str) = 0; - virtual void help (bool verbose = false) = 0; + virtual bool parse(const char* str) = 0; + virtual void help(bool verbose = false) = 0; - friend void parseOptions (int& argc, char** argv, bool strict); - friend void printUsageAndExit (int argc, char** argv, bool verbose); - friend void setUsageHelp (const char* str); - friend void setHelpPrefixStr (const char* str); + friend void parseOptions(int& argc, char** argv, bool strict); + friend void printUsageAndExit(int argc, char** argv, bool verbose); + friend void setUsageHelp(const char* str); + friend void setHelpPrefixStr(const char* str); }; - //================================================================================================== // Range classes with specialization for floating types: - -struct IntRange { +struct IntRange +{ int begin; int end; IntRange(int b, int e) : begin(b), end(e) {} }; -struct Int64Range { +struct Int64Range +{ int64_t begin; int64_t end; Int64Range(int64_t b, int64_t e) : begin(b), end(e) {} }; -struct DoubleRange { +struct DoubleRange +{ double begin; double end; - bool begin_inclusive; - bool end_inclusive; - DoubleRange(double b, bool binc, double e, bool einc) : begin(b), end(e), begin_inclusive(binc), end_inclusive(einc) {} + bool begin_inclusive; + bool end_inclusive; + DoubleRange(double b, bool binc, double e, bool einc) : begin(b), end(e), begin_inclusive(binc), end_inclusive(einc) + {} }; - //================================================================================================== // Double options: - class DoubleOption : public Option { - protected: + protected: DoubleRange range; 
double value; - public: - DoubleOption(const char* c, const char* n, const char* d, double def = double(), DoubleRange r = DoubleRange(-HUGE_VAL, false, HUGE_VAL, false)) - : Option(n, d, c, ""), range(r), value(def) { + public: + DoubleOption(const char* c, const char* n, const char* d, double def = double(), + DoubleRange r = DoubleRange(-HUGE_VAL, false, HUGE_VAL, false)) : + Option(n, d, c, ""), + range(r), + value(def) + { // FIXME: set LC_NUMERIC to "C" to make sure that strtof/strtod parses decimal point correctly. } - operator double (void) const { return value; } - operator double& (void) { return value; } - DoubleOption& operator=(double x) { value = x; return *this; } + operator double(void) const + { + return value; + } + operator double&(void) + { + return value; + } + DoubleOption& operator=(double x) + { + value = x; + return *this; + } - virtual bool parse(const char* str){ - const char* span = str; + virtual bool parse(const char* str) + { + const char* span = str; if (!match(span, "-") || !match(span, name) || !match(span, "=")) return false; @@ -143,14 +167,18 @@ class DoubleOption : public Option char* end; double tmp = strtod(span, &end); - if (end == NULL) + if (end == NULL) return false; - else if (tmp >= range.end && (!range.end_inclusive || tmp != range.end)){ + else if (tmp >= range.end && (!range.end_inclusive || tmp != range.end)) + { fprintf(stderr, "ERROR! value <%s> is too large for option \"%s\".\n", span, name); exit(1); - }else if (tmp <= range.begin && (!range.begin_inclusive || tmp != range.begin)){ + } + else if (tmp <= range.begin && (!range.begin_inclusive || tmp != range.begin)) + { fprintf(stderr, "ERROR! value <%s> is too small for option \"%s\".\n", span, name); - exit(1); } + exit(1); + } value = tmp; // fprintf(stderr, "READ VALUE: %g\n", value); @@ -158,42 +186,52 @@ class DoubleOption : public Option return true; } - virtual void help (bool verbose = false){ - fprintf(stderr, " -%-12s = %-8s %c%4.2g .. 
%4.2g%c (default: %g)\n", - name, type_name, - range.begin_inclusive ? '[' : '(', - range.begin, - range.end, - range.end_inclusive ? ']' : ')', - value); - if (verbose){ + virtual void help(bool verbose = false) + { + fprintf(stderr, " -%-12s = %-8s %c%4.2g .. %4.2g%c (default: %g)\n", name, type_name, + range.begin_inclusive ? '[' : '(', range.begin, range.end, range.end_inclusive ? ']' : ')', value); + if (verbose) + { fprintf(stderr, "\n %s\n", description); fprintf(stderr, "\n"); } } }; - //================================================================================================== // Int options: - class IntOption : public Option { - protected: + protected: IntRange range; int32_t value; - public: - IntOption(const char* c, const char* n, const char* d, int32_t def = int32_t(), IntRange r = IntRange(INT32_MIN, INT32_MAX)) - : Option(n, d, c, ""), range(r), value(def) {} - - operator int32_t (void) const { return value; } - operator int32_t& (void) { return value; } - IntOption& operator= (int32_t x) { value = x; return *this; } + public: + IntOption(const char* c, const char* n, const char* d, int32_t def = int32_t(), + IntRange r = IntRange(INT32_MIN, INT32_MAX)) : + Option(n, d, c, ""), + range(r), + value(def) + {} + + operator int32_t(void) const + { + return value; + } + operator int32_t&(void) + { + return value; + } + IntOption& operator=(int32_t x) + { + value = x; + return *this; + } - virtual bool parse(const char* str){ - const char* span = str; + virtual bool parse(const char* str) + { + const char* span = str; if (!match(span, "-") || !match(span, name) || !match(span, "=")) return false; @@ -201,21 +239,26 @@ class IntOption : public Option char* end; int32_t tmp = strtol(span, &end, 10); - if (end == NULL) + if (end == NULL) return false; - else if (tmp > range.end){ + else if (tmp > range.end) + { fprintf(stderr, "ERROR! 
value <%s> is too large for option \"%s\".\n", span, name); exit(1); - }else if (tmp < range.begin){ + } + else if (tmp < range.begin) + { fprintf(stderr, "ERROR! value <%s> is too small for option \"%s\".\n", span, name); - exit(1); } + exit(1); + } value = tmp; return true; } - virtual void help (bool verbose = false){ + virtual void help(bool verbose = false) + { fprintf(stderr, " -%-12s = %-8s [", name, type_name); if (range.begin == INT32_MIN) fprintf(stderr, "imin"); @@ -229,33 +272,48 @@ class IntOption : public Option fprintf(stderr, "%4d", range.end); fprintf(stderr, "] (default: %d)\n", value); - if (verbose){ + if (verbose) + { fprintf(stderr, "\n %s\n", description); fprintf(stderr, "\n"); } } }; - // Leave this out for visual C++ until Microsoft implements C99 and gets support for strtoll. #ifndef _MSC_VER class Int64Option : public Option { - protected: + protected: Int64Range range; - int64_t value; - - public: - Int64Option(const char* c, const char* n, const char* d, int64_t def = int64_t(), Int64Range r = Int64Range(INT64_MIN, INT64_MAX)) - : Option(n, d, c, ""), range(r), value(def) {} - - operator int64_t (void) const { return value; } - operator int64_t& (void) { return value; } - Int64Option& operator= (int64_t x) { value = x; return *this; } + int64_t value; + + public: + Int64Option(const char* c, const char* n, const char* d, int64_t def = int64_t(), + Int64Range r = Int64Range(INT64_MIN, INT64_MAX)) : + Option(n, d, c, ""), + range(r), + value(def) + {} + + operator int64_t(void) const + { + return value; + } + operator int64_t&(void) + { + return value; + } + Int64Option& operator=(int64_t x) + { + value = x; + return *this; + } - virtual bool parse(const char* str){ - const char* span = str; + virtual bool parse(const char* str) + { + const char* span = str; if (!match(span, "-") || !match(span, name) || !match(span, "=")) return false; @@ -263,21 +321,26 @@ class Int64Option : public Option char* end; int64_t tmp = strtoll(span, &end, 
10); - if (end == NULL) + if (end == NULL) return false; - else if (tmp > range.end){ + else if (tmp > range.end) + { fprintf(stderr, "ERROR! value <%s> is too large for option \"%s\".\n", span, name); exit(1); - }else if (tmp < range.begin){ + } + else if (tmp < range.begin) + { fprintf(stderr, "ERROR! value <%s> is too small for option \"%s\".\n", span, name); - exit(1); } + exit(1); + } value = tmp; return true; } - virtual void help (bool verbose = false){ + virtual void help(bool verbose = false) + { fprintf(stderr, " -%-12s = %-8s [", name, type_name); if (range.begin == INT64_MIN) fprintf(stderr, "imin"); @@ -290,8 +353,9 @@ class Int64Option : public Option else fprintf(stderr, "%4" PRIi64, range.end); - fprintf(stderr, "] (default: %" PRIi64")\n", value); - if (verbose){ + fprintf(stderr, "] (default: %" PRIi64 ")\n", value); + if (verbose) + { fprintf(stderr, "\n %s\n", description); fprintf(stderr, "\n"); } @@ -302,20 +366,33 @@ class Int64Option : public Option //================================================================================================== // String option: - class StringOption : public Option { const char* value; - public: - StringOption(const char* c, const char* n, const char* d, const char* def = NULL) - : Option(n, d, c, ""), value(def) {} - operator const char* (void) const { return value; } - operator const char*& (void) { return value; } - StringOption& operator= (const char* x) { value = x; return *this; } + public: + StringOption(const char* c, const char* n, const char* d, const char* def = NULL) : + Option(n, d, c, ""), + value(def) + {} + + operator const char*(void) const + { + return value; + } + operator const char*&(void) + { + return value; + } + StringOption& operator=(const char* x) + { + value = x; + return *this; + } - virtual bool parse(const char* str){ - const char* span = str; + virtual bool parse(const char* str) + { + const char* span = str; if (!match(span, "-") || !match(span, name) || !match(span, 
"=")) return false; @@ -324,56 +401,70 @@ class StringOption : public Option return true; } - virtual void help (bool verbose = false){ + virtual void help(bool verbose = false) + { fprintf(stderr, " -%-10s = %8s\n", name, type_name); - if (verbose){ + if (verbose) + { fprintf(stderr, "\n %s\n", description); fprintf(stderr, "\n"); } - } + } }; - //================================================================================================== // Bool option: - class BoolOption : public Option { bool value; - public: - BoolOption(const char* c, const char* n, const char* d, bool v) - : Option(n, d, c, ""), value(v) {} + public: + BoolOption(const char* c, const char* n, const char* d, bool v) : Option(n, d, c, ""), value(v) {} + + operator bool(void) const + { + return value; + } + operator bool&(void) + { + return value; + } + BoolOption& operator=(bool b) + { + value = b; + return *this; + } - operator bool (void) const { return value; } - operator bool& (void) { return value; } - BoolOption& operator=(bool b) { value = b; return *this; } + virtual bool parse(const char* str) + { + const char* span = str; - virtual bool parse(const char* str){ - const char* span = str; - - if (match(span, "-")){ + if (match(span, "-")) + { bool b = !match(span, "no-"); - if (strcmp(span, name) == 0){ + if (strcmp(span, name) == 0) + { value = b; - return true; } + return true; + } } return false; } - virtual void help (bool verbose = false){ + virtual void help(bool verbose = false) + { fprintf(stderr, " -%s, -no-%s", name, name); - for (uint32_t i = 0; i < 32 - strlen(name)*2; i++) - fprintf(stderr, " "); + for (uint32_t i = 0; i < 32 - strlen(name) * 2; i++) fprintf(stderr, " "); fprintf(stderr, " "); fprintf(stderr, "(default: %s)\n", value ? 
"on" : "off"); - if (verbose){ + if (verbose) + { fprintf(stderr, "\n %s\n", description); fprintf(stderr, "\n"); } @@ -381,6 +472,6 @@ class BoolOption : public Option }; //================================================================================================= -} +} // namespace Glucose #endif diff --git a/libs/mugen/glucose-syrup-4.1/utils/ParseUtils.h b/vendors/mugen/glucose-syrup-4.1/utils/ParseUtils.h similarity index 52% rename from libs/mugen/glucose-syrup-4.1/utils/ParseUtils.h rename to vendors/mugen/glucose-syrup-4.1/utils/ParseUtils.h index f411086100..17cf56910d 100644 --- a/libs/mugen/glucose-syrup-4.1/utils/ParseUtils.h +++ b/vendors/mugen/glucose-syrup-4.1/utils/ParseUtils.h @@ -21,111 +21,146 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA #ifndef Glucose_ParseUtils_h #define Glucose_ParseUtils_h -#include -#include #include - +#include +#include #include -namespace Glucose { +namespace Glucose +{ //------------------------------------------------------------------------------------------------- // A simple buffered character stream class: static const int buffer_size = 1048576; - -class StreamBuffer { +class StreamBuffer +{ gzFile in; unsigned char buf[buffer_size]; int pos; int size; - void assureLookahead() { - if (pos >= size) { + void assureLookahead() + { + if (pos >= size) + { pos = 0; - size = gzread(in, buf, sizeof(buf)); } } - -public: - explicit StreamBuffer(gzFile i) : in(i), pos(0), size(0) { assureLookahead(); } - - int operator * () const { return (pos >= size) ? EOF : buf[pos]; } - void operator ++ () { pos++; assureLookahead(); } - int position () const { return pos; } + size = gzread(in, buf, sizeof(buf)); + } + } + + public: + explicit StreamBuffer(gzFile i) : in(i), pos(0), size(0) + { + assureLookahead(); + } + + int operator*() const + { + return (pos >= size) ? 
EOF : buf[pos]; + } + void operator++() + { + pos++; + assureLookahead(); + } + int position() const + { + return pos; + } }; - //------------------------------------------------------------------------------------------------- // End-of-file detection functions for StreamBuffer and char*: - -static inline bool isEof(StreamBuffer& in) { return *in == EOF; } -static inline bool isEof(const char* in) { return *in == '\0'; } +static inline bool isEof(StreamBuffer& in) +{ + return *in == EOF; +} +static inline bool isEof(const char* in) +{ + return *in == '\0'; +} //------------------------------------------------------------------------------------------------- // Generic parse functions parametrized over the input-stream type. +template +static void skipWhitespace(B& in) +{ + while ((*in >= 9 && *in <= 13) || *in == 32) ++in; +} -template -static void skipWhitespace(B& in) { - while ((*in >= 9 && *in <= 13) || *in == 32) - ++in; } - +template +static void skipLine(B& in) +{ + for (;;) + { + if (isEof(in)) + return; + if (*in == '\n') + { + ++in; + return; + } + ++in; + } +} -template -static void skipLine(B& in) { - for (;;){ - if (isEof(in)) return; - if (*in == '\n') { ++in; return; } - ++in; } } +template +static double parseDouble(B& in) +{ // only in the form X.XXXXXe-XX + bool neg = false; + double accu = 0.0; + double currentExponent = 1; + int exponent; -template -static double parseDouble(B& in) { // only in the form X.XXXXXe-XX - bool neg= false; - double accu = 0.0; - double currentExponent = 1; - int exponent; - skipWhitespace(in); - if(*in == EOF) return 0; - if (*in == '-') neg = true, ++in; - else if (*in == '+') ++in; - if (*in < '1' || *in > '9') printf("PARSE ERROR! Unexpected char: %c\n", *in), exit(3); - accu = (double)(*in - '0'); - ++in; - if (*in != '.') printf("PARSE ERROR! 
Unexpected char: %c\n", *in),exit(3); - ++in; // skip dot - currentExponent = 0.1; - while (*in >= '0' && *in <= '9') - accu = accu + currentExponent * ((double)(*in - '0')), - currentExponent /= 10, + if (*in == EOF) + return 0; + if (*in == '-') + neg = true, ++in; + else if (*in == '+') ++in; - if (*in != 'e') printf("PARSE ERROR! Unexpected char: %c\n", *in),exit(3); - ++in; // skip dot - exponent = parseInt(in); // read exponent - accu *= pow(10,exponent); - return neg ? -accu:accu; + if (*in < '1' || *in > '9') + printf("PARSE ERROR! Unexpected char: %c\n", *in), exit(3); + accu = (double)(*in - '0'); + ++in; + if (*in != '.') + printf("PARSE ERROR! Unexpected char: %c\n", *in), exit(3); + ++in; // skip dot + currentExponent = 0.1; + while (*in >= '0' && *in <= '9') accu = accu + currentExponent * ((double)(*in - '0')), currentExponent /= 10, ++in; + if (*in != 'e') + printf("PARSE ERROR! Unexpected char: %c\n", *in), exit(3); + ++in; // skip dot + exponent = parseInt(in); // read exponent + accu *= pow(10, exponent); + return neg ? -accu : accu; } - -template -static int parseInt(B& in) { - int val = 0; - bool neg = false; +template +static int parseInt(B& in) +{ + int val = 0; + bool neg = false; skipWhitespace(in); - if (*in == '-') neg = true, ++in; - else if (*in == '+') ++in; - if (*in < '0' || *in > '9') fprintf(stderr, "PARSE ERROR! Unexpected char: %c\n", *in), exit(3); - while (*in >= '0' && *in <= '9') - val = val*10 + (*in - '0'), + if (*in == '-') + neg = true, ++in; + else if (*in == '+') ++in; - return neg ? -val : val; } - + if (*in < '0' || *in > '9') + fprintf(stderr, "PARSE ERROR! Unexpected char: %c\n", *in), exit(3); + while (*in >= '0' && *in <= '9') val = val * 10 + (*in - '0'), ++in; + return neg ? -val : val; +} // String matching: in case of a match the input iterator will be advanced the corresponding // number of characters. 
-template -static bool match(B& in, const char* str) { +template +static bool match(B& in, const char* str) +{ int i; for (i = 0; str[i] != '\0'; i++) if (in[i] != str[i]) @@ -133,19 +168,20 @@ static bool match(B& in, const char* str) { in += i; - return true; + return true; } // String matching: consumes characters eagerly, but does not require random access iterator. -template -static bool eagerMatch(B& in, const char* str) { +template +static bool eagerMatch(B& in, const char* str) +{ for (; *str != '\0'; ++str, ++in) if (*str != *in) return false; - return true; } - + return true; +} //================================================================================================= -} +} // namespace Glucose #endif diff --git a/libs/mugen/glucose-syrup-4.1/utils/System.cc b/vendors/mugen/glucose-syrup-4.1/utils/System.cc similarity index 81% rename from libs/mugen/glucose-syrup-4.1/utils/System.cc rename to vendors/mugen/glucose-syrup-4.1/utils/System.cc index a516e0b9de..17a0ecc812 100644 --- a/libs/mugen/glucose-syrup-4.1/utils/System.cc +++ b/vendors/mugen/glucose-syrup-4.1/utils/System.cc @@ -38,7 +38,8 @@ static inline int memReadStat(int field) sprintf(name, "/proc/%d/statm", pid); FILE* in = fopen(name, "rb"); - if (in == NULL) return 0; + if (in == NULL) + return 0; for (; field >= 0; field--) if (fscanf(in, "%d", &value) != 1) @@ -47,7 +48,6 @@ static inline int memReadStat(int field) return value; } - static inline int memReadPeak(void) { char name[256]; @@ -55,41 +55,54 @@ static inline int memReadPeak(void) sprintf(name, "/proc/%d/status", pid); FILE* in = fopen(name, "rb"); - if (in == NULL) return 0; + if (in == NULL) + return 0; // Find the correct line, beginning with "VmPeak:": int peak_kb = 0; while (!feof(in) && fscanf(in, "VmPeak: %d kB", &peak_kb) != 1) - while (!feof(in) && fgetc(in) != '\n') - ; + while (!feof(in) && fgetc(in) != '\n'); fclose(in); return peak_kb; } -double Glucose::memUsed() { return (double)memReadStat(0) * 
(double)getpagesize() / (1024*1024); } -double Glucose::memUsedPeak() { +double Glucose::memUsed() +{ + return (double)memReadStat(0) * (double)getpagesize() / (1024 * 1024); +} +double Glucose::memUsedPeak() +{ double peak = memReadPeak() / 1024; - return peak == 0 ? memUsed() : peak; } + return peak == 0 ? memUsed() : peak; +} #elif defined(__FreeBSD__) -double Glucose::memUsed(void) { +double Glucose::memUsed(void) +{ struct rusage ru; getrusage(RUSAGE_SELF, &ru); - return (double)ru.ru_maxrss / 1024; } -double MiniSat::memUsedPeak(void) { return memUsed(); } - + return (double)ru.ru_maxrss / 1024; +} +double MiniSat::memUsedPeak(void) +{ + return memUsed(); +} #elif defined(__APPLE__) #include -double Glucose::memUsed(void) { +double Glucose::memUsed(void) +{ malloc_statistics_t t; malloc_zone_statistics(NULL, &t); - return (double)t.max_size_in_use / (1024*1024); } + return (double)t.max_size_in_use / (1024 * 1024); +} #else -double Glucose::memUsed() { - return 0; } +double Glucose::memUsed() +{ + return 0; +} #endif diff --git a/libs/mugen/glucose-syrup-4.1/utils/System.h b/vendors/mugen/glucose-syrup-4.1/utils/System.h similarity index 78% rename from libs/mugen/glucose-syrup-4.1/utils/System.h rename to vendors/mugen/glucose-syrup-4.1/utils/System.h index 9b43c12a03..d629097dbc 100644 --- a/libs/mugen/glucose-syrup-4.1/utils/System.h +++ b/vendors/mugen/glucose-syrup-4.1/utils/System.h @@ -21,19 +21,19 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA #ifndef Glucose_System_h #define Glucose_System_h - #include "mtl/IntTypes.h" //------------------------------------------------------------------------------------------------- -namespace Glucose { +namespace Glucose +{ -static inline double cpuTime(void); // CPU-time in seconds. +static inline double cpuTime(void); // CPU-time in seconds. static inline double realTime(void); -extern double memUsed(); // Memory in mega bytes (returns 0 for unsupported architectures). 
-extern double memUsedPeak(); // Peak-memory in mega bytes (returns 0 for unsupported architectures). +extern double memUsed(); // Memory in mega bytes (returns 0 for unsupported architectures). +extern double memUsedPeak(); // Peak-memory in mega bytes (returns 0 for unsupported architectures). -} +} // namespace Glucose //------------------------------------------------------------------------------------------------- // Implementation of inline functions: @@ -41,24 +41,31 @@ extern double memUsedPeak(); // Peak-memory in mega bytes (returns 0 for #if defined(_MSC_VER) || defined(__MINGW32__) #include -static inline double Glucose::cpuTime(void) { return (double)clock() / CLOCKS_PER_SEC; } +static inline double Glucose::cpuTime(void) +{ + return (double)clock() / CLOCKS_PER_SEC; +} #else -#include #include +#include #include -static inline double Glucose::cpuTime(void) { +static inline double Glucose::cpuTime(void) +{ struct rusage ru; getrusage(RUSAGE_SELF, &ru); - return (double)ru.ru_utime.tv_sec + (double)ru.ru_utime.tv_usec / 1000000; } + return (double)ru.ru_utime.tv_sec + (double)ru.ru_utime.tv_usec / 1000000; +} #endif // Laurent: I know that this will not compile directly under Windows... 
sorry for that -static inline double Glucose::realTime() { +static inline double Glucose::realTime() +{ struct timeval tv; gettimeofday(&tv, NULL); - return (double)tv.tv_sec + (double) tv.tv_usec / 1000000; } + return (double)tv.tv_sec + (double)tv.tv_usec / 1000000; +} #endif diff --git a/libs/mugen/install_python_libraries.sh b/vendors/mugen/install_python_libraries.sh similarity index 100% rename from libs/mugen/install_python_libraries.sh rename to vendors/mugen/install_python_libraries.sh diff --git a/libs/mugen/mugen.py b/vendors/mugen/mugen.py similarity index 69% rename from libs/mugen/mugen.py rename to vendors/mugen/mugen.py index 1baf94a162..d85efb672f 100644 --- a/libs/mugen/mugen.py +++ b/vendors/mugen/mugen.py @@ -1,145 +1,155 @@ -try: - from graphviz import Digraph -except ModuleNotFoundError: - pass +import contextlib + +with contextlib.suppress(ModuleNotFoundError): + from graphviz import Digraph import itertools -from math import log2 -from pysat.solvers import Glucose3 -from pysat.card import * -import sys -import subprocess import os +import subprocess import tempfile +from math import log2 + import wrapt_timeout_decorator +from pysat.card import * +from pysat.solvers import Glucose3 + class SynthesisException(Exception): - ''' + """ Mugen's generic exception class that is thrown whenever something unexpected happens during synthesis. - ''' + """ - def __init__(self, message): - ''' + def __init__(self, message) -> None: + """ :param message: The message to show when the exception is thrown. - ''' + """ self.message = message -CARDINAL_DIRECTIONS = set(['NORTH', 'EAST', 'SOUTH', 'WEST']) + +CARDINAL_DIRECTIONS = {"NORTH", "EAST", "SOUTH", "WEST"} OPPOSITE_DIRECTION = { - 'NORTH': 'SOUTH', - 'EAST' : 'WEST', - 'SOUTH': 'NORTH', - 'WEST' : 'EAST' + "NORTH": "SOUTH", + "EAST": "WEST", + "SOUTH": "NORTH", + "WEST": "EAST", } # Mapping gate types to the corresponding number of fanins. 
GATE_FANIN_RANGE = { - 'NOT': 1, - 'AND': 2, - 'OR': 2, - 'MAJ': 3, - 'WIRE': 1, - 'EMPTY': 0, - 'CROSS': 2 + "NOT": 1, + "AND": 2, + "OR": 2, + "MAJ": 3, + "WIRE": 1, + "EMPTY": 0, + "CROSS": 2, } + # Some utility functions. def is_north(coords1, coords2): - ''' + """ Returns true if and only if coords1 lies to the north of coords2. We say this is the case if they have the same horizontal position but coords1 has a lower vertical position. - ''' + """ return coords1[0] == coords2[0] and coords1[1] < coords2[1] + def is_east(coords1, coords2): - ''' + """ Returns true if and only if coords1 lies to the east of coords2. We say this is the case if they have the same vertical position but coords1 has a higher horizontal position. - ''' + """ return coords1[0] > coords2[0] and coords1[1] == coords2[1] + def is_south(coords1, coords2): - ''' + """ Returns true if and only if coords1 lies to the south of coords2. We say this is the case if they have the same horizontal position but coords1 has a higher vertical position. - ''' + """ return coords1[0] == coords2[0] and coords1[1] > coords2[1] + def is_west(coords1, coords2): - ''' + """ Returns true if and only if coords1 lies to the west of coords2. We say this is the case if they have the same vertical position but coords1 has a lower horizontal position. - ''' + """ return coords1[0] < coords2[0] and coords1[1] == coords2[1] + def get_direction(coords1, coords2): - ''' + """ Given a pair of coordinates, determines in which cardinal direction the information flows. Returns the result as a pair (d1, d2), where d1 is the direction in which the signal leaves from coords1 and d2 is the direction in which it arrives at coords2. 
- ''' + """ if is_north(coords1, coords2): - return ('SOUTH', 'NORTH') - elif is_east(coords1, coords2): - return ('WEST', 'EAST') - elif is_south(coords1, coords2): - return ('NORTH', 'SOUTH') - elif is_west(coords1, coords2): - return ('EAST', 'WEST') - else: - raise SynthesisException('Unknown direction') + return ("SOUTH", "NORTH") + if is_east(coords1, coords2): + return ("WEST", "EAST") + if is_south(coords1, coords2): + return ("NORTH", "SOUTH") + if is_west(coords1, coords2): + return ("EAST", "WEST") + msg = "Unknown direction" + raise SynthesisException(msg) + def get_coords_in_direction(coords, direction): - ''' + """ Given a set of coordinates and a cardinal direction. Returns the coordinates of a node immediately adjacent to the current one in the given direction. Note that this may result in a set of coordinates that are not within the bounds of a clocking scheme (e.g. this may result in negative coordinates). - ''' - if direction == 'NORTH': + """ + if direction == "NORTH": return (coords[0], coords[1] - 1) - elif direction == 'EAST': + if direction == "EAST": return (coords[0] + 1, coords[1]) - elif direction == 'SOUTH': + if direction == "SOUTH": return (coords[0], coords[1] + 1) - elif direction == 'WEST': + if direction == "WEST": return (coords[0] - 1, coords[1]) - else: - raise SynthesisException("Unknown cardinal direction: '{}'".format(direction)) + msg = f"Unknown cardinal direction: '{direction}'" + raise SynthesisException(msg) + def eval_gate(gate_type, inputs): - ''' + """ Evaluates a certain gate type on a list of binary input values. Returns the result. :param gate_type: The type of gate to evaluate. Choices are EMPTY, WIRE, NOT, AND, OR, and MAJ. :param inputs: List of input values. 
- ''' - if gate_type == 'EMPTY': + """ + if gate_type == "EMPTY": return 0 - elif gate_type == 'WIRE': + if gate_type == "WIRE": return inputs[0] - elif gate_type == 'NOT': + if gate_type == "NOT": return 1 - inputs[0] - elif gate_type == 'AND': + if gate_type == "AND": return inputs[0] & inputs[1] - elif gate_type == 'OR': + if gate_type == "OR": return inputs[0] | inputs[1] - elif gate_type == 'MAJ': + if gate_type == "MAJ": return (inputs[0] & inputs[1]) | (inputs[0] & inputs[2]) | (inputs[1] & inputs[2]) - else: - raise SynthesisException("No evaluation support for gate type '{}'".format(gate_type)) + msg = f"No evaluation support for gate type '{gate_type}'" + raise SynthesisException(msg) + class node: - ''' + """ A generic node class, used by both clocking scheme graphs and logic networks. @@ -151,17 +161,17 @@ class node: :ivar is_border_node: This Boolean flag is True iff the node lies on the border of the clocking scheme. :ivar gate_type: The gate type of the node. :ivar dir_map: This optional attribute is set only for nodes with gate_type CROSS. It is a dictionary which maps fanin directions to output directions. For example, if dir_map = { 'WEST': 'NORTH', 'EAST': 'SOUTH'}, then the western fanin is mapped to the northern fanout port and the eastern fanin is mapped to the southern fanout port. - ''' + """ - def __init__(self, *, coords=None, is_pi=False, is_po=False): - ''' + def __init__(self, *, coords=None, is_pi=False, is_po=False) -> None: + """ Creates a new logic node. :param coords: the grid coordinates of the node in the clocking scheme. :param is_pi: is the node a primary input. :param is_po: is the node a primary output. 
- ''' + """ self.coords = coords self.is_pi = is_pi self.virtual_fanin = [] @@ -173,26 +183,25 @@ def __init__(self, *, coords=None, is_pi=False, is_po=False): self.gate_type = None # self.dir_map = {} - def set_fanin(self, in_dir, innode, out_dir): - ''' + def set_fanin(self, in_dir, innode, out_dir) -> None: + """ Sets the fanin port at direction d of this node to innode and updates the fanout of innode by appending this node to it. - ''' + """ self.fanin[in_dir] = innode innode.fanout[out_dir] = self - def __repr__(self): + def __repr__(self) -> str: if self.is_pi: - return 'PI{}'.format(self.coords) - else: - return ''.format(self.coords) - - def __lt__(self, other): + return f"PI{self.coords}" + return f"" + + def __lt__(self, other): return self.coords < other.coords -class logic_network: - ''' +class logic_network: + """ A logic_network is the result of synthesis. Its design is similar to a clocking scheme graph. A major difference is that it cannot contain cycles. However, it can also be accessed using the same @@ -205,24 +214,23 @@ class logic_network: :ivar nodes: A list of all the nodes in the logic network, including the PIs. :ivar node_map: A map from tile coordinates to nodes nodes in the logic network. E.g. to access the node corresponding to tile (0,0) one refers to node_map[(0,0)]. :ivar po_map: A list of size nr_pos mapping output indices to nodes in the network. E.g. to access the first output, one refers to po_map[0]. - ''' - - def __init__(self, shape, nr_pis, nr_pos): - ''' + """ + + def __init__(self, shape, nr_pis, nr_pos) -> None: + """ Creates a new logic network. :param shape: a 2-tuple containing the size of the grid (width x height). :param nr_pis: number of PIs. :param nr_pos: number of POs. 
- ''' - + """ self.shape = shape self.nr_pis = nr_pis self.nr_pos = nr_pos - - self.nodes = [node(coords=i, is_pi=True) for i in range(nr_pis)] + + self.nodes = [node(coords=i, is_pi=True) for i in range(nr_pis)] self.node_map = {} for y in range(shape[1]): for x in range(shape[0]): @@ -230,46 +238,46 @@ def __init__(self, shape, nr_pis, nr_pos): if x == 0 or x == (shape[0] - 1) or y == 0 or y == (shape[1] - 1): n.is_border_node = True self.nodes.append(n) - self.node_map[(x,y)] = n + self.node_map[x, y] = n self.po_map = [None] * nr_pos - def set_output(self, h, coords, d): - ''' + def set_output(self, h, coords, d) -> None: + """ Marks the output port in direction d for the node at coords as the h-th output of the network. - ''' + """ n = self.node_map[coords] - n.fanout[d] = 'PO{}'.format(h) + n.fanout[d] = f"PO{h}" n.is_po = True self.po_map[h] = (n, d) - def __repr__(self): - r = '\n' + def __repr__(self) -> str: + r = "\n" for n in self.nodes: if n.is_pi: continue - r += ' bool: + """ Checks if only border nodes are connected to PIs and POs. Returns True if this is the case and False otherwise. - ''' + """ for n in self.nodes: if n.is_pi: continue @@ -277,21 +285,18 @@ def has_border_io(self): for innode in n.fanin.values(): if innode.is_pi: return False - for (n, d) in self.po_map: - if not n.is_border_node: - return False - return True + return all(n.is_border_node for n, d in self.po_map) - def has_designated_pi(self): - ''' + def has_designated_pi(self) -> bool: + """ Checks if only WIREs are connected to PIs. Moreover, verifies that those designated PI WIREs have only one fanout. Returns True of this is the case and False otherwise. 
- ''' + """ for n in self.nodes: if n.is_pi: continue - if n.gate_type != 'WIRE': + if n.gate_type != "WIRE": for innode in n.fanin.values(): if innode.is_pi: return False @@ -302,97 +307,99 @@ def has_designated_pi(self): return False return True - def verify_designated_pi(self): - ''' + def verify_designated_pi(self) -> None: + """ The same as :func:`has_designated_pi` but raises a :class:`SynthesisException` if the spec is not met. - ''' + """ for n in self.nodes: if n.is_pi: continue - if n.gate_type != 'WIRE': + if n.gate_type != "WIRE": for innode in n.fanin.values(): if innode.is_pi: - raise SynthesisException('{} has gate type {} and fanin PI_{}'.format( - n.coords, n.gate_type, innode.coords)) + msg = f"{n.coords} has gate type {n.gate_type} and fanin PI_{innode.coords}" + raise SynthesisException(msg) else: nr_fanout = len(n.fanout) for innode in n.fanin.values(): if innode.is_pi and nr_fanout > 1: - raise SynthesisException('{} is designated PI WIRE and has multiple fanout') + msg = "{} is designated PI WIRE and has multiple fanout" + raise SynthesisException(msg) - def has_designated_po(self): - ''' + def has_designated_po(self) -> bool: + """ Checks if only WIREs are connected to POs. Moreover, verifies that those designated PO WIREs have no other fanout. Returns True of this is the case and False otherwise. - ''' - for (n, d) in self.po_map: - if n.gate_type != 'WIRE': + """ + for n, _d in self.po_map: + if n.gate_type != "WIRE": return False if len(n.fanout) > 1: return False return True - def verify_designated_po(self): - ''' + def verify_designated_po(self) -> None: + """ The same as :func:`has_designated_po` but raises a :class:`SynthesisException` if the spec is not met. 
- ''' - for (n, d) in self.po_map: - if n.gate_type != 'WIRE': - raise SynthesisException('{} is designated PO but has gate type {}'.format( - n.coords, n.gate_type)) + """ + for n, _d in self.po_map: + if n.gate_type != "WIRE": + msg = f"{n.coords} is designated PO but has gate type {n.gate_type}" + raise SynthesisException(msg) if len(n.fanout) > 1: - raise SynthesisException('{} is designated PO but has multiple fanout'.format( - n.coords)) + msg = f"{n.coords} is designated PO but has multiple fanout" + raise SynthesisException(msg) - def verify_consecutive_not(self): - ''' + def verify_consecutive_not(self) -> None: + """ Verifies that the network contains no consecutive NOT gates. Raises a :class:`SynthesisException` if it does. - ''' + """ for n in self.nodes: if n.is_pi: continue - if n.gate_type == 'NOT': - for _, innode in n.fanin.items(): - if not innode.is_pi and innode.gate_type == 'NOT': - raise SynthesisException('{} is NOT gate and has NOT fanin {}'.format( - n.coords, innode.coords)) - - def verify_no_crossing_io(self): - ''' + if n.gate_type == "NOT": + for innode in n.fanin.values(): + if not innode.is_pi and innode.gate_type == "NOT": + msg = f"{n.coords} is NOT gate and has NOT fanin {innode.coords}" + raise SynthesisException(msg) + + def verify_no_crossing_io(self) -> None: + """ Verifies that the network contains no crossings that are directly connected to I/O pins. Raises a :class:`SynthesisException` if it does. 
- ''' + """ for n in self.nodes: if n.is_pi: continue - if n.gate_type == 'CROSS': + if n.gate_type == "CROSS": if n.is_po: - raise SynthesisException('{} is CROSS so cannot be PO'.format(n.coords)) - for _, innode in n.fanin.items(): + msg = f"{n.coords} is CROSS so cannot be PO" + raise SynthesisException(msg) + for innode in n.fanin.values(): if innode.is_pi: - raise SynthesisException('{} is CROSS so cannot have PI fanin {}'.format( - n.coords, innode.coords)) + msg = f"{n.coords} is CROSS so cannot have PI fanin {innode.coords}" + raise SynthesisException(msg) - def to_png(self, filename): - ''' + def to_png(self, filename) -> None: + """ Creates a PNG of the logic network using Graphviz. In the resulting PNG, all border nodes are filled in with a gray color. All internal nodes are white. PO nodes are marked by a double border. Every non-PI node is also marked with its tile-space coordinates as well as the function it computes. - ''' + """ dot = Digraph() - dot.attr(newrank='true') - dot.attr(rankdir='TB') - dot.attr('node', shape='circle') - dot.attr('node', fixedsize='true') - dot.attr('node', width='1.1') - dot.attr('node', height='1.1') + dot.attr(newrank="true") + dot.attr(rankdir="TB") + dot.attr("node", shape="circle") + dot.attr("node", fixedsize="true") + dot.attr("node", width="1.1") + dot.attr("node", height="1.1") # Find I/O coords. pi_coords = {} @@ -411,76 +418,71 @@ def to_png(self, filename): # Draw nodes in a grid. Make the grid a bit bigger so we can # fit the PI nodes on there neatly. 
- y_range = (-1, self.shape[1] + 1) # y coordinate range (top value exclusive) - x_range = (-1, self.shape[0] + 1) # x " " " + y_range = (-1, self.shape[1] + 1) # y coordinate range (top value exclusive) + x_range = (-1, self.shape[0] + 1) # x " " " boundary_counter = 0 coord_names = {} for y in range(y_range[0], y_range[1]): with dot.subgraph() as s: - s.attr(rank='same') + s.attr(rank="same") for x in range(x_range[0], x_range[1]): - if (x,y) in pi_coords: - n = pi_coords[(x,y)] - name = 'PI{}'.format(n.coords) - label = 'x[{}]'.format(n.coords) - s.node(name, label, fillcolor='deepskyblue1', style='filled') - elif (x,y) in po_coords: - h = po_coords[(x,y)] - name = 'PO{}'.format(h) - label = 'f[{}]'.format(h) - s.node(name, label, fillcolor='coral1', style='filled') - elif (x,y) in self.node_map: - n = self.node_map[(x, y)] - name = 'N_{}_{}'.format(x, y) - label = '{}\n{}'.format(n.coords, n.gate_type) - fill = 'gray' if n.is_border_node else 'white' - s.node(name, label, fillcolor=fill, style='filled') - else: # Empty boundary node - name = 'B_{}'.format(boundary_counter) + if (x, y) in pi_coords: + n = pi_coords[x, y] + name = f"PI{n.coords}" + label = f"x[{n.coords}]" + s.node(name, label, fillcolor="deepskyblue1", style="filled") + elif (x, y) in po_coords: + h = po_coords[x, y] + name = f"PO{h}" + label = f"f[{h}]" + s.node(name, label, fillcolor="coral1", style="filled") + elif (x, y) in self.node_map: + n = self.node_map[x, y] + name = f"N_{x}_{y}" + label = f"{n.coords}\n{n.gate_type}" + fill = "gray" if n.is_border_node else "white" + s.node(name, label, fillcolor=fill, style="filled") + else: # Empty boundary node + name = f"B_{boundary_counter}" boundary_counter += 1 - s.node(name, name, style='invis') - coord_names[(x,y)] = name + s.node(name, name, style="invis") + coord_names[x, y] = name for coord, name in coord_names.items(): if coord[0] < self.shape[0]: - dot.edge(name, coord_names[(coord[0]+1, coord[1])], style='invis') + dot.edge(name, 
coord_names[coord[0] + 1, coord[1]], style="invis") if coord[1] < self.shape[1]: - dot.edge(name, coord_names[(coord[0], coord[1]+1)], style='invis') - + dot.edge(name, coord_names[coord[0], coord[1] + 1], style="invis") + for n in self.nodes: if n.is_pi: continue - name = 'N_{}_{}'.format(n.coords[0], n.coords[1]) + name = f"N_{n.coords[0]}_{n.coords[1]}" for in_dir, innode in n.fanin.items(): - if innode.is_pi: - inname = 'PI{}'.format(innode.coords) - else: - inname = 'N_{}_{}'.format(innode.coords[0], innode.coords[1]) - dot.edge(inname, name, - tailport=OPPOSITE_DIRECTION[in_dir][0:1].lower(), - headport=in_dir[0:1].lower()) + inname = f"PI{innode.coords}" if innode.is_pi else f"N_{innode.coords[0]}_{innode.coords[1]}" + dot.edge(inname, name, tailport=OPPOSITE_DIRECTION[in_dir][0:1].lower(), headport=in_dir[0:1].lower()) for h in range(self.nr_pos): n, d = self.po_map[h] - name = 'N_{}_{}'.format(n.coords[0], n.coords[1]) - oname = 'PO{}'.format(h) - olabel = 'PO{}'.format(h) + name = f"N_{n.coords[0]}_{n.coords[1]}" + oname = f"PO{h}" + f"PO{h}" dot.edge(name, oname, tailport=d[0:1].lower(), headport=OPPOSITE_DIRECTION[d][0:1].lower()) - dot.render(filename=filename, format='png', cleanup=True) + dot.render(filename=filename, format="png", cleanup=True) - def rec_simulate(self, n, sim_vals, marked_nodes): - ''' + def rec_simulate(self, n, sim_vals, marked_nodes) -> None: + """ Recursive helper method for :func:`simulate`. - ''' + """ if n.is_pi: return for innode in n.fanin.values(): - if not innode in marked_nodes: + if innode not in marked_nodes: self.rec_simulate(innode, sim_vals, marked_nodes) marked_nodes.add(n) - for out_dir in n.fanout.keys(): - if n.gate_type == 'EMPTY': + for out_dir in n.fanout: + if n.gate_type == "EMPTY": # Empty gates are not referred to by anything. 
continue invals = [] @@ -489,20 +491,20 @@ def rec_simulate(self, n, sim_vals, marked_nodes): invals.append(sim_vals[innode][None]) else: invals.append(sim_vals[innode][OPPOSITE_DIRECTION[in_dir]]) - - if n.gate_type == 'WIRE': + + if n.gate_type == "WIRE": # Only one fanin, retrieve its sim_val and copy it. sim_vals[n][out_dir] = invals[0] - elif n.gate_type == 'NOT': + elif n.gate_type == "NOT": # Only one fanin, retrieve its sim_val and negate it. sim_vals[n][out_dir] = 1 - invals[0] - elif n.gate_type == 'AND': + elif n.gate_type == "AND": sim_vals[n][out_dir] = invals[0] & invals[1] - elif n.gate_type == 'OR': + elif n.gate_type == "OR": sim_vals[n][out_dir] = invals[0] | invals[1] - elif n.gate_type == 'MAJ': - sim_vals[n][out_dir] = eval_gate('MAJ', invals) - elif n.gate_type == 'CROSS': + elif n.gate_type == "MAJ": + sim_vals[n][out_dir] = eval_gate("MAJ", invals) + elif n.gate_type == "CROSS": # Copy input to direction described in dir_map for indir, outdir in n.dir_map.items(): if outdir == out_dir: @@ -511,16 +513,17 @@ def rec_simulate(self, n, sim_vals, marked_nodes): else: sim_vals[n][out_dir] = sim_vals[n.fanin[indir]][OPPOSITE_DIRECTION[indir]] else: - raise SynthesisException("Unknown gate type '{}' in simulation".format(n.gate_type)) + msg = f"Unknown gate type '{n.gate_type}' in simulation" + raise SynthesisException(msg) def simulate(self): - ''' + """ Simulates the logic network and returns a list which contains the simulated function for each output. - ''' - sim_tt = [[0] * (2 ** self.nr_pis) for i in range(self.nr_pos)] + """ + sim_tt = [[0] * (2**self.nr_pis) for i in range(self.nr_pos)] sim_idx = 0 - for input_pattern in itertools.product('01', repeat=self.nr_pis): + for input_pattern in itertools.product("01", repeat=self.nr_pis): # Reverse input pattern, since our PI ordering is the # inverse of itertools.product. 
input_pattern = input_pattern[::-1] @@ -541,19 +544,30 @@ def simulate(self): sim_tt[i] = sim_tt[i] + sim_tt[i] return sim_tt + class scheme_graph: - ''' + """ A scheme_graph (short for clocking-scheme graph) is used to specify a clocking scheme and to synthesize logic networks according to that specification. - ''' - - def __init__(self, *, shape=(1,1), - enable_wire=True, enable_not=True, enable_and=True, - enable_or=True, enable_maj=True, enable_crossings=True, - designated_pi=False, designated_po=False, nr_threads=1, - timeout=0): - ''' + """ + + def __init__( + self, + *, + shape=(1, 1), + enable_wire=True, + enable_not=True, + enable_and=True, + enable_or=True, + enable_maj=True, + enable_crossings=True, + designated_pi=False, + designated_po=False, + nr_threads=1, + timeout=0, + ) -> None: + """ Creates a new clocking scheme graph. :param shape: A 2-tuple specifying the dimensions of the clocking scheme. @@ -568,7 +582,7 @@ def __init__(self, *, shape=(1,1), :param nr_threads: How many threads to use in parallel solving. :param timeout: the timeout for the synthesize call (in seconds) - ''' + """ self.shape = shape self.node_map = {} for y in range(shape[1]): @@ -576,7 +590,7 @@ def __init__(self, *, shape=(1,1), n = node(coords=(x, y)) if x == 0 or x == (shape[0] - 1) or y == 0 or y == (shape[1] - 1): n.is_border_node = True - self.node_map[(x,y)] = n + self.node_map[x, y] = n self.enable_wire = enable_wire self.enable_not = enable_not @@ -590,109 +604,116 @@ def __init__(self, *, shape=(1,1), self.model = None self.timeout = timeout - def add_virtual_edge(self, coords1, coords2): - ''' + def add_virtual_edge(self, coords1, coords2) -> None: + """ Adds a virtual edge from the node corresponding to the tile at coords1 to the node corresponding to the tile at coords2. A virtual edge specifies that the node at coords2 may have the node at coords1 in its fanin. However, it does not force this to happen. 
Hence, the connection is virtual and may be actualized by the synthesis process. - ''' + """ node1 = self.node_map[coords1] node2 = self.node_map[coords2] node1.virtual_fanout.append(node2) node2.virtual_fanin.append(node1) - def _dfs_find_cycles(self, cycles, start, n, path): + def _dfs_find_cycles(self, cycles, start, n, path) -> None: if n in path: if n == start: - cycles.append([n] + path) + cycles.append([n, *path]) return for innode in n.virtual_fanin: - self._dfs_find_cycles(self, cycles, start, innode, [n] + path) + self._dfs_find_cycles(self, cycles, start, innode, [n, *path]) def find_cycles(self): - ''' + """ Examines the clocking scheme graph and finds any cycles it may contain. - ''' + """ cycles = [] for n in self.node_map.values(): for innode in n.virtual_fanin: self._dfs_find_cycles(self, cycles, n, innode, [n]) return cycles - def to_png(self, filename): - ''' - Creates a PNG of the graph underlying the clock scheme + def to_png(self, filename) -> None: + """ + Creates a PNG of the graph underlying the clock scheme using Graphviz. 
- ''' + """ dot = Digraph() - dot.attr('node', shape='box') - dot.attr(splines='ortho') + dot.attr("node", shape="box") + dot.attr(splines="ortho") for y in range(self.shape[1]): with dot.subgraph() as s: - s.attr(rank='same') + s.attr(rank="same") for x in range(self.shape[0]): - n = self.node_map[(x, y)] - name = 'N_{}_{}'.format(x, y) + self.node_map[x, y] + name = f"N_{x}_{y}" label = str((x, y)) s.node(name, label) if x > 0: - prevname = 'N_{}_{}'.format(x-1, y) - s.edge(prevname, name, style='invis') - + prevname = f"N_{x - 1}_{y}" + s.edge(prevname, name, style="invis") for y in range(self.shape[1]): for x in range(self.shape[0]): - n1 = self.node_map[(x, y)] - n1name = 'N_{}_{}'.format(x, y) + n1 = self.node_map[x, y] + n1name = f"N_{x}_{y}" for n2 in n1.virtual_fanout: - n2name = 'N_{}_{}'.format(n2.coords[0], n2.coords[1]) + n2name = f"N_{n2.coords[0]}_{n2.coords[1]}" dot.edge(n1name, n2name) - dot.render(filename=filename, format='png', cleanup=True) + dot.render(filename=filename, format="png", cleanup=True) - def satisfies_spec(self, net, functions): - ''' + def satisfies_spec(self, net, functions) -> None: + """ Verifies that a network satisfies the specifications represented by this scheme_graph object. Raises a :class:`SynthesisException` if this is not the case. - ''' + """ # Make sure PIs do not have more than one fanout. 
for n in net.nodes: if not n.is_pi: continue if len(n.fanout) > 1: - raise SynthesisException('PI_{} has more than one fanin'.format(n.coords)) + msg = f"PI_{n.coords} has more than one fanin" + raise SynthesisException(msg) if not self.enable_wire: for n in net.nodes: - if not n.is_pi and n.gate_type == 'WIRE': - raise SynthesisException('{} has type WIRE'.format(n.coords)) + if not n.is_pi and n.gate_type == "WIRE": + msg = f"{n.coords} has type WIRE" + raise SynthesisException(msg) if not self.enable_not: for n in net.nodes: - if not n.is_pi and n.gate_type == 'NOT': - raise SynthesisException('{} has type NOT'.format(n.coords)) + if not n.is_pi and n.gate_type == "NOT": + msg = f"{n.coords} has type NOT" + raise SynthesisException(msg) if not self.enable_and: for n in net.nodes: - if not n.is_pi and n.gate_type == 'AND': - raise SynthesisException('{} has type AND'.format(n.coords)) + if not n.is_pi and n.gate_type == "AND": + msg = f"{n.coords} has type AND" + raise SynthesisException(msg) if not self.enable_or: for n in net.nodes: - if not n.is_pi and n.gate_type == 'OR': - raise SynthesisException('{} has type OR'.format(n.coords)) + if not n.is_pi and n.gate_type == "OR": + msg = f"{n.coords} has type OR" + raise SynthesisException(msg) if not self.enable_maj: for n in net.nodes: - if not n.is_pi and n.gate_type == 'MAJ': - raise SynthesisException('{} has type MAJ'.format(n.coords)) + if not n.is_pi and n.gate_type == "MAJ": + msg = f"{n.coords} has type MAJ" + raise SynthesisException(msg) if not self.enable_crossings: for n in net.nodes: - if not n.is_pi and n.gate_type == 'CROSS': - raise SynthesisException('{} has type CROSS'.format(n.coords)) + if not n.is_pi and n.gate_type == "CROSS": + msg = f"{n.coords} has type CROSS" + raise SynthesisException(msg) if not net.has_border_io(): - raise SynthesisException('Net does not have border I/O') + msg = "Net does not have border I/O" + raise SynthesisException(msg) if self.designated_pi: 
net.verify_designated_pi() if self.designated_po: @@ -702,10 +723,10 @@ def satisfies_spec(self, net, functions): sim_tts = net.simulate() for i in range(len(functions)): if functions[i] != sim_tts[i]: - raise SynthesisException('Specified f[{}] = {}, net out[{}] = {}'.format( - i, functions[i], i, sim_tts[i])) + msg = f"Specified f[{i}] = {functions[i]}, net out[{i}] = {sim_tts[i]}" + raise SynthesisException(msg) - def _discover_connectivity(self, n, pi_fanin_options): + def _discover_connectivity(self, n, pi_fanin_options) -> None: # Check which directions support fanouts. fanout_directions = set() for outnode in n.virtual_fanout: @@ -721,9 +742,11 @@ def _discover_connectivity(self, n, pi_fanin_options): # directions that are not used by virtual fanin or fanout. io_directions = CARDINAL_DIRECTIONS.difference(fanout_directions).difference(fanin_directions) if n.is_border_node and len(io_directions) == 0: - raise SynthesisException('Unexpected I/O state at border node') - elif not n.is_border_node and len(io_directions) > 0: - raise SynthesisException('Unexpected I/O state at internal node') + msg = "Unexpected I/O state at border node" + raise SynthesisException(msg) + if not n.is_border_node and len(io_directions) > 0: + msg = "Unexpected I/O state at internal node" + raise SynthesisException(msg) # Add I/O directions to potential fanin/fanout directions. 
for direction in io_directions: fanout_directions.add(direction) @@ -734,7 +757,7 @@ def _discover_connectivity(self, n, pi_fanin_options): fanin_options[d] = [(pi, None) for pi in pi_fanin_options] for innode in n.virtual_fanin: outdir, indir = get_direction(innode.coords, n.coords) - assert(indir not in fanin_options) + assert indir not in fanin_options fanin_options[indir] = [(innode, outdir)] n.fanout_directions = fanout_directions n.fanin_directions = fanin_directions @@ -742,20 +765,21 @@ def _discover_connectivity(self, n, pi_fanin_options): n.io_directions = io_directions def synthesize(self, functions, verbosity=0): - ''' + """ Synthesizes the given list of functions. Returns an iterator of :class:`logic_network` objects, so the caller may iterate on this method to synthesize all networks that satisfy the specifications given - by the clocking scheme and the functions. + by the clocking scheme and the functions. :param functions: A list of lists of binary integers. Every list is a function to be synthesized. Every list is to be computed by the resulting logic network and corresponds to one of its outputs. The n-th list corresponds to the n-th logic network output. :param verbosity: Parameter to view debugging output. - ''' + """ @wrapt_timeout_decorator.timeout(self.timeout) def timeout_call(self, functions, verbosity): for net in self._synthesize(self, functions, verbosity): return net + return None if self.timeout <= 0: for net in self._synthesize(self, functions, verbosity): @@ -765,26 +789,26 @@ def timeout_call(self, functions, verbosity): yield net def _synthesize(self, functions, verbosity): - ''' + """ Synthesizes a logic network according to the clocking scheme specifications encoded in the graph and the functional specification encoded by the truth tables in the functions list. - + NOTE: this function may be called multiple times, which will result in it generating zero or more logic networks. 
- ''' - assert(len(functions) > 0) - assert(log2(len(functions[0])).is_integer()) + """ + assert len(functions) > 0 + assert log2(len(functions[0])).is_integer() self.nr_pis = round(log2(len(functions[0]))) self.nr_pos = len(functions) var_idx = 1 - - self.nodes = [node(coords=i,is_pi=True) for i in range(self.nr_pis)] + + self.nodes = [node(coords=i, is_pi=True) for i in range(self.nr_pis)] for y in range(self.shape[1]): for x in range(self.shape[0]): - self.nodes.append(self.node_map[(x,y)]) + self.nodes.append(self.node_map[x, y]) legend = {} @@ -805,23 +829,22 @@ def _synthesize(self, functions, verbosity): for n in self.nodes: if n.is_pi: continue - enabled_gates = ['EMPTY'] + enabled_gates = ["EMPTY"] if self.enable_wire: - enabled_gates.append('WIRE') + enabled_gates.append("WIRE") if self.enable_not: - enabled_gates.append('NOT') + enabled_gates.append("NOT") if self.enable_and: - enabled_gates.append('AND') + enabled_gates.append("AND") if self.enable_or: - enabled_gates.append('OR') + enabled_gates.append("OR") if self.enable_maj and len(n.fanin_options) > 2: # assert(self.nr_pis > 2) - enabled_gates.append('MAJ') - if self.enable_crossings: - if not n.is_border_node and (len(n.virtual_fanin) == 2): - enabled_gates.append('CROSS') + enabled_gates.append("MAJ") + if self.enable_crossings and not n.is_border_node and (len(n.virtual_fanin) == 2): + enabled_gates.append("CROSS") n.enabled_gate_types = enabled_gates - + # Based on the enabled gates we can determine the simulation # variables and the gate type variables. 
nr_local_sim_vars = len(functions[0]) @@ -831,7 +854,7 @@ def _synthesize(self, functions, verbosity): varlist = [0] * nr_local_sim_vars for i in range(nr_local_sim_vars): varlist[i] = var_idx - legend[var_idx] = 'sim_vars[PI{}][None][{}]'.format(n.coords, i) + legend[var_idx] = f"sim_vars[PI{n.coords}][None][{i}]" var_idx += 1 sim_vars[None] = varlist else: @@ -839,7 +862,7 @@ def _synthesize(self, functions, verbosity): varlist = [0] * nr_local_sim_vars for i in range(nr_local_sim_vars): varlist[i] = var_idx - legend[var_idx] = 'sim_var[{}][d][{}]'.format(n.coords, i) + legend[var_idx] = f"sim_var[{n.coords}][d][{i}]" var_idx += 1 sim_vars[d] = varlist n.sim_vars = sim_vars @@ -852,7 +875,7 @@ def _synthesize(self, functions, verbosity): for t in n.enabled_gate_types: gate_type_vars.append(var_idx) gate_type_map[t] = var_idx - legend[var_idx] = 'gate {} has type {}'.format(n.coords, t) + legend[var_idx] = f"gate {n.coords} has type {t}" var_idx += 1 n.gate_type_vars = gate_type_vars n.gate_type_map = gate_type_map @@ -881,11 +904,11 @@ def _synthesize(self, functions, verbosity): dir_map = {} # Track all selection variables for a given fanin # direction. 
- svar_direction_map = {} + svar_direction_map = {} for direction in n.fanin_directions: svar_direction_map[direction] = [] svars = [] - fanin_size_options = set([GATE_FANIN_RANGE[gate] for gate in n.enabled_gate_types]) + fanin_size_options = {GATE_FANIN_RANGE[gate] for gate in n.enabled_gate_types} for size_option in fanin_size_options: if size_option == 0: # Handle 0 as a special case where this node @@ -895,36 +918,38 @@ def _synthesize(self, functions, verbosity): svar_map[size_option] = {} dir_list = list(n.fanin_options.keys()) for directions in itertools.combinations(dir_list, size_option): - dir_opt_list = [] - for d in directions: - dir_opt_list.append([(d, o) for o in n.fanin_options[d]]) + dir_opt_list = [[(d, o) for o in n.fanin_options[d]] for d in directions] fanin_combinations = itertools.product(*dir_opt_list) -# Warning: enabling the print statement below iterates over fanin_combinations, rendering the for loop useless! -# print('{} fanin combinations: {}'.format(n.coords, list(fanin_combinations))) + # Warning: enabling the print statement below iterates over fanin_combinations, rendering the for loop useless! + # print('{} fanin combinations: {}'.format(n.coords, list(fanin_combinations))) for comb in fanin_combinations: # Filter out redundant combinations. - if size_option == 2 and comb[0][1][0] == comb[1][1][0]: - continue - elif size_option == 3 and ( - (comb[0][1][0] == comb[1][1][0]) or - (comb[0][1][0] == comb[2][1][0]) or - (comb[1][1][0] == comb[2][1][0])): + if (size_option == 2 and comb[0][1][0] == comb[1][1][0]) or ( + size_option == 3 + and ( + (comb[0][1][0] == comb[1][1][0]) + or (comb[0][1][0] == comb[2][1][0]) + or (comb[1][1][0] == comb[2][1][0]) + ) + ): continue # If designated PIs are enabled, we don't want # gates with more than 1 fanin referring to # PIs. 
- if self.designated_pi and size_option == 2 and ( - comb[0][1][0].is_pi or comb[1][1][0].is_pi): + if ( + self.designated_pi and size_option == 2 and (comb[0][1][0].is_pi or comb[1][1][0].is_pi) + ) or ( + self.designated_pi + and size_option == 3 + and (comb[0][1][0].is_pi or comb[1][1][0].is_pi or comb[2][1][0].is_pi) + ): continue - elif self.designated_pi and size_option == 3 and ( - comb[0][1][0].is_pi or comb[1][1][0].is_pi or comb[2][1][0].is_pi): - continue - + if not self.enable_crossings or size_option != 2: svar_map[size_option][var_idx] = comb - legend[var_idx] = '{} has fanin {}'.format(n.coords, comb) + legend[var_idx] = f"{n.coords} has fanin {comb}" svars.append(var_idx) - # print(comb) + # print(comb) for direction, option in comb: # option[0] is the node, option[1] is the # output port direction. @@ -938,12 +963,15 @@ def _synthesize(self, functions, verbosity): # selection variable for every possible mapping # from the input directions to the output # directions. - input_directions = set([comb[0][0], comb[1][0]]) + input_directions = {comb[0][0], comb[1][0]} output_directions = list(CARDINAL_DIRECTIONS.difference(input_directions)) for i in range(2): svar_map[size_option][var_idx] = comb - dir_map[var_idx] = { comb[0][0] : output_directions[i], comb[1][0] : output_directions[1 - i] } - legend[var_idx] = '{} has fanin {}'.format(n.coords, comb) + dir_map[var_idx] = { + comb[0][0]: output_directions[i], + comb[1][0]: output_directions[1 - i], + } + legend[var_idx] = f"{n.coords} has fanin {comb}" svars.append(var_idx) for direction, option in comb: # option[0] is the node, option[1] is the @@ -953,7 +981,7 @@ def _synthesize(self, functions, verbosity): option[0].ref_var_map[var_idx] = n svar_direction_map[direction].append(var_idx) var_idx += 1 - + n.svar_map = svar_map n.dir_map = dir_map n.svar_direction_map = svar_direction_map @@ -971,12 +999,11 @@ def _synthesize(self, functions, verbosity): houtvars[var_idx] = (n, direction) 
n.ref_var_direction_map[direction].append(var_idx) n.ref_vars.append(var_idx) - legend[var_idx] = 'PO_{} points to ({}, {})'.format(h, n.coords, direction) + legend[var_idx] = f"PO_{h} points to ({n.coords}, {direction})" var_idx += 1 out_vars[h] = houtvars - - ''' + """ print('{}.enabled_gate_types: {}'.format((0,0), self.node_map[(0,0)].enabled_gate_types)) print('{}.svar_map: {}'.format((0,0), self.node_map[(0,0)].svar_map)) print('{}.svar_direction_map: {}'.format((0,0), self.node_map[(0,0)].svar_direction_map)) @@ -984,8 +1011,8 @@ def _synthesize(self, functions, verbosity): print('{}.ref_var_map = {}'.format((0,0), self.node_map[(0,0)].ref_var_map)) print('{}.ref_var_direction_map = {}'.format((0,0), self.node_map[(0,0)].ref_var_direction_map)) print('{}.ref_vars = {}'.format((0,0), self.node_map[(0,0)].ref_vars)) - ''' - + """ + # Create graph connection and path variables cycles = self.find_cycles(self) connection_vars = {} @@ -998,7 +1025,7 @@ def _synthesize(self, functions, verbosity): continue for np in n.virtual_fanin: connection_vars[np][n] = var_idx - legend[var_idx] = '{} and {} are connected'.format(np.coords, n.coords) + legend[var_idx] = f"{np.coords} and {n.coords} are connected" var_idx += 1 # Create the simulation propagation constraints. @@ -1007,27 +1034,27 @@ def _synthesize(self, functions, verbosity): if n.is_pi: continue for gate_type in n.enabled_gate_types: - if gate_type == 'EMPTY': + if gate_type == "EMPTY": # We handle the empty gate as a special case. continue - elif gate_type == 'CROSS': + elif gate_type == "CROSS": # CROSS is also handled as a special case. 
- gate_var = n.gate_type_map['CROSS'] + gate_var = n.gate_type_map["CROSS"] fanin_options = n.svar_map[2] for svar, fanins in fanin_options.items(): inport1, (innode1, outport1) = fanins[0] inport2, (innode2, outport2) = fanins[1] - assert(inport1 == OPPOSITE_DIRECTION[outport1]) - assert(inport2 == OPPOSITE_DIRECTION[outport2]) + assert inport1 == OPPOSITE_DIRECTION[outport1] + assert inport2 == OPPOSITE_DIRECTION[outport2] if innode1.is_pi or innode2.is_pi: # Crossings cannot have PI fanin. clauses.append([-gate_var, -svar]) continue out_directions = n.dir_map[svar] - assert(out_directions[inport1] in n.sim_vars.keys()) - assert(out_directions[inport2] in n.sim_vars.keys()) + assert out_directions[inport1] in n.sim_vars + assert out_directions[inport2] in n.sim_vars for tt_idx in range(nr_local_sim_vars): - permutations = list(itertools.product('01', repeat=2)) + permutations = list(itertools.product("01", repeat=2)) for permutation in permutations: clause1 = [0] * 5 clause2 = [0] * 5 @@ -1038,11 +1065,11 @@ def _synthesize(self, functions, verbosity): clause2[1] = -gate_var for i in range(2): if const_vals[i] == 1: - clause1[i+2] = -innode1.sim_vars[outport1][tt_idx] - clause2[i+2] = -innode2.sim_vars[outport2][tt_idx] + clause1[i + 2] = -innode1.sim_vars[outport1][tt_idx] + clause2[i + 2] = -innode2.sim_vars[outport2][tt_idx] else: - clause1[i+2] = innode1.sim_vars[outport1][tt_idx] - clause2[i+2] = innode2.sim_vars[outport2][tt_idx] + clause1[i + 2] = innode1.sim_vars[outport1][tt_idx] + clause2[i + 2] = innode2.sim_vars[outport2][tt_idx] if const_vals[0] == 1: clause1[4] = n.sim_vars[out_directions[inport1]][tt_idx] else: @@ -1051,9 +1078,8 @@ def _synthesize(self, functions, verbosity): clause2[4] = n.sim_vars[out_directions[inport2]][tt_idx] else: clause2[4] = -n.sim_vars[out_directions[inport2]][tt_idx] - - clauses.append(clause1) - clauses.append(clause2) + + clauses.extend((clause1, clause2)) else: fanin_size = GATE_FANIN_RANGE[gate_type] gate_var = 
n.gate_type_map[gate_type] @@ -1061,7 +1087,7 @@ def _synthesize(self, functions, verbosity): for svar, fanins in fanin_options.items(): for fanout_direction in n.fanout_directions: for tt_idx in range(nr_local_sim_vars): - permutations = list(itertools.product('01', repeat=(fanin_size))) + permutations = list(itertools.product("01", repeat=(fanin_size))) for permutation in permutations: const_vals = [] for i in range(fanin_size): @@ -1074,16 +1100,15 @@ def _synthesize(self, functions, verbosity): for i in range(len(const_vals)): _, (innode, output_port) = fanins[i] if const_vals[i] == 1: - clause[i+2] = -innode.sim_vars[output_port][tt_idx] + clause[i + 2] = -innode.sim_vars[output_port][tt_idx] else: - clause[i+2] = innode.sim_vars[output_port][tt_idx] + clause[i + 2] = innode.sim_vars[output_port][tt_idx] if function_output == 1: - clause[fanin_size+2] = n.sim_vars[fanout_direction][tt_idx] + clause[fanin_size + 2] = n.sim_vars[fanout_direction][tt_idx] else: - clause[fanin_size+2] = -n.sim_vars[fanout_direction][tt_idx] + clause[fanin_size + 2] = -n.sim_vars[fanout_direction][tt_idx] clauses.append(clause) - # Make sure that every I/O port is used at most once, and that # PIs are used at most once. 
for n in self.nodes: @@ -1094,7 +1119,7 @@ def _synthesize(self, functions, verbosity): else: cnf = CardEnc.atmost(lits=n.svars, encoding=EncType.pairwise) for clause in cnf.clauses: - clauses.append(clause) + clauses.append(clause) for direction, svars in n.ref_var_direction_map.items(): cnf = CardEnc.atmost(lits=svars, encoding=EncType.pairwise) for clause in cnf.clauses: @@ -1107,14 +1132,14 @@ def _synthesize(self, functions, verbosity): for n in self.nodes: if n.is_pi: continue - empty_var = n.gate_type_map['EMPTY'] + empty_var = n.gate_type_map["EMPTY"] for gate_type in n.enabled_gate_types: - if gate_type == 'EMPTY': + if gate_type == "EMPTY": continue gate_type_var = n.gate_type_map[gate_type] fanin_range = GATE_FANIN_RANGE[gate_type] svars = list(n.svar_map[fanin_range].keys()) - clauses.append([empty_var, -gate_type_var] + svars) + clauses.append([empty_var, -gate_type_var, *svars]) # Create cycle-prevention constraints. for n in self.nodes: @@ -1137,13 +1162,13 @@ def _synthesize(self, functions, verbosity): for svar, outnode in innode.ref_var_map.items(): if outnode == n: potential_svars.append(svar) - clause = [-connection_vars[innode][n]] + potential_svars + clause = [-connection_vars[innode][n], *potential_svars] clauses.append(clause) - + # For every cycle in the graph, one of the variables # representing a step on the cycle must be false. for cycle in cycles: - cycle_steps = zip(cycle, cycle[1:]) + cycle_steps = itertools.pairwise(cycle) cycle_lits = [-connection_vars[s[0]][s[1]] for s in cycle_steps] clauses.append(cycle_lits) @@ -1188,8 +1213,8 @@ def _synthesize(self, functions, verbosity): for n in self.nodes: if n.is_pi: continue - empty_var = n.gate_type_map['EMPTY'] - clauses.append([empty_var] + n.ref_vars) + empty_var = n.gate_type_map["EMPTY"] + clauses.append([empty_var, *n.ref_vars]) # Add cardinality constraints on gate fanouts. These # constraints depend on the gate type. 
AND/OR/NOT/MAJ gates @@ -1203,22 +1228,18 @@ def _synthesize(self, functions, verbosity): continue for gate_type in n.enabled_gate_types: gate_var = n.gate_type_map[gate_type] - if gate_type == 'WIRE': + if gate_type == "WIRE": cnf = CardEnc.atmost(lits=n.ref_vars, encoding=EncType.pairwise, bound=3) - for clause in cnf.clauses: - clauses.append([-gate_var] + clause) - elif gate_type == 'EMPTY': + clauses.extend([-gate_var, *clause] for clause in cnf.clauses) + elif gate_type == "EMPTY": # We'll handle EMPTY gates elsewhere. continue - elif gate_type == 'CROSS': + elif gate_type == "CROSS": cnf = CardEnc.equals(lits=n.ref_vars, encoding=EncType.pairwise, bound=2) - for clause in cnf.clauses: - clauses.append([-gate_var] + clause) + clauses.extend([-gate_var, *clause] for clause in cnf.clauses) else: cnf = CardEnc.equals(lits=n.ref_vars, encoding=EncType.pairwise) - for clause in cnf.clauses: - clauses.append([-gate_var] + clause) - + clauses.extend([-gate_var, *clause] for clause in cnf.clauses) # If a tile has the EMPTY gate make sure it does not select # any fanin and that no gate selects it as fanin. Moreover, @@ -1227,14 +1248,11 @@ def _synthesize(self, functions, verbosity): for n in self.nodes: if n.is_pi: continue - empty_var = n.gate_type_map['EMPTY'] - for svar in n.svars: - clauses.append([-empty_var, -svar]) - for ref_var in n.ref_vars: - clauses.append([-empty_var, -ref_var]) + empty_var = n.gate_type_map["EMPTY"] + clauses.extend([-empty_var, -svar] for svar in n.svars) + clauses.extend([-empty_var, -ref_var] for ref_var in n.ref_vars) for direction in n.fanout_directions: - for tt_idx in range(nr_local_sim_vars): - clauses.append([-empty_var, -n.sim_vars[direction][tt_idx]]) + clauses.extend([-empty_var, -n.sim_vars[direction][tt_idx]] for tt_idx in range(nr_local_sim_vars)) # We cannot have a PI and a PO on the same I/O port. 
for n in self.nodes: @@ -1249,53 +1267,50 @@ def _synthesize(self, functions, verbosity): if po_n == n and d == direction: po_vars.append(houtvar) for pi_var in pi_vars: - for po_var in po_vars: - clauses.append([-pi_var, -po_var]) + clauses.extend([-pi_var, -po_var] for po_var in po_vars) # If designated_io is enabled only WIRE elements can have # PI/PO fanin/fanout. if self.designated_pi: - assert(self.enable_wire) + assert self.enable_wire for n in self.nodes: if not n.is_pi: continue for svar, out_node in n.ref_var_map.items(): # If this svar refers to - wire_var = out_node.gate_type_map['WIRE'] + wire_var = out_node.gate_type_map["WIRE"] clauses.append([-svar, wire_var]) # A designated PI can only have a single fanout. cnf = CardEnc.atmost(lits=out_node.ref_vars, encoding=EncType.pairwise) - for clause in cnf.clauses: - clauses.append([-svar] + clause) - + clauses.extend([-svar, *clause] for clause in cnf.clauses) + if self.designated_po: - assert(self.enable_wire) + assert self.enable_wire for n in self.nodes: if n.is_pi or not n.is_border_node: continue # If one of the POs points to this gate, it has to be # a WIRE. Moreover, it cannot have any other fanout. - wire_type_var = n.gate_type_map['WIRE'] + wire_type_var = n.gate_type_map["WIRE"] for h in range(nr_outputs): houtvars = out_vars[h] for houtvar, (out_node, _) in houtvars.items(): if out_node == n: clauses.append([-houtvar, wire_type_var]) cnf = CardEnc.atmost(lits=out_node.ref_vars, encoding=EncType.pairwise) - for clause in cnf.clauses: - clauses.append([-houtvar] + clause) + clauses.extend([-houtvar, *clause] for clause in cnf.clauses) # Symmetry break: disallow consecutive NOT gates. 
if self.enable_not: for n in self.nodes: if n.is_pi: continue - not_type_var = n.gate_type_map['NOT'] + not_type_var = n.gate_type_map["NOT"] for svar, fanins in n.svar_map[1].items(): innode = fanins[0][1][0] if innode.is_pi: continue - innode_not_var = innode.gate_type_map['NOT'] + innode_not_var = innode.gate_type_map["NOT"] clauses.append([-not_type_var, -svar, -innode_not_var]) if self.nr_threads <= 1: @@ -1309,17 +1324,15 @@ def _synthesize(self, functions, verbosity): prev_model = None for model in solver.enum_models(): if verbosity > 1: - logfile = open('model-{}.log'.format(model_idx), 'w') - if prev_model != None: + logfile = open(f"model-{model_idx}.log", "w") + if prev_model is not None: for i in range(len(model)): if model[i] != prev_model[i]: - logfile.write('model[{}] = {}, prev_model[{}] = {}\n'.format( - i, 1 if model[i] > 0 else 0, i, 1 if prev_model[i] > 0 else 0 - )) - for v in model: - logfile.write('{}\n'.format(v)) - for v, s in legend.items(): - logfile.write('{}: {} ({})\n'.format(v, s, True if model[v-1] > 0 else False)) + logfile.write( + f"model[{i}] = {1 if model[i] > 0 else 0}, prev_model[{i}] = {1 if prev_model[i] > 0 else 0}\n" + ) + logfile.writelines(f"{v}\n" for v in model) + logfile.writelines(f"{v}: {s} ({model[v - 1] > 0})\n" for v, s in legend.items()) logfile.close() self.model = model prev_model = model @@ -1331,28 +1344,32 @@ def _synthesize(self, functions, verbosity): models = [] while True: proc = None - with tempfile.NamedTemporaryFile(mode='w', delete=False) as f: - f.write('p cnf {} {}\n'.format(var_idx - 1, len(clauses) + len(models))) + with tempfile.NamedTemporaryFile(mode="w", delete=False) as f: + f.write(f"p cnf {var_idx - 1} {len(clauses) + len(models)}\n") for clause in clauses: for v in clause: - f.write('{} '.format(v)) - f.write('0\n') + f.write(f"{v} ") + f.write("0\n") for model in models: for v in model: - f.write('{} '.format(-v)) - f.write('0\n') + f.write(f"{-v} ") + f.write("0\n") f.close() - proc = 
subprocess.run(['../glucose-syrup', '-nthreads={}'.format(self.nr_threads), '-model', f.name], capture_output=True, text=True) + proc = subprocess.run( + ["../glucose-syrup", f"-nthreads={self.nr_threads}", "-model", f.name], + capture_output=True, + text=True, + ) os.remove(f.name) if proc.returncode == 10: # Glucose returns 10 on SAT - output = proc.stdout.split('\n') + output = proc.stdout.split("\n") model = [] for line in output: - if line[:1] == 'v': - modelvals = line.split(' ')[1:] + if line[:1] == "v": + modelvals = line.split(" ")[1:] for modelval in modelvals: - if modelval != '0': + if modelval != "0": model.append(int(modelval)) models.append(model) net = self.model_to_network(self, model, nr_outputs, out_vars, nr_local_sim_vars, verbosity) @@ -1365,43 +1382,39 @@ def _synthesize(self, functions, verbosity): break else: # Error in calling SAT solver. - raise SynthesisException('Error calling Glucose::MultiSolvers') + msg = "Error calling Glucose::MultiSolvers" + raise SynthesisException(msg) def model_to_network(self, model, nr_outputs, out_vars, nr_local_sim_vars, verbosity): - ''' + """ Decodes a SAT model (i.e. a list of integer values) and creates a - :class:`logic_network` from it. - ''' + :class:`logic_network` from it. + """ net = logic_network(self.shape, self.nr_pis, self.nr_pos) for h in range(nr_outputs): houtvars = out_vars[h] out_found = 0 for houtvar, (n, d) in houtvars.items(): if model[houtvar - 1] > 0: - #if verbosity > 1: -# print('out[{}] -> ({}, {}) (houtvar={})'.format(h, n.coords, d, houtvar)) + # if verbosity > 1: + # print('out[{}] -> ({}, {}) (houtvar={})'.format(h, n.coords, d, houtvar)) out_found += 1 net.set_output(h, n.coords, d) # Every output must point to exactly one output port. 
- assert(out_found == 1) + assert out_found == 1 for n in self.nodes: if n.is_pi: continue if verbosity > 1: for gate_type in n.enabled_gate_types: - print('{} gate type {}: {} ({})'.format( - n.coords, gate_type, 1 if model[n.gate_type_map[gate_type]-1] > 0 else 0, - model[n.gate_type_map[gate_type]-1])) - for direction in n.fanout_directions: - print('{} tt[{}]: '.format(n.coords, direction), end='') - for tt_idx in range(nr_local_sim_vars): - print('{}'.format(1 if model[n.sim_vars[direction][tt_idx]-1] > 0 else 0), end='') - print(' ', end='') - for tt_idx in range(nr_local_sim_vars): - print('({})'.format(model[n.sim_vars[direction][tt_idx]-1]), end='') - print('') - + pass + for _direction in n.fanout_directions: + for _tt_idx in range(nr_local_sim_vars): + pass + for _tt_idx in range(nr_local_sim_vars): + pass + netnode = net.node_map[n.coords] # Find out the gate type. gate_types_found = 0 @@ -1409,16 +1422,15 @@ def model_to_network(self, model, nr_outputs, out_vars, nr_local_sim_vars, verbo if model[gate_var - 1] > 0: gate_types_found += 1 netnode.gate_type = gate_type - assert(gate_types_found == 1) -# print('{} is {}-gate'.format(netnode.coords, netnode.gate_type)) + assert gate_types_found == 1 + # print('{} is {}-gate'.format(netnode.coords, netnode.gate_type)) netnode.is_border_node = n.is_border_node nr_selected_svars = 0 - nr_fanin = 0 - for size_option in n.svar_map.keys(): + for size_option in n.svar_map: for svar, comb in n.svar_map[size_option].items(): if model[svar - 1] > 0: nr_selected_svars += 1 -# print('{} has fanin {}'.format(n.coords, comb)) + # print('{} has fanin {}'.format(n.coords, comb)) for i in range(size_option): in_dir = comb[i][0] innode = comb[i][1][0] @@ -1427,17 +1439,17 @@ def model_to_network(self, model, nr_outputs, out_vars, nr_local_sim_vars, verbo netnode.set_fanin(in_dir, net.nodes[innode.coords], out_dir) else: netnode.set_fanin(in_dir, net.node_map[innode.coords], out_dir) - if netnode.gate_type == 'CROSS': + if 
netnode.gate_type == "CROSS": netnode.dir_map = n.dir_map[svar] - assert(nr_selected_svars <= 1) # may be zero if EMPTY + assert nr_selected_svars <= 1 # may be zero if EMPTY return net - - def print_model(self): - ''' + def print_model(self) -> None: + """ Prints the model of the latest successful SAT call (if any). - ''' - if self.model == None: - raise Exception('No model available') - for lit in self.model: - print(lit) + """ + if self.model is None: + msg = "No model available" + raise Exception(msg) + for _lit in self.model: + pass diff --git a/libs/mugen/mugen_info.hpp.in b/vendors/mugen/mugen_info.hpp.in similarity index 100% rename from libs/mugen/mugen_info.hpp.in rename to vendors/mugen/mugen_info.hpp.in diff --git a/libs/mugen/requirements.txt b/vendors/mugen/requirements.txt similarity index 100% rename from libs/mugen/requirements.txt rename to vendors/mugen/requirements.txt diff --git a/libs/undirected_graph/LICENSE b/vendors/undirected_graph/LICENSE similarity index 99% rename from libs/undirected_graph/LICENSE rename to vendors/undirected_graph/LICENSE index 8cdb8451d9..23cb790338 100644 --- a/libs/undirected_graph/LICENSE +++ b/vendors/undirected_graph/LICENSE @@ -337,4 +337,3 @@ proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. - diff --git a/libs/undirected_graph/README.md b/vendors/undirected_graph/README.md similarity index 89% rename from libs/undirected_graph/README.md rename to vendors/undirected_graph/README.md index bd8ce4e21e..21fbabfb39 100644 --- a/libs/undirected_graph/README.md +++ b/vendors/undirected_graph/README.md @@ -1,4 +1,5 @@ # undirected_graph + A simple implementation of an undirected graph in c++ using the stl. 
During my work on a group assignment in university I was in need for a graph @@ -18,21 +19,25 @@ LICENSE file for more information. To start using the `undirect_graph` simply include the `.h` files in your project and instantiate the graph with the corresponding template arguments: + ```c++ template class undirected_graph; ``` + where `T_vertex`/`T_edge` are the datatypes that are stored at the -vertices/edges and `Key_vertex`/`Key_edge` are the identifier types used to +vertices/edges and `Key_vertex`/`Key_edge` are the identifier types used to retrieve or remove objects from the graph. The `Key_edge` type has to meet certain criteria, for an easy start use the supplied `undirected_pair` where `T` would be the `Key_vertex` type of the graph. + ```c++ template class undirected_pair; ``` + Most of the time you can use `int` or `size_t` as the `Key_vertex` type and `undirected_pair` or respectively `undirected_pair` as the `Key_edge` type. If you want to use your own id types (maybe you already have @@ -53,16 +58,18 @@ working with stl containers. The only difference is that most methods have an appended `_vertex` or `_edge` to indicate on which objects the method works. 
It is possible to: - - add vertex data with a unique id - - add edge data by specifying two vertex ids that aren't already connected - - access the data by reference using the corresponding id - - remove vertices and egdes by id - - iterate through all vertices or edges (unordered) - - iterate through all adjacent vertices of a vertex with a specified id - - iterate through the graph vertices by using an breadth- or depth-first search - iterator + +- add vertex data with a unique id +- add edge data by specifying two vertex ids that aren't already connected +- access the data by reference using the corresponding id +- remove vertices and egdes by id +- iterate through all vertices or edges (unordered) +- iterate through all adjacent vertices of a vertex with a specified id +- iterate through the graph vertices by using an breadth- or depth-first search + iterator Some example code: + ```c++ typedef undirected_graph, double> graph_type; graph_type graph; @@ -89,18 +96,20 @@ while(!bfs.end()) { // Remove a vertex and its edge(s) graph.erase_vertex(2); ``` + You can find more example code in the `main.cpp` in the test subfolder. ## What does this project contain? The project consist mainly of three parts: + - `undirected_graph` the main component of the graph I've been writing about this whole document. - `undirected_pair` a simple data type which behaves similar to a `std::pair` except for the fact that it is just for one data type, pair(a,b) and pair(b,a) compare as equal and it's sortable. The sorting order is determined by the smaller and bigger elements of the pairs. It's a simple data type to use for - the edge ids in the graph. + the edge ids in the graph. - `graph_search_iterator` is an abstract class that provides an interface for graph traversal iterators. Two subclasses are supplied: a breadth-first and a depth-first iterator. @@ -113,19 +122,22 @@ the id types. 
`Key_vertex` basically has to fulfil the same requirements as a key for an `std::unordered_set` and a `std::map`. Namely: + - Equality operator `==` - Strict weak ordering operator `<` - Specialized `std::hash` functor (see http://stackoverflow.com/a/17017281/929037) The same applies to the `Key_edge` with some more restrictions: + - Additional constructor with the signature `edge_id(vertex_id a, vertex_id b)` - Equality operator where `edge_id(a,b) == edge_id(b,a)` (this also affects the - `<`-operator and hash functor, see below) + `<`-operator and hash functor, see below) - Access to the two connected `vertex_id`s via a public `a` and `b` member For a data type that implements all these requirements have a look at the `undirected_pair`. Nevertheless here are the axioms for the `<`-operator of the `Key_edge`: + ``` Let a, b, c, d be of type vertex_id and idA(a,b), idB(c,d) be of type edge_id. Then: 1. min(a,b) < min(c,d) then: (idA < idB) diff --git a/vendors/undirected_graph/source/graph_search_iterator.h b/vendors/undirected_graph/source/graph_search_iterator.h new file mode 100644 index 0000000000..1c130c5e34 --- /dev/null +++ b/vendors/undirected_graph/source/graph_search_iterator.h @@ -0,0 +1,191 @@ +/* + Header file for graph_search_iterator for the undirected_graph container + Copyright (C) 2015 Fabian Löschner + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#ifndef GRAPHSEARCHITERATOR +#define GRAPHSEARCHITERATOR + +#include +#include +#include +#include + +/** + * @brief Base class for search iterators in a graph + * + * This class provides the base for search iterators on graphs with a container of + * currently waiting vertices (/vertex iterators) and a list of already visited + * vertices. Subclasses have to reimplement the next() function to increment the 'iterator'. + * @tparam graph Type of the graph that should be traversed. Required to get the necessary iterator and data types. + * @tparam waiting_container Type of the waiting container that should be used. (stack/queue) + */ +template +class graph_search_iterator +{ + public: + /** + * @brief Constructs a new graph search iterator + * + * Constructs a graph search iterator object, initializing it with the supplied values. + * @param g A reference to the graph that should be traversed. + * @param start Vertex iterator to the node that should be used as a starting point. + */ + graph_search_iterator(const graph& g, typename graph::graph_vertex_const_iterator start) : m_graph(&g) + { + if (!g.empty()) + { + m_waiting.push(start); + m_discovered.emplace(start->first); + } + } + + /** + * @brief Check whether search has finished + * + * Returns whether the search alghorithms iterated through all accessible vertices. + * @return true if the search finished. + */ + bool end() const + { + return m_waiting.empty(); + } + + /** + * @brief Returns number of discovered vertices + * + * Returns the number of vertices the search algorithm already traveresed. + * @return Number of visited vertices. 
+ */ + size_t discovered() const + { + return m_discovered.size(); + } + + /** + * @brief Returns iterator to next vertex in the search order + * + * This method should be reimplemented in subclasses to increment the search iterator + * to the next the element depending on the search algorithm + * @return Iterator to the next element + */ + virtual typename graph::graph_vertex_const_iterator next() = 0; + + protected: + //! Queue for waiting vertex iterators + waiting_container m_waiting; + //! The vertices that were already visited + std::unordered_set m_discovered; + //! The graph this iterator traverses + const graph* m_graph; +}; + +/** + * @brief Breadth first iterator + * + * This iterator traverses an undirected_graph using the breadth first algortihm. + * @tparam graph Type of the graph that should be traversed. + */ +template +class breadth_first_iterator + : public graph_search_iterator> +{ + public: + /** + * @brief Constructs a new BFS iterator + * + * Constructs a breadth first search iterator object, initializing it with the supplied values. + * @param g A reference to the graph that should be traversed. + * @param start Vertex iterator to the node that should be used as a starting point. + */ + breadth_first_iterator(const graph& g, typename graph::graph_vertex_const_iterator start) : + graph_search_iterator>(g, start) + {} + + /** + * @brief Returns iterator to the next vertex + * + * This method increments the iterator using the BFS algorithm and returns + * an iterator to the next element. 
+ * @return Iterator to the next element + */ + typename graph::graph_vertex_const_iterator next() + { + auto next = this->m_waiting.front(); + this->m_waiting.pop(); + + for (auto it = this->m_graph->begin_adjacent(next->first); it != this->m_graph->end_adjacent(next->first); ++it) + { + if (this->m_discovered.count(*it) == 0) + { + this->m_waiting.push(this->m_graph->find_vertex(*it)); + this->m_discovered.emplace(*it); + } + } + + return next; + } +}; + +/** + * @brief Depth first iterator + * + * This iterator traverses an undirected_graph using the depth first algortihm. + * @tparam graph Type of the graph that should be traversed. + */ +template +class depth_first_iterator + : public graph_search_iterator> +{ + public: + /** + * @brief Constructs a new DFS iterator + * + * Constructs a depth first search iterator object, initializing it with the supplied values. + * @param g A reference to the graph that should be traversed. + * @param start Vertex iterator to the node that should be used as a starting point. + */ + depth_first_iterator(const graph& g, typename graph::graph_vertex_const_iterator start) : + graph_search_iterator>(g, start) + {} + + /** + * @brief Returns iterator to the next vertex + * + * This method increments the iterator using the DFS algorithm and returns + * an iterator to the next element. 
+ * @return Iterator to the next element + */ + typename graph::graph_vertex_const_iterator next() + { + auto next = this->m_waiting.top(); + this->m_waiting.pop(); + + for (auto it = this->m_graph->begin_adjacent(next->first); it != this->m_graph->end_adjacent(next->first); ++it) + { + if (this->m_discovered.count(*it) == 0) + { + this->m_waiting.push(this->m_graph->find_vertex(*it)); + this->m_discovered.emplace(*it); + } + } + + return next; + } +}; + +#endif // GRAPHSEARCHITERATOR diff --git a/vendors/undirected_graph/source/undirected_graph.h b/vendors/undirected_graph/source/undirected_graph.h new file mode 100644 index 0000000000..299b3a544f --- /dev/null +++ b/vendors/undirected_graph/source/undirected_graph.h @@ -0,0 +1,497 @@ +/* + Header file for undirected_graph container + Copyright (C) 2015 Fabian Löschner + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#ifndef UNDIRECTEDGRAPH +#define UNDIRECTEDGRAPH + +#include +#include +#include +#include + +/** + * @brief Undirected graph with vertices and edges + * + * A basic implementation of undirected graphs containing vertices connected by unique edges. + * There may be multiple vertices with the same value but only one edge between two vertices. + * The container stores data elements for the vertices and edges. 
The vertices and edges are identified + * and sorted by ids which have to be unique in the container. + * @tparam Key_vertex Type for the ids of vertices. Has to support the default comparison operators. + * @tparam T_vertex The type that should be used for the data elements at the vertices. + * @tparam Key_edge The type for the ids of edges. Has to provide a constructor taking two vertex ids as well as + * @tparam T_edge The type that should be used for the data elments at the edges + * support of the default comparison operators where the order of the two vertices is not important. + * Furthermore it has to provide access to the two vertex ids with a public a and b member variable. + */ +template +class undirected_graph +{ + public: + typedef T_vertex vertex_data_type; + typedef Key_vertex vertex_id_type; + typedef T_edge edge_data_type; + typedef Key_edge edge_id_type; + + private: + // TODO: Replace forward_list with map because of slow erasing. Use unique_ptr to store them. + + //! Type of the current graph + typedef undirected_graph graph_type; + //! Type for the container of vertex data in the graph + typedef std::unordered_map vertex_container; + //! Type for the container of edge data in the graph + typedef std::unordered_map edge_container; + //! Type for the adjacency lists of the graph + typedef std::forward_list adjacency_list; + //! Type for the container of the adjacency lists in the graph + typedef std::unordered_map adjacency_container; + + vertex_container vertices; /*!< Container for the vertex data */ + edge_container edges; /*!< Container for the edge data */ + adjacency_container adjacency; /*!< Container for adjacency lists */ + + public: + // TODO: Erase by iterator + + //! Iterator for vertices in the graph. It behaves like a std::unordered_map iterator with it->first being the + //! vertex id and it->second being the mapped vertex data. + typedef typename vertex_container::iterator graph_vertex_iterator; + //! 
Const iterator for vertices in the graph. It behaves like a std::unordered_map iterator with it->first being the + //! vertex id and it->second being the mapped vertex data. + typedef typename vertex_container::const_iterator graph_vertex_const_iterator; + //! Iterator for edges in the graph. It behaves like a std::unordered_map iterator with it->first being the edge id + //! and it->second being the mapped edge data. + typedef typename edge_container::iterator graph_edge_iterator; + //! Const iterator for edges in the graph. It behaves like a std::unordered_map iterator with it->first being the + //! edge id and it->second being the mapped edge data. + typedef typename edge_container::const_iterator graph_edge_const_iterator; + //! Iterator for adjacencent vertices in the graph. It behaves like a std::forward_list iterator dereferencing to + //! the id of the adjacent vertex. + typedef typename adjacency_list::iterator graph_adjacency_iterator; + //! Const iterator for adjacencent vertices in the graph. It behaves like a std::forward_list iterator dereferencing + //! to the id of the adjacent vertex. + typedef typename adjacency_list::const_iterator graph_adjacency_const_iterator; + + /** + * @brief Make edge id + * + * Creates the edge id for the edge between the two specified vertex ids. The two vertex ids + * don't have to be present in the graph. + * @param a First vertex id. + * @param b Second vertex id. + * @return Edge id for the edge between the two vertex ids. + */ + static edge_id_type make_edge_id(const vertex_id_type& a, const vertex_id_type& b) + { + return edge_id_type(a, b); + } + + /** + * @brief Test whether graph is empty + * + * Returns whether the graph is empty (i.e. whether there are no vertices). + * This function does not modify the graph in any way. To clear the content + * of a graph container, see undirected_graph::clear. + * @return true if the there no vertices, false otherwise. 
+ */ + bool empty() const + { + return vertices.empty(); + } + + /** + * @brief Return vertex container size + * + * Returns the number of vertices in the undirected_graph container. + * @return The number of vertices in the graph. + */ + size_t size_vertices() const + { + return vertices.size(); + } + + /** + * @brief Return edge container size + * + * Returns the number of edges in the undirected_graph container. + * @return The number of edges in the graph. + */ + size_t size_edges() const + { + return edges.size(); + } + + /** + * @brief Access vertex + * + * Returns a reference to the mapped data of the vertex identified with the specified id. + * If it does not match the id of any vertex in the container, the function throws an out_of_range exception. + * @param id The id of the vertex whose mapped data is accessed. + * @return A reference to the mapped data of the vertex. + */ + vertex_data_type& at_vertex(const vertex_id_type& id) + { + return vertices.at(id); + } + + const vertex_data_type& at_vertex(const vertex_id_type& id) const + { + return vertices.at(id); + } + + /** + * @brief Access edge + * + * Returns a reference to the mapped data of the edge identified with the specified id. + * If it does not match the id of any edge in the container, the function throws an out_of_range exception. + * @param id The id of the edge whose mapped data is accessed. + * @return A reference to the mapped data of the edge. + */ + edge_data_type& at_edge(const edge_id_type& id) + { + return edges.at(id); + } + + const edge_data_type& at_edge(const edge_id_type& id) const + { + return edges.at(id); + } + + /** + * @brief Get iterator to vertex + * + * Searches the container for a vertex with an id equivalent to the one specified and returns an iterator + * to it if found, otherwise it returns an iterator to undirected_graph::end_vertices. 
+ * Two ids are considered equivalent if the container's comparison object returns false reflexively + * (i.e., no matter the order in which the ids are passed as arguments). + * @param id Id to be searched for. + * @return An iterator to the vertex, if a vertex with specified id is found, or undirected_graph::end_vertices + * otherwise. + */ + graph_vertex_iterator find_vertex(const vertex_id_type& id) + { + return vertices.find(id); + } + + graph_vertex_const_iterator find_vertex(const vertex_id_type& id) const + { + return vertices.find(id); + } + + /** + * @brief Get iterator to edge + * + * Searches the container for an edge with an id equivalent to the one specified and returns an iterator + * to it if found, otherwise it returns an iterator to undirected_graph::end_edges. + * Two ids are considered equivalent if the container's comparison object returns false reflexively + * (i.e., no matter the order in which the ids are passed as arguments). + * @param id Id to be searched for. + * @return An iterator to the edge, if an edge with specified id is found, or undirected_graph::end_edges otherwise. + */ + graph_edge_iterator find_edge(const edge_id_type& id) + { + return edges.find(id); + } + + graph_edge_const_iterator find_edge(const edge_id_type& id) const + { + return edges.find(id); + } + + /** + * @brief Return iterator to beginning of vertices + * + * Returns an iterator referring to the first vertex in the graph container. + * If the container is empty, the returned iterator value shall not be dereferenced. + * @return An iterator to the first vertex in the container. + */ + graph_vertex_iterator begin_vertices() + { + return vertices.begin(); + } + graph_vertex_const_iterator begin_vertices() const + { + return vertices.begin(); + } + + /** + * @brief Return iterator to end of vertices + * + * Returns an iterator referring to the past-the-end vertex in the graph container. + * It does not point to any element, and thus shall not be dereferenced. 
+ * If the container is empty, this function returns the same as undirected_graph::begin_vertices. + * @return An iterator to the past-the-end vertex in the container. + */ + graph_vertex_iterator end_vertices() + { + return vertices.end(); + } + graph_vertex_const_iterator end_vertices() const + { + return vertices.end(); + } + + /** + * @brief Return iterator to beginning of edges + * + * Returns an iterator referring to the first edge in the graph container. + * If the container is empty, the returned iterator value shall not be dereferenced. + * @return An iterator to the first edge in the container. + */ + graph_edge_iterator begin_edges() + { + return edges.begin(); + } + graph_edge_const_iterator begin_edges() const + { + return edges.begin(); + } + + /** + * @brief Return iterator to end of edges + * + * Returns an iterator referring to the past-the-end edges in the graph container. + * It does not point to any element, and thus shall not be dereferenced. + * If the container is empty, this function returns the same as undirected_graph::begin_edges. + * @return An iterator to the past-the-end edge in the container. + */ + graph_edge_iterator end_edges() + { + return edges.end(); + } + graph_edge_const_iterator end_edges() const + { + return edges.end(); + } + + /** + * @brief Return iterator to beginning of adjacent vertices + * + * Returns an iterator referring to the first adjacent vertex of the specified vertex. + * If the adjacency list is empty, the returned iterator value shall not be dereferenced. + * It behaves like a forward_list iterator. + * @return An iterator to the first adjacent vertex in the container. 
+ */ + graph_adjacency_iterator begin_adjacent(const vertex_id_type& vertex) + { + return adjacency.at(vertex).begin(); + } + graph_adjacency_const_iterator begin_adjacent(const vertex_id_type& vertex) const + { + return adjacency.at(vertex).begin(); + } + + /** + * @brief Return iterator to end of adjacent vertices + * + * Returns an iterator referring to the past-the-end adjacent vertex to the specified vertex. + * It does not point to any element, and thus shall not be dereferenced. + * If the adjacency list is empty, this function returns the same as undirected_graph::begin_adjacent. + * It behaves like a forward_list iterator. + * @return An iterator to the past-the-end adjacent vertex of the specified vertex. + */ + graph_adjacency_iterator end_adjacent(const vertex_id_type& vertex) + { + return adjacency.at(vertex).end(); + } + graph_adjacency_const_iterator end_adjacent(const vertex_id_type& vertex) const + { + return adjacency.at(vertex).end(); + } + + /** + * @brief Clear content + * + * Removes all elements from the graph container (which are destroyed), leaving the container with a size of 0. + */ + void clear() + { + vertices.clear(); + edges.clear(); + adjacency.clear(); + } + + /** + * @brief Insert vertex + * + * Inserts a new vertex to the graph, effectively increasing the container size by one. Multiple vertices with the + * same value may exist in one graph but ids have to be unique. + * @param vertex_id The id of the vertex. + * @param vertex_data Value to be copied to the inserted vertex. + * @return Returns a pair with an iterator to the inserted vertex and a bool indicating whether the vertex was newly + * inserted or not. + */ + std::pair insert_vertex(const vertex_id_type& vertex_id, + const vertex_data_type& vertex_data) + { + auto pair = vertices.emplace(vertex_id, vertex_data); + adjacency.emplace(vertex_id, adjacency_list()); + return pair; + } + + /** + * @brief Erase vertex + * + * Removes a single vertex from the graph container. 
This effectively reduces the vertex container
+ * size by one and the vertex data is destroyed. Also all edge data connected to this vertex
+ * is destroyed. WARNING: Linear in the number of adjacency entries of connected vertices.
+ * @param vertex_id Id of the vertex that should be removed.
+ * @return Returns whether the vertex was removed.
+ */
+ bool erase_vertex(const vertex_id_type& vertex_id)
+ {
+ // Try to remove the vertex; bail out if the id is unknown
+ auto count = vertices.erase(vertex_id);
+ if (count == 0)
+ return false;
+
+ /*
+ * Remove a possible self-loop first: erasing its self-adjacency
+ * entry inside the second loop would invalidate that loop's iterator
+ */
+ if (edges.count(edge_id_type(vertex_id, vertex_id)) == 1)
+ {
+ // Delete the self-loop edge data
+ edges.erase(edge_id_type(vertex_id, vertex_id));
+
+ // Find and delete the single self-adjacency entry
+ auto& adj_list = adjacency.at(vertex_id);
+ auto it_prev = adj_list.before_begin();
+ for (auto it = adj_list.begin(); it != adj_list.end(); ++it)
+ {
+ if (*it == vertex_id)
+ {
+ adj_list.erase_after(it_prev);
+ break;
+ }
+ it_prev = it;
+ }
+ }
+
+ // For every remaining neighbor: drop the connecting edge and the reverse adjacency entry
+ auto& adj_list = adjacency.at(vertex_id);
+ for (auto it = adj_list.begin(); it != adj_list.end(); ++it)
+ {
+ const auto other_id = *it;
+
+ // Remove edge data
+ edges.erase(edge_id_type(vertex_id, other_id));
+
+ // Delete the neighbor's adjacency entry pointing back to vertex_id
+ auto& other_adj_list = adjacency.at(other_id);
+ auto it_prev = other_adj_list.before_begin();
+ for (auto other_it = other_adj_list.begin(); other_it != other_adj_list.end(); ++other_it)
+ {
+ if (*other_it == vertex_id)
+ {
+ other_adj_list.erase_after(it_prev);
+ break;
+ }
+ it_prev = other_it;
+ }
+ }
+
+ // Remove adjacency list of the vertex
+ adjacency.erase(vertex_id);
+
+ return true;
+ }
+
+ /**
+ * @brief Erase edge
+ *
+ * Removes a single edge from the graph container. This effectively reduces the edge container
+ * size by one and the edge data is destroyed.
WARNING: Linear in the number of adjacent vertices
+ * of the two connected vertices.
+ * @param edge_id Id of the edge that should be removed.
+ * @return Returns whether the edge was removed.
+ */
+ bool erase_edge(const edge_id_type& edge_id)
+ {
+ // Try to remove the edge; bail out if it does not exist
+ auto count = edges.erase(edge_id);
+ if (count == 0)
+ return false;
+
+ // Remove adjacency of b from a
+ auto& adjacency_list_a = adjacency.at(edge_id.a);
+ auto it_prev_a = adjacency_list_a.before_begin();
+ for (auto it = adjacency_list_a.begin(); it != adjacency_list_a.end(); ++it)
+ {
+ if (*it == edge_id.b)
+ {
+ adjacency_list_a.erase_after(it_prev_a);
+ break;
+ }
+ it_prev_a = it;
+ }
+
+ // Remove adjacency of a from b
+ auto& adjacency_list_b = adjacency.at(edge_id.b);
+ auto it_prev_b = adjacency_list_b.before_begin();
+ for (auto it = adjacency_list_b.begin(); it != adjacency_list_b.end(); ++it)
+ {
+ if (*it == edge_id.a)
+ {
+ adjacency_list_b.erase_after(it_prev_b);
+ break;
+ }
+ it_prev_b = it;
+ }
+
+ return true;
+ }
+
+ /**
+ * @brief Add edge between two vertices
+ *
+ * This method inserts a new edge data element into the graph connecting two vertices.
+ * If the edge already exists, it is left unmodified.
+ * @param vertex_a Id of the first vertex to connect.
+ * @param vertex_b Id of the second vertex to connect.
+ * @param edge_data The data element for the edge between the two vertices.
+ * @return Returns a pair with an iterator to the inserted edge and a bool indicating whether the edge was inserted
+ * or not.
+ */
+ std::pair insert_edge(const vertex_id_type& vertex_a, const vertex_id_type& vertex_b,
+ edge_data_type edge_data)
+ {
+ // Both endpoints must already exist in the graph
+ if (vertices.count(vertex_a) == 0 || vertices.count(vertex_b) == 0)
+ {
+ return std::make_pair(edges.end(), false);
+ }
+
+ // Try to place the new edge; fails if an equal (undirected) edge id exists
+ auto pair = edges.emplace(edge_id_type(vertex_a, vertex_b), edge_data);
+
+ // Create adjacency entries if the edge was added (only one entry for a self-loop)
+ if (pair.second)
+ {
+ adjacency.at(vertex_a).push_front(vertex_b);
+ if (vertex_a != vertex_b)
+ adjacency.at(vertex_b).push_front(vertex_a);
+ }
+
+ // Return iterator and insertion flag
+ return pair;
+ }
+};
+
+#endif // UNDIRECTEDGRAPH
diff --git a/vendors/undirected_graph/source/undirected_pair.h b/vendors/undirected_graph/source/undirected_pair.h
new file mode 100644
index 0000000000..57805aa307
--- /dev/null
+++ b/vendors/undirected_graph/source/undirected_pair.h
@@ -0,0 +1,151 @@
+/*
+ Header file for undirected_pair container
+ Copyright (C) 2015 Fabian Löschner
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+#ifndef UNDIRECTEDPAIR
+#define UNDIRECTEDPAIR
+
+#include
+#include
+
+/**
+ * @brief Undirected pair
+ *
+ * Pair whose comparison operators do not differentiate between pair(a,b) and pair(b,a).
+ * Suitable as the edge id type for an undirected graph
+ */
+template
+class undirected_pair
+{
+ public:
+ T a; /*!< Element a of the undirected pair */
+ T b; /*!< Element b of the undirected pair */
+
+ //! Default-constructs an undirected pair (the elements are default-initialized)
+ undirected_pair() = default;
+ //! Constructs an undirected pair with the specified objects as data
+ undirected_pair(T a_in, T b_in) : a(a_in), b(b_in) {}
+
+ //! Returns a reference to the other data element if compare equals one of the elements;
+ //! throws std::invalid_argument otherwise
+ T& other_element(const T& compare)
+ {
+ if (compare == a)
+ return b;
+ if (compare == b)
+ return a;
+ throw std::invalid_argument("");
+ }
+ //! Returns a const reference to the other data element if compare equals one of the elements;
+ //! throws std::invalid_argument otherwise
+ const T& other_element(const T& compare) const
+ {
+ if (compare == a)
+ return b;
+ if (compare == b)
+ return a;
+ throw std::invalid_argument("");
+ }
+
+ //! Returns a reference to the smaller of the two elements in the undirected pair or element a
+ T& smaller_element()
+ {
+ if (b < a)
+ return b;
+ return a;
+ }
+ //! Returns a const reference to the smaller of the two elements in the undirected pair or element a
+ const T& smaller_element() const
+ {
+ if (b < a)
+ return b;
+ return a;
+ }
+
+ //! Returns a reference to the bigger of the two elements in the undirected pair or element b
+ T& bigger_element()
+ {
+ if (a > b)
+ return a;
+ return b;
+ }
+ //! Returns a const reference to the bigger of the two elements in the undirected pair or element b
+ const T& bigger_element() const
+ {
+ if (a > b)
+ return a;
+ return b;
+ }
+
+ //! Returns whether the two undirected pairs contain the same data elements (order-insensitive)
+ inline bool operator==(const undirected_pair& rhs) const
+ {
+ return ((a == rhs.a) && (b == rhs.b)) || ((a == rhs.b) && (b == rhs.a));
+ }
+ //!
Returns whether the two pairs contain different data elements
+ inline bool operator!=(const undirected_pair& rhs) const
+ {
+ return !(*this == rhs);
+ }
+
+ //! Strict weak ordering for undirected pairs by their smaller elements, ties broken by their bigger elements
+ inline bool operator<(const undirected_pair& rhs) const
+ {
+ if (smaller_element() < rhs.smaller_element())
+ return true;
+ if (rhs.smaller_element() < smaller_element())
+ return false;
+ if (bigger_element() < rhs.bigger_element())
+ return true;
+ return false;
+ }
+ inline bool operator>(const undirected_pair& rhs) const
+ {
+ return rhs < *this;
+ }
+ inline bool operator<=(const undirected_pair& rhs) const
+ {
+ return !(*this > rhs);
+ }
+ inline bool operator>=(const undirected_pair& rhs) const
+ {
+ return !(*this < rhs);
+ }
+};
+
+namespace std
+{
+
+/**
+ * @brief Hash functor for undirected_pair
+ *
+ * This functor provides a specialization of the std::hash functor for an undirected_pair
+ * to store them in containers like an unordered_map. To use this functor, the type T has
+ * to provide a std::hash specialization itself. The hash is order-insensitive since it
+ * combines the ordered (smaller, bigger) elements of the pair.
+ */
+template
+struct hash>
+{
+ inline std::size_t operator()(const undirected_pair& obj) const
+ {
+ return (std::hash()(obj.smaller_element()) ^ (std::hash()(obj.bigger_element()) << 1));
+ }
+};
+} // namespace std
+
+#endif // UNDIRECTEDPAIR