diff --git a/.github/workflows/compatibility_keras_tests.yml b/.github/workflows/compatibility_keras_tests.yml
index 24a05f8..61d300c 100644
--- a/.github/workflows/compatibility_keras_tests.yml
+++ b/.github/workflows/compatibility_keras_tests.yml
@@ -16,7 +16,7 @@ on:
         description: 'TF version'
         required: true
         type: string
-        default: '2.12.*'
+        default: '2.14.*'
 
 jobs:
   run-tensorflow-tests:
@@ -44,6 +44,7 @@ jobs:
           export PYTHONPATH="$PWD:${PYTHONPATH}"
           echo "Updated PYTHONPATH: $PYTHONPATH"
           cd ..
+          python -c "import mct_quantizers; print(mct_quantizers.__version__)"
           python tests/compatibility_tests/keras_comp_tests/compatibility_weights_save_model_test_suite.py ${{ inputs.save_version }}
           python tests/compatibility_tests/keras_comp_tests/compatibility_activation_save_model_test_suite.py ${{ inputs.save_version }}
       - name: Checkout to MCT Quantizers latest version
@@ -56,5 +57,6 @@ jobs:
           export PYTHONPATH="$PWD:${PYTHONPATH}"
           echo "Updated PYTHONPATH: $PYTHONPATH"
           cd ..
+          python -c "import mct_quantizers; print(mct_quantizers.__version__)"
           python tests/compatibility_tests/keras_comp_tests/compatibility_weights_load_model_test_suite.py ${{ inputs.save_version }}
           python tests/compatibility_tests/keras_comp_tests/compatibility_activation_load_model_test_suite.py ${{ inputs.save_version }}
\ No newline at end of file
diff --git a/.github/workflows/compatibility_torch_tests.yml b/.github/workflows/compatibility_torch_tests.yml
index 497b87b..0a2f115 100644
--- a/.github/workflows/compatibility_torch_tests.yml
+++ b/.github/workflows/compatibility_torch_tests.yml
@@ -16,7 +16,7 @@ on:
         description: 'Torch version'
         required: true
         type: string
-        default: '2.0.*'
+        default: '2.3.*'
 
 jobs:
   run-torch-tests:
@@ -29,21 +29,11 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ inputs.python_version }}
-      - name: Modify requirements for Torch 2.2
-        if: startsWith(inputs.torch_version, '2.2')
-        run: |
-          grep -v 'numpy' requirements.txt > temp_requirements.txt
-          echo "numpy<2" >> temp_requirements.txt
-          mv temp_requirements.txt requirements.txt
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
           pip install -r requirements.txt
-          pip install torch==${{ inputs.torch_version }} onnx onnxruntime "onnxruntime-extensions<0.14"
-      - name: Revert requirements for Torch 2.2
-        if: startsWith(inputs.torch_version, '2.2')
-        run: |
-          git checkout .
+          pip install torch==${{ inputs.torch_version }} "onnx<1.18" "onnxruntime<1.22" "onnxruntime-extensions<0.14"
       - name: Checkout to MCT Quantizers requested tag for saving test models
         run: |
           git checkout tags/${{ inputs.save_version }}
@@ -54,6 +44,7 @@ jobs:
           export PYTHONPATH="$PWD:${PYTHONPATH}"
           echo "Updated PYTHONPATH: $PYTHONPATH"
           cd ..
+          python -c "import mct_quantizers; print(mct_quantizers.__version__)"
           python tests/compatibility_tests/torch_comp_tests/compatibility_weights_save_model_test_suite.py ${{ inputs.save_version }}
           python tests/compatibility_tests/torch_comp_tests/compatibility_activation_save_model_test_suite.py ${{ inputs.save_version }}
       - name: Checkout to MCT Quantizers latest version
@@ -66,5 +57,6 @@ jobs:
           export PYTHONPATH="$PWD:${PYTHONPATH}"
           echo "Updated PYTHONPATH: $PYTHONPATH"
           cd ..
+          python -c "import mct_quantizers; print(mct_quantizers.__version__)"
           python tests/compatibility_tests/torch_comp_tests/compatibility_weights_load_model_test_suite.py ${{ inputs.save_version }}
           python tests/compatibility_tests/torch_comp_tests/compatibility_activation_load_model_test_suite.py ${{ inputs.save_version }}
+ python -c "import mct_quantizers; print(mct_quantizers.__version__)" python tests/compatibility_tests/torch_comp_tests/compatibility_weights_save_model_test_suite.py ${{ inputs.load_version }} python tests/compatibility_tests/torch_comp_tests/compatibility_activation_save_model_test_suite.py ${{ inputs.load_version }} - name: Checkout to MCT Quantizers requested tag for loading test models @@ -63,5 +54,6 @@ jobs: export PYTHONPATH="$PWD:${PYTHONPATH}" echo "Updated PYTHONPATH: $PYTHONPATH" cd .. + python -c "import mct_quantizers; print(mct_quantizers.__version__)" python tests/compatibility_tests/torch_comp_tests/compatibility_weights_load_model_test_suite.py ${{ inputs.load_version }} python tests/compatibility_tests/torch_comp_tests/compatibility_activation_load_model_test_suite.py ${{ inputs.load_version }} diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml deleted file mode 100644 index a513a8d..0000000 --- a/.github/workflows/nightly.yml +++ /dev/null @@ -1,33 +0,0 @@ -name: Nightly -on: - workflow_dispatch: # Allow manual triggers - schedule: - - cron: 0 0 * * * - -jobs: - build: - if: github.repository == 'sony/mct_quantizers' # Don't do this in forks - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install Python 3 - uses: actions/setup-python@v5 - with: - python-version: 3.8 - - name: Install dependencies - run: | - python -m pip install --upgrade pip setuptools wheel twine build - pip install -r requirements.txt - - name: Build nightly - run: | - version=$(python -c 'import mct_quantizers; print(mct_quantizers.__version__)') - now=$(date +'%Y%m%d-%H%M%S') - - echo "nightly_version=$version.$now" >> $GITHUB_ENV - sed -i "s/attr: mct_quantizers.__version__/$version.$now/g" setup.cfg - sed -i "s/name='mct-quantizers'/name='mct-quantizers-nightly'/g" setup.py - python setup.py sdist bdist_wheel - - name: Publish nightly - run: | - twine upload --repository pypi dist/* --verbose -u __token__ -p ${{ secrets.PYPI_API_KEY }} - diff --git a/.github/workflows/run_comp_test_tf214_v13.yml b/.github/workflows/run_comp_test_tf214_v13.yml deleted file mode 100644 index 3dbc9a0..0000000 --- a/.github/workflows/run_comp_test_tf214_v13.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Run Backward Compatibility Test - Tensorflow 2.14 MCTQ v1.3.0 -on: - workflow_dispatch: # Allow manual triggers - schedule: - - cron: 0 0 * * * - -jobs: - run-comp-tensorflow-2_14-v1_3: - uses: ./.github/workflows/compatibility_keras_tests.yml - with: - save_version: "v1.3.0" - python_version: "3.10" - tf_version: "2.14.*" diff --git a/.github/workflows/run_comp_test_tf214_v14.yml b/.github/workflows/run_comp_test_tf214_v14.yml deleted file mode 100644 index 13bc810..0000000 --- a/.github/workflows/run_comp_test_tf214_v14.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Run Backward Compatibility Test - Tensorflow 2.14 MCTQ v1.4.0 -on: - workflow_dispatch: # Allow manual triggers - schedule: - - cron: 0 0 * * * - -jobs: - run-comp-tensorflow-2_14-v1_4: - uses: ./.github/workflows/compatibility_keras_tests.yml - with: - save_version: "v1.4.0" - python_version: "3.10" - tf_version: "2.14.*" diff --git a/.github/workflows/run_comp_test_tf214_v152.yml b/.github/workflows/run_comp_test_tf214_v152.yml index f3efe10..cae5526 100644 --- a/.github/workflows/run_comp_test_tf214_v152.yml +++ b/.github/workflows/run_comp_test_tf214_v152.yml @@ -3,6 +3,11 @@ on: workflow_dispatch: # Allow manual triggers schedule: - cron: 0 0 * * * + pull_request: + +concurrency: + group: ${{ github.workflow 
}}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: run-comp-tensorflow-2_14-v1_5_2: diff --git a/.github/workflows/run_comp_test_tf215_v13.yml b/.github/workflows/run_comp_test_tf215_v13.yml deleted file mode 100644 index 7471e29..0000000 --- a/.github/workflows/run_comp_test_tf215_v13.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Run Backward Compatibility Test - Tensorflow 2.15 MCTQ v1.3.0 -on: - workflow_dispatch: # Allow manual triggers - schedule: - - cron: 0 0 * * * - -jobs: - run-comp-tensorflow-2_15-v1_3: - uses: ./.github/workflows/compatibility_keras_tests.yml - with: - save_version: "v1.3.0" - python_version: "3.10" - tf_version: "2.15.*" diff --git a/.github/workflows/run_comp_test_tf215_v14.yml b/.github/workflows/run_comp_test_tf215_v14.yml deleted file mode 100644 index 7eb50dd..0000000 --- a/.github/workflows/run_comp_test_tf215_v14.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Run Backward Compatibility Test - Tensorflow 2.15 MCTQ v1.4.0 -on: - workflow_dispatch: # Allow manual triggers - schedule: - - cron: 0 0 * * * - -jobs: - run-comp-tensorflow-2_15-v1_4: - uses: ./.github/workflows/compatibility_keras_tests.yml - with: - save_version: "v1.4.0" - python_version: "3.10" - tf_version: "2.15.*" diff --git a/.github/workflows/run_comp_test_tf215_v152.yml b/.github/workflows/run_comp_test_tf215_v152.yml index 40b1d39..594f2e9 100644 --- a/.github/workflows/run_comp_test_tf215_v152.yml +++ b/.github/workflows/run_comp_test_tf215_v152.yml @@ -3,6 +3,11 @@ on: workflow_dispatch: # Allow manual triggers schedule: - cron: 0 0 * * * + pull_request: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: run-comp-tensorflow-2_15-v1_5_2: diff --git a/.github/workflows/run_comp_test_torch22_v14.yml b/.github/workflows/run_comp_test_torch22_v14.yml deleted file mode 100644 index 2cbf6e0..0000000 --- a/.github/workflows/run_comp_test_torch22_v14.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Run Backward Compatibility Test - Pytorch 2.2 MCTQ v1.4.0 -on: - workflow_dispatch: # Allow manual triggers - schedule: - - cron: 0 0 * * * - -jobs: - run-comp-torch-2_2-v1_4: - uses: ./.github/workflows/compatibility_torch_tests.yml - with: - save_version: "v1.4.0" - python_version: "3.10" - torch_version: "2.2.*" diff --git a/.github/workflows/run_comp_test_torch23_v14.yml b/.github/workflows/run_comp_test_torch23_v14.yml deleted file mode 100644 index f3d8ea1..0000000 --- a/.github/workflows/run_comp_test_torch23_v14.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Run Backward Compatibility Test - Pytorch 2.3 MCTQ v1.4.0 -on: - workflow_dispatch: # Allow manual triggers - schedule: - - cron: 0 0 * * * - -jobs: - run-comp-torch-2_3-v1_4: - uses: ./.github/workflows/compatibility_torch_tests.yml - with: - save_version: "v1.4.0" - python_version: "3.10" - torch_version: "2.3.*" diff --git a/.github/workflows/run_comp_test_torch23_v152.yml b/.github/workflows/run_comp_test_torch23_v152.yml index de13ba4..0739cde 100644 --- a/.github/workflows/run_comp_test_torch23_v152.yml +++ b/.github/workflows/run_comp_test_torch23_v152.yml @@ -3,6 +3,11 @@ on: workflow_dispatch: # Allow manual triggers schedule: - cron: 0 0 * * * + pull_request: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: run-comp-torch-2_3-v1_5_2: diff --git a/.github/workflows/run_comp_test_torch24_v14.yml b/.github/workflows/run_comp_test_torch24_v14.yml 
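Reviewer note: the `concurrency` block that this and the following workflows gain keys runs by workflow name and git ref, so each branch or PR has its own group; `cancel-in-progress` evaluates to true everywhere except `refs/heads/main`, so a new push to a PR cancels the superseded run while runs on main are left to finish. The same block, annotated:

concurrency:
  # one group per workflow per branch/PR ref
  group: ${{ github.workflow }}-${{ github.ref }}
  # cancel a superseded in-flight run, except on main
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}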
diff --git a/.github/workflows/run_comp_test_tf215_v13.yml b/.github/workflows/run_comp_test_tf215_v13.yml
deleted file mode 100644
index 7471e29..0000000
--- a/.github/workflows/run_comp_test_tf215_v13.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Backward Compatibility Test - Tensorflow 2.15 MCTQ v1.3.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-comp-tensorflow-2_15-v1_3:
-    uses: ./.github/workflows/compatibility_keras_tests.yml
-    with:
-      save_version: "v1.3.0"
-      python_version: "3.10"
-      tf_version: "2.15.*"
diff --git a/.github/workflows/run_comp_test_tf215_v14.yml b/.github/workflows/run_comp_test_tf215_v14.yml
deleted file mode 100644
index 7eb50dd..0000000
--- a/.github/workflows/run_comp_test_tf215_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Backward Compatibility Test - Tensorflow 2.15 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-comp-tensorflow-2_15-v1_4:
-    uses: ./.github/workflows/compatibility_keras_tests.yml
-    with:
-      save_version: "v1.4.0"
-      python_version: "3.10"
-      tf_version: "2.15.*"
diff --git a/.github/workflows/run_comp_test_tf215_v152.yml b/.github/workflows/run_comp_test_tf215_v152.yml
index 40b1d39..594f2e9 100644
--- a/.github/workflows/run_comp_test_tf215_v152.yml
+++ b/.github/workflows/run_comp_test_tf215_v152.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-comp-tensorflow-2_15-v1_5_2:
diff --git a/.github/workflows/run_comp_test_torch22_v14.yml b/.github/workflows/run_comp_test_torch22_v14.yml
deleted file mode 100644
index 2cbf6e0..0000000
--- a/.github/workflows/run_comp_test_torch22_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Backward Compatibility Test - Pytorch 2.2 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-comp-torch-2_2-v1_4:
-    uses: ./.github/workflows/compatibility_torch_tests.yml
-    with:
-      save_version: "v1.4.0"
-      python_version: "3.10"
-      torch_version: "2.2.*"
diff --git a/.github/workflows/run_comp_test_torch23_v14.yml b/.github/workflows/run_comp_test_torch23_v14.yml
deleted file mode 100644
index f3d8ea1..0000000
--- a/.github/workflows/run_comp_test_torch23_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Backward Compatibility Test - Pytorch 2.3 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-comp-torch-2_3-v1_4:
-    uses: ./.github/workflows/compatibility_torch_tests.yml
-    with:
-      save_version: "v1.4.0"
-      python_version: "3.10"
-      torch_version: "2.3.*"
diff --git a/.github/workflows/run_comp_test_torch23_v152.yml b/.github/workflows/run_comp_test_torch23_v152.yml
index de13ba4..0739cde 100644
--- a/.github/workflows/run_comp_test_torch23_v152.yml
+++ b/.github/workflows/run_comp_test_torch23_v152.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-comp-torch-2_3-v1_5_2:
diff --git a/.github/workflows/run_comp_test_torch24_v14.yml b/.github/workflows/run_comp_test_torch24_v14.yml
deleted file mode 100644
index 1d0a249..0000000
--- a/.github/workflows/run_comp_test_torch24_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Backward Compatibility Test - Pytorch 2.4 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-comp-torch-2_4-v1_4:
-    uses: ./.github/workflows/compatibility_torch_tests.yml
-    with:
-      save_version: "v1.4.0"
-      python_version: "3.10"
-      torch_version: "2.4.*"
diff --git a/.github/workflows/run_comp_test_torch24_v152.yml b/.github/workflows/run_comp_test_torch24_v152.yml
index 11bc021..429fc0c 100644
--- a/.github/workflows/run_comp_test_torch24_v152.yml
+++ b/.github/workflows/run_comp_test_torch24_v152.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-comp-torch-2_4-v1_5_2:
diff --git a/.github/workflows/run_comp_test_torch25_v14.yml b/.github/workflows/run_comp_test_torch25_v14.yml
deleted file mode 100644
index c006ca4..0000000
--- a/.github/workflows/run_comp_test_torch25_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Backward Compatibility Test - Pytorch 2.5 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-comp-torch-2_5-v1_4:
-    uses: ./.github/workflows/compatibility_torch_tests.yml
-    with:
-      save_version: "v1.4.0"
-      python_version: "3.10"
-      torch_version: "2.5.*"
diff --git a/.github/workflows/run_comp_test_torch25_v152.yml b/.github/workflows/run_comp_test_torch25_v152.yml
index 57da33f..c90d689 100644
--- a/.github/workflows/run_comp_test_torch25_v152.yml
+++ b/.github/workflows/run_comp_test_torch25_v152.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-comp-torch-2_5-v1_5_2:
diff --git a/.github/workflows/run_comp_test_torch26_v152.yml b/.github/workflows/run_comp_test_torch26_v152.yml
new file mode 100644
index 0000000..dc7618a
--- /dev/null
+++ b/.github/workflows/run_comp_test_torch26_v152.yml
@@ -0,0 +1,18 @@
+name: Run Backward Compatibility Test - Pytorch 2.6 MCTQ v1.5.2
+on:
+  workflow_dispatch: # Allow manual triggers
+  schedule:
+    - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+jobs:
+  run-comp-torch-2_6-v1_5_2:
+    uses: ./.github/workflows/compatibility_torch_tests.yml
+    with:
+      save_version: "v1.5.2"
+      python_version: "3.12"
+      torch_version: "2.6.*"
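Reviewer note: these thin wrapper workflows call a reusable workflow via `uses:` and pass values with `with:`, which requires the callee to declare matching inputs under `on: workflow_call:`. The hunks above only show the `torch_version` input of compatibility_torch_tests.yml, so the shape of the other two inputs below is an assumption:

on:
  workflow_call:
    inputs:
      save_version:      # assumed declaration, not shown in this patch
        required: true
        type: string
      python_version:    # assumed declaration, not shown in this patch
        required: true
        type: string
      torch_version:
        description: 'Torch version'
        required: true
        type: string
        default: '2.3.*'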
diff --git a/.github/workflows/run_forward_comp_test_tf214_v14.yml b/.github/workflows/run_forward_comp_test_tf214_v14.yml
deleted file mode 100644
index c8e1230..0000000
--- a/.github/workflows/run_forward_comp_test_tf214_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Forward Compatibility Test - Tensorflow 2.14 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-forward-comp-tensorflow-2_14-v1_4:
-    uses: ./.github/workflows/forward_compatibility_keras_tests.yml
-    with:
-      load_version: "v1.4.0"
-      python_version: "3.10"
-      tf_version: "2.14.*"
diff --git a/.github/workflows/run_forward_comp_test_tf214_v152.yml b/.github/workflows/run_forward_comp_test_tf214_v152.yml
index c6393df..baf6e87 100644
--- a/.github/workflows/run_forward_comp_test_tf214_v152.yml
+++ b/.github/workflows/run_forward_comp_test_tf214_v152.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-forward-comp-tensorflow-2_14-v1_5_2:
diff --git a/.github/workflows/run_forward_comp_test_tf215_v14.yml b/.github/workflows/run_forward_comp_test_tf215_v14.yml
deleted file mode 100644
index 785d102..0000000
--- a/.github/workflows/run_forward_comp_test_tf215_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Forward Compatibility Test - Tensorflow 2.15 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-forward-comp-tensorflow-2_15-v1_4:
-    uses: ./.github/workflows/forward_compatibility_keras_tests.yml
-    with:
-      load_version: "v1.4.0"
-      python_version: "3.10"
-      tf_version: "2.15.*"
diff --git a/.github/workflows/run_forward_comp_test_tf215_v152.yml b/.github/workflows/run_forward_comp_test_tf215_v152.yml
index 8793324..e4711b9 100644
--- a/.github/workflows/run_forward_comp_test_tf215_v152.yml
+++ b/.github/workflows/run_forward_comp_test_tf215_v152.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-forward-comp-tensorflow-2_15-v1_5_2:
diff --git a/.github/workflows/run_forward_comp_test_torch22_v14.yml b/.github/workflows/run_forward_comp_test_torch22_v14.yml
deleted file mode 100644
index 66823a4..0000000
--- a/.github/workflows/run_forward_comp_test_torch22_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Forward Compatibility Test - Pytorch 2.2 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-comp-torch-2_2-v1_4:
-    uses: ./.github/workflows/forward_compatibility_torch_tests.yml
-    with:
-      load_version: "v1.4.0"
-      python_version: "3.10"
-      torch_version: "2.2.*"
diff --git a/.github/workflows/run_forward_comp_test_torch23_v14.yml b/.github/workflows/run_forward_comp_test_torch23_v14.yml
deleted file mode 100644
index b0227f0..0000000
--- a/.github/workflows/run_forward_comp_test_torch23_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Forward Compatibility Test - Pytorch 2.3 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-comp-torch-2_3-v1_4:
-    uses: ./.github/workflows/forward_compatibility_torch_tests.yml
-    with:
-      load_version: "v1.4.0"
-      python_version: "3.10"
-      torch_version: "2.3.*"
diff --git a/.github/workflows/run_forward_comp_test_torch23_v152.yml b/.github/workflows/run_forward_comp_test_torch23_v152.yml
index 2146932..09cf879 100644
--- a/.github/workflows/run_forward_comp_test_torch23_v152.yml
+++ b/.github/workflows/run_forward_comp_test_torch23_v152.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-comp-torch-2_3-v1_5_2:
diff --git a/.github/workflows/run_forward_comp_test_torch24_v14.yml b/.github/workflows/run_forward_comp_test_torch24_v14.yml
deleted file mode 100644
index 7c74529..0000000
--- a/.github/workflows/run_forward_comp_test_torch24_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Forward Compatibility Test - Pytorch 2.4 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-comp-torch-2_4-v1_4:
-    uses: ./.github/workflows/forward_compatibility_torch_tests.yml
-    with:
-      load_version: "v1.4.0"
-      python_version: "3.10"
-      torch_version: "2.4.*"
diff --git a/.github/workflows/run_forward_comp_test_torch24_v152.yml b/.github/workflows/run_forward_comp_test_torch24_v152.yml
index 6b7c70f..0debee3 100644
--- a/.github/workflows/run_forward_comp_test_torch24_v152.yml
+++ b/.github/workflows/run_forward_comp_test_torch24_v152.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-comp-torch-2_4-v1_5_2:
diff --git a/.github/workflows/run_forward_comp_test_torch25_v14.yml b/.github/workflows/run_forward_comp_test_torch25_v14.yml
deleted file mode 100644
index 509da21..0000000
--- a/.github/workflows/run_forward_comp_test_torch25_v14.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Run Forward Compatibility Test - Pytorch 2.5 MCTQ v1.4.0
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-comp-torch-2_5-v1_4:
-    uses: ./.github/workflows/forward_compatibility_torch_tests.yml
-    with:
-      load_version: "v1.4.0"
-      python_version: "3.10"
-      torch_version: "2.5.*"
diff --git a/.github/workflows/run_forward_comp_test_torch25_v152.yml b/.github/workflows/run_forward_comp_test_torch25_v152.yml
index 47b5cae..de03a87 100644
--- a/.github/workflows/run_forward_comp_test_torch25_v152.yml
+++ b/.github/workflows/run_forward_comp_test_torch25_v152.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-comp-torch-2_5-v1_5_2:
diff --git a/.github/workflows/run_forward_comp_test_torch26_v152.yml b/.github/workflows/run_forward_comp_test_torch26_v152.yml
new file mode 100644
index 0000000..2bdf8cc
--- /dev/null
+++ b/.github/workflows/run_forward_comp_test_torch26_v152.yml
@@ -0,0 +1,18 @@
+name: Run Forward Compatibility Test - Pytorch 2.6 MCTQ v1.5.2
+on:
+  workflow_dispatch: # Allow manual triggers
+  schedule:
+    - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+jobs:
+  run-comp-torch-2_6-v1_5_2:
+    uses: ./.github/workflows/forward_compatibility_torch_tests.yml
+    with:
+      load_version: "v1.5.2"
+      python_version: "3.12"
+      torch_version: "2.6.*"
diff --git a/.github/workflows/run_pytorch_tests.yml b/.github/workflows/run_pytorch_tests.yml
index 2bca1dc..bd2f242 100644
--- a/.github/workflows/run_pytorch_tests.yml
+++ b/.github/workflows/run_pytorch_tests.yml
@@ -15,17 +15,11 @@ jobs:
       - name: Install Python 3
        uses: actions/setup-python@v5
        with:
-          python-version: 3.10.*
-      - name: Modify requirements for Torch 2.2
-        if: startsWith(inputs.torch-version, '2.2')
-        run: |
-          grep -v 'numpy' requirements.txt > temp_requirements.txt
-          echo "numpy<2" >> temp_requirements.txt
-          mv temp_requirements.txt requirements.txt
+          python-version: 3.12.*
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
-          pip install torch==${{ inputs.torch-version }} torchvision onnx onnxruntime "onnxruntime-extensions<0.14"
+          pip install torch==${{ inputs.torch-version }} torchvision "onnx<1.18" "onnxruntime<1.22" "onnxruntime-extensions<0.14"
      - name: Run unittests
        run: python -m unittest discover tests/pytorch_tests -v
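Reviewer note: the bare `onnx onnxruntime` installs are replaced with upper-bounded pins, trading automatic upgrades for protection against breaking releases, and the `"onnx<1.18" "onnxruntime<1.22" "onnxruntime-extensions<0.14"` triple now recurs across several workflows. One way to keep the bounds in a single place is pip's constraints mechanism; a sketch using a hypothetical constraints file, not something this patch introduces:

      - name: Install ONNX stack with shared constraints
        run: |
          # the constraints file carries only version bounds, not packages to install
          printf 'onnx<1.18\nonnxruntime<1.22\nonnxruntime-extensions<0.14\n' > constraints.txt
          pip install -c constraints.txt onnx onnxruntime onnxruntime-extensions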
diff --git a/.github/workflows/run_tests_suite_pip.yml b/.github/workflows/run_tests_suite_pip.yml
index 4ac86b8..1e946ad 100644
--- a/.github/workflows/run_tests_suite_pip.yml
+++ b/.github/workflows/run_tests_suite_pip.yml
@@ -5,7 +5,7 @@ on:
       mct_quantizers_version:
         description: 'MCT Quantizers version'
         required: true
-        default: 'v1.1.0'
+        default: 'v1.6.0'
       python_version:
         description: 'Python version'
         required: false
@@ -48,14 +48,14 @@ jobs:
          rm -rf mct_quantizers
      - name: Install TF
        run: |
-          pip install tensorflow==2.14.*
+          pip install tensorflow==2.15.*
      - name: Run TF Tests
        run: |
          python -m unittest discover tests/keras_tests --verbose
      - name: Prepare for Torch
        run: |
          pip uninstall tensorflow -y
-          pip install torch==2.0.* torchvision onnx onnxruntime "onnxruntime-extensions<0.14"
+          pip install torch==2.6.* torchvision "onnx<1.18" "onnxruntime<1.22" "onnxruntime-extensions<0.14"
      - name: Run Torch Tests
        run: |
          python -m unittest discover tests/pytorch_tests --verbose
diff --git a/.github/workflows/run_tests_tf212.yml b/.github/workflows/run_tests_tf212.yml
deleted file mode 100644
index 932ac5a..0000000
--- a/.github/workflows/run_tests_tf212.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-name: Run Tests - Tensorflow 2.12
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-tensorflow-2_12:
-    uses: ./.github/workflows/run_keras_tests.yml
-    with:
-      tf-version: "2.12.*"
diff --git a/.github/workflows/run_tests_tf213.yml b/.github/workflows/run_tests_tf213.yml
deleted file mode 100644
index 5ca89b9..0000000
--- a/.github/workflows/run_tests_tf213.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-name: Run Tests - Tensorflow 2.13
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-tensorflow-2_13:
-    uses: ./.github/workflows/run_keras_tests.yml
-    with:
-      tf-version: "2.13.*"
diff --git a/.github/workflows/run_tests_tf214.yml b/.github/workflows/run_tests_tf214.yml
index f77ca29..84a47ac 100644
--- a/.github/workflows/run_tests_tf214.yml
+++ b/.github/workflows/run_tests_tf214.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-tensorflow-2_14:
diff --git a/.github/workflows/run_tests_tf215.yml b/.github/workflows/run_tests_tf215.yml
index 77c839c..e52d012 100644
--- a/.github/workflows/run_tests_tf215.yml
+++ b/.github/workflows/run_tests_tf215.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-tensorflow-2_15:
diff --git a/.github/workflows/run_tests_torch2_2.yml b/.github/workflows/run_tests_torch2_2.yml
deleted file mode 100644
index 4dd2a20..0000000
--- a/.github/workflows/run_tests_torch2_2.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-name: Run Tests - PyTorch 2.2
-on:
-  workflow_dispatch: # Allow manual triggers
-  schedule:
-    - cron: 0 0 * * *
-
-jobs:
-  run-pytorch-2_2:
-    uses: ./.github/workflows/run_pytorch_tests.yml
-    with:
-      torch-version: "2.2.*"
diff --git a/.github/workflows/run_tests_torch2_3.yml b/.github/workflows/run_tests_torch2_3.yml
index cf7c8fc..5675af2 100644
--- a/.github/workflows/run_tests_torch2_3.yml
+++ b/.github/workflows/run_tests_torch2_3.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-pytorch-2_3:
diff --git a/.github/workflows/run_tests_torch2_4.yml b/.github/workflows/run_tests_torch2_4.yml
index 96dd30c..a1e69f3 100644
--- a/.github/workflows/run_tests_torch2_4.yml
+++ b/.github/workflows/run_tests_torch2_4.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-pytorch-2_4:
diff --git a/.github/workflows/run_tests_torch2_5.yml b/.github/workflows/run_tests_torch2_5.yml
index 9dde6bc..f02129d 100644
--- a/.github/workflows/run_tests_torch2_5.yml
+++ b/.github/workflows/run_tests_torch2_5.yml
@@ -3,6 +3,11 @@ on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   run-pytorch-2_5:
diff --git a/.github/workflows/run_tests_torch2_6.yml b/.github/workflows/run_tests_torch2_6.yml
new file mode 100644
index 0000000..17b6db7
--- /dev/null
+++ b/.github/workflows/run_tests_torch2_6.yml
@@ -0,0 +1,16 @@
+name: Run Tests - PyTorch 2.6
+on:
+  workflow_dispatch: # Allow manual triggers
+  schedule:
+    - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+jobs:
+  run-pytorch-2_6:
+    uses: ./.github/workflows/run_pytorch_tests.yml
+    with:
+      torch-version: "2.6.*"
diff --git a/.github/workflows/run_various_python_versions.yml b/.github/workflows/run_various_python_versions_tf.yml
similarity index 74%
rename from .github/workflows/run_various_python_versions.yml
rename to .github/workflows/run_various_python_versions_tf.yml
index 50e4a7f..b012734 100644
--- a/.github/workflows/run_various_python_versions.yml
+++ b/.github/workflows/run_various_python_versions_tf.yml
@@ -1,12 +1,14 @@
-name: Test Python Version
+name: Test Python Version (TensorFlow)
 
 on:
   workflow_dispatch: # Allow manual triggers
   schedule:
     - cron: 0 0 * * *
   pull_request:
-    branches:
-      - main
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
 jobs:
   build:
@@ -26,6 +28,5 @@ jobs:
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install tensorflow==2.15.*
-          pip install torch==2.5.* onnx onnxruntime "onnxruntime-extensions<0.14"
      - name: Run unittests
-        run: python -m unittest discover -s tests -v
+        run: python -m unittest discover -s tests/keras_tests -v
diff --git a/.github/workflows/run_various_python_versions_torch.yml b/.github/workflows/run_various_python_versions_torch.yml
new file mode 100644
index 0000000..0a61977
--- /dev/null
+++ b/.github/workflows/run_various_python_versions_torch.yml
@@ -0,0 +1,32 @@
+name: Test Python Version (PyTorch)
+
+on:
+  workflow_dispatch: # Allow manual triggers
+  schedule:
+    - cron: 0 0 * * *
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    strategy:
+      matrix:
+        python-version: ["3.9", "3.10", "3.11", "3.12"]
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+          pip install torch==2.6.* "onnx<1.18" "onnxruntime<1.22" "onnxruntime-extensions<0.14"
+      - name: Run unittests
+        run: python -m unittest discover -s tests/pytorch_tests -v
diff --git a/README.md b/README.md
index b775901..334687e 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # Model Compression Toolkit (MCT) Quantizers
 
-The MCT Quantizers library is an open-source library developed by researchers and engineers working at Sony Semiconductor Israel.
+The MCT Quantizers library is an open-source library developed by researchers and engineers working at Sony Semiconductor Solutions.
 
 It provides tools for easily representing a quantized neural network in both Keras and PyTorch.
 The library offers researchers, developers, and engineers a set of useful quantizers, along with a simple interface for implementing new custom quantizers.
diff --git a/mct_quantizers/__init__.py b/mct_quantizers/__init__.py
index 2b97a1a..08fd896 100644
--- a/mct_quantizers/__init__.py
+++ b/mct_quantizers/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/common/__init__.py b/mct_quantizers/common/__init__.py
index 807f5e3..7df9c3f 100644
--- a/mct_quantizers/common/__init__.py
+++ b/mct_quantizers/common/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/common/base_inferable_quantizer.py b/mct_quantizers/common/base_inferable_quantizer.py
index b61fb0b..d7eeb8e 100644
--- a/mct_quantizers/common/base_inferable_quantizer.py
+++ b/mct_quantizers/common/base_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/common/constants.py b/mct_quantizers/common/constants.py
index a0da374..f8af6f5 100644
--- a/mct_quantizers/common/constants.py
+++ b/mct_quantizers/common/constants.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/common/get_all_subclasses.py b/mct_quantizers/common/get_all_subclasses.py
index ef769f0..16fa8e5 100644
--- a/mct_quantizers/common/get_all_subclasses.py
+++ b/mct_quantizers/common/get_all_subclasses.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/common/get_quantizers.py b/mct_quantizers/common/get_quantizers.py
index 13b98c9..80f3f48 100644
--- a/mct_quantizers/common/get_quantizers.py
+++ b/mct_quantizers/common/get_quantizers.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/common/metadata.py b/mct_quantizers/common/metadata.py
index b2a6a02..c61729f 100644
--- a/mct_quantizers/common/metadata.py
+++ b/mct_quantizers/common/metadata.py
@@ -1,4 +1,4 @@
-# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2024 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/common/quant_info.py b/mct_quantizers/common/quant_info.py
index 6298861..3f5cd49 100644
--- a/mct_quantizers/common/quant_info.py
+++ b/mct_quantizers/common/quant_info.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/common/quant_utils.py b/mct_quantizers/common/quant_utils.py
index 18b824d..d66838a 100644
--- a/mct_quantizers/common/quant_utils.py
+++ b/mct_quantizers/common/quant_utils.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/__init__.py b/mct_quantizers/keras/__init__.py
index 807f5e3..7df9c3f 100644
--- a/mct_quantizers/keras/__init__.py
+++ b/mct_quantizers/keras/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/activation_quantization_holder.py b/mct_quantizers/keras/activation_quantization_holder.py
index b74d24b..3e6194d 100644
--- a/mct_quantizers/keras/activation_quantization_holder.py
+++ b/mct_quantizers/keras/activation_quantization_holder.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/load_model.py b/mct_quantizers/keras/load_model.py
index 601a2ec..3afece0 100644
--- a/mct_quantizers/keras/load_model.py
+++ b/mct_quantizers/keras/load_model.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/metadata.py b/mct_quantizers/keras/metadata.py
index fbe859d..2db8081 100644
--- a/mct_quantizers/keras/metadata.py
+++ b/mct_quantizers/keras/metadata.py
@@ -1,4 +1,4 @@
-# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2024 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantize_wrapper.py b/mct_quantizers/keras/quantize_wrapper.py
index 3c96069..ff2734e 100644
--- a/mct_quantizers/keras/quantize_wrapper.py
+++ b/mct_quantizers/keras/quantize_wrapper.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizer_utils.py b/mct_quantizers/keras/quantizer_utils.py
index ee1fcec..61d878b 100644
--- a/mct_quantizers/keras/quantizer_utils.py
+++ b/mct_quantizers/keras/quantizer_utils.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/__init__.py b/mct_quantizers/keras/quantizers/__init__.py
index e38df66..0a1a6d3 100644
--- a/mct_quantizers/keras/quantizers/__init__.py
+++ b/mct_quantizers/keras/quantizers/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/activation_inferable_quantizers/__init__.py b/mct_quantizers/keras/quantizers/activation_inferable_quantizers/__init__.py
index 807f5e3..7df9c3f 100644
--- a/mct_quantizers/keras/quantizers/activation_inferable_quantizers/__init__.py
+++ b/mct_quantizers/keras/quantizers/activation_inferable_quantizers/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py b/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py
index d539ddd..0f5a9d0 100644
--- a/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py
+++ b/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py b/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py
index 3299b1b..2794f29 100644
--- a/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py
+++ b/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py b/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py
index a408dce..d2e9f22 100644
--- a/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py
+++ b/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py b/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py
index 5f6f9a6..385cf31 100644
--- a/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py
+++ b/mct_quantizers/keras/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/base_keras_inferable_quantizer.py b/mct_quantizers/keras/quantizers/base_keras_inferable_quantizer.py
index defb0d6..3a42a6d 100644
--- a/mct_quantizers/keras/quantizers/base_keras_inferable_quantizer.py
+++ b/mct_quantizers/keras/quantizers/base_keras_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/__init__.py b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/__init__.py
index 807f5e3..7df9c3f 100644
--- a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/__init__.py
+++ b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py
index 8003234..e297efb 100644
--- a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py
+++ b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py
index 560ecb0..dd6a1db 100644
--- a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py
+++ b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py
index 2d69a2c..cbf01c2 100644
--- a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py
+++ b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py
index 5e3d11e..fcd0458 100644
--- a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py
+++ b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py
index 679ab2a..f89b56e 100644
--- a/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py
+++ b/mct_quantizers/keras/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/keras/validation_functions.py b/mct_quantizers/keras/validation_functions.py
index 0ade48a..f0f6748 100644
--- a/mct_quantizers/keras/validation_functions.py
+++ b/mct_quantizers/keras/validation_functions.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/logger.py b/mct_quantizers/logger.py
index 9c63bda..d369943 100644
--- a/mct_quantizers/logger.py
+++ b/mct_quantizers/logger.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/__init__.py b/mct_quantizers/pytorch/__init__.py
index 807f5e3..7df9c3f 100644
--- a/mct_quantizers/pytorch/__init__.py
+++ b/mct_quantizers/pytorch/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/activation_quantization_holder.py b/mct_quantizers/pytorch/activation_quantization_holder.py
index 12cbef1..985c40b 100644
--- a/mct_quantizers/pytorch/activation_quantization_holder.py
+++ b/mct_quantizers/pytorch/activation_quantization_holder.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/fln_activation_quantization_holder.py b/mct_quantizers/pytorch/fln_activation_quantization_holder.py
index bd461f1..43728c8 100644
--- a/mct_quantizers/pytorch/fln_activation_quantization_holder.py
+++ b/mct_quantizers/pytorch/fln_activation_quantization_holder.py
@@ -1,4 +1,4 @@
-# Copyright 2025 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2025 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/load_model.py b/mct_quantizers/pytorch/load_model.py
index e467ffa..9e7f499 100644
--- a/mct_quantizers/pytorch/load_model.py
+++ b/mct_quantizers/pytorch/load_model.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/metadata.py b/mct_quantizers/pytorch/metadata.py
index dad71a6..fa3d02b 100644
--- a/mct_quantizers/pytorch/metadata.py
+++ b/mct_quantizers/pytorch/metadata.py
@@ -1,4 +1,4 @@
-# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2024 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/onnxruntime_session_options.py b/mct_quantizers/pytorch/onnxruntime_session_options.py
index 35fb13a..1dc5d19 100644
--- a/mct_quantizers/pytorch/onnxruntime_session_options.py
+++ b/mct_quantizers/pytorch/onnxruntime_session_options.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/onnxruntime_validations.py b/mct_quantizers/pytorch/onnxruntime_validations.py
index a06b318..7e7a268 100644
--- a/mct_quantizers/pytorch/onnxruntime_validations.py
+++ b/mct_quantizers/pytorch/onnxruntime_validations.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/preserving_activation_quantization_holder.py b/mct_quantizers/pytorch/preserving_activation_quantization_holder.py
index b6b0242..174a058 100644
--- a/mct_quantizers/pytorch/preserving_activation_quantization_holder.py
+++ b/mct_quantizers/pytorch/preserving_activation_quantization_holder.py
@@ -1,4 +1,4 @@
-# Copyright 2025 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2025 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantize_wrapper.py b/mct_quantizers/pytorch/quantize_wrapper.py
index 7731e90..dd15da6 100644
--- a/mct_quantizers/pytorch/quantize_wrapper.py
+++ b/mct_quantizers/pytorch/quantize_wrapper.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizer_utils.py b/mct_quantizers/pytorch/quantizer_utils.py
index e8ffc21..a8bd0a8 100644
--- a/mct_quantizers/pytorch/quantizer_utils.py
+++ b/mct_quantizers/pytorch/quantizer_utils.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/__init__.py b/mct_quantizers/pytorch/quantizers/__init__.py
index 770002f..af919c0 100644
--- a/mct_quantizers/pytorch/quantizers/__init__.py
+++ b/mct_quantizers/pytorch/quantizers/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/__init__.py b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/__init__.py
index 2147ec2..b22d379 100644
--- a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/__init__.py
+++ b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py
index 872e797..495d8b1 100644
--- a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py
+++ b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_lut_pot_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py
index 95f3df0..ba6c290 100644
--- a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py
+++ b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_pot_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py
index e41e9f0..ceb1de8 100644
--- a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py
+++ b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_symmetric_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py
index 82aa6c5..cad5b11 100644
--- a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py
+++ b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/activation_uniform_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/base_activation_quantizer_autograd_function.py b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/base_activation_quantizer_autograd_function.py
index 25741a9..908fcca 100644
--- a/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/base_activation_quantizer_autograd_function.py
+++ b/mct_quantizers/pytorch/quantizers/activation_inferable_quantizers/base_activation_quantizer_autograd_function.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py
index e9f1e95..0b4859e 100644
--- a/mct_quantizers/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py
+++ b/mct_quantizers/pytorch/quantizers/base_lut_symmetric_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/base_pytorch_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/base_pytorch_inferable_quantizer.py
index c7d7d88..98566bb 100644
--- a/mct_quantizers/pytorch/quantizers/base_pytorch_inferable_quantizer.py
+++ b/mct_quantizers/pytorch/quantizers/base_pytorch_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/base_quantizer_autograd_function.py b/mct_quantizers/pytorch/quantizers/base_quantizer_autograd_function.py
index 296e375..b89f661 100644
--- a/mct_quantizers/pytorch/quantizers/base_quantizer_autograd_function.py
+++ b/mct_quantizers/pytorch/quantizers/base_quantizer_autograd_function.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/base_symmetric_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/base_symmetric_inferable_quantizer.py
index 32125fa..2280a5f 100644
--- a/mct_quantizers/pytorch/quantizers/base_symmetric_inferable_quantizer.py
+++ b/mct_quantizers/pytorch/quantizers/base_symmetric_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/base_uniform_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/base_uniform_inferable_quantizer.py
index 5fdc1b4..b47eb0b 100644
--- a/mct_quantizers/pytorch/quantizers/base_uniform_inferable_quantizer.py
+++ b/mct_quantizers/pytorch/quantizers/base_uniform_inferable_quantizer.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/__init__.py b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/__init__.py index 2147ec2..b22d379 100644 --- a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/__init__.py +++ b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/base_weight_quantizer_autograd_function.py b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/base_weight_quantizer_autograd_function.py index 4719d3f..0c013d2 100644 --- a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/base_weight_quantizer_autograd_function.py +++ b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/base_weight_quantizer_autograd_function.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py index 37ecd66..fb27eed 100644 --- a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py +++ b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_lut_pot_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py index 814ca06..d3031b4 100644 --- a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py +++ b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_lut_symmetric_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py index 41def61..1fe1825 100644 --- a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py +++ b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_pot_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. 
+# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py index 12dc432..1ff93cb 100644 --- a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py +++ b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_symmetric_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py index fdf7781..3e1b800 100644 --- a/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py +++ b/mct_quantizers/pytorch/quantizers/weights_inferable_quantizers/weights_uniform_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/setup.py b/setup.py index c1657e6..507a286 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -52,5 +52,5 @@ def get_release_arguments(): "Topic :: Scientific/Engineering :: Artificial Intelligence" ], install_requires=read_install_requires(), - python_requires='>=3.6' + python_requires='>=3.9' ) diff --git a/tests/__init__.py b/tests/__init__.py index 2147ec2..b22d379 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/__init__.py b/tests/compatibility_tests/__init__.py index 807f5e3..7df9c3f 100644 --- a/tests/compatibility_tests/__init__.py +++ b/tests/compatibility_tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
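Note on the setup.py hunk above: it raises the supported-Python floor from 3.6 to 3.9, matching the framework defaults bumped in the workflow files (TF 2.14 wheels, for instance, are only published for Python 3.9 and later). Keep in mind that python_requires only gates pip installs; a source checkout can still be imported on an older interpreter. A minimal, hypothetical runtime guard mirroring the same floor (not part of this diff) would look like:

    import sys

    # Hypothetical guard, for illustration only: python_requires='>=3.9'
    # blocks installation via pip, but does nothing for code imported
    # straight from a checked-out tree unless the package checks itself.
    if sys.version_info < (3, 9):
        raise RuntimeError(
            f"mct-quantizers needs Python >= 3.9, found {sys.version.split()[0]}")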
diff --git a/tests/compatibility_tests/keras_comp_tests/__init__.py b/tests/compatibility_tests/keras_comp_tests/__init__.py index 2147ec2..b22d379 100644 --- a/tests/compatibility_tests/keras_comp_tests/__init__.py +++ b/tests/compatibility_tests/keras_comp_tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/keras_comp_tests/base_activation_compatibility_test.py b/tests/compatibility_tests/keras_comp_tests/base_activation_compatibility_test.py index aa14115..929107c 100644 --- a/tests/compatibility_tests/keras_comp_tests/base_activation_compatibility_test.py +++ b/tests/compatibility_tests/keras_comp_tests/base_activation_compatibility_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/keras_comp_tests/base_weights_compatibility_test.py b/tests/compatibility_tests/keras_comp_tests/base_weights_compatibility_test.py index fa0c92e..94cf12e 100644 --- a/tests/compatibility_tests/keras_comp_tests/base_weights_compatibility_test.py +++ b/tests/compatibility_tests/keras_comp_tests/base_weights_compatibility_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/keras_comp_tests/compatibility_activation_load_model_test_suite.py b/tests/compatibility_tests/keras_comp_tests/compatibility_activation_load_model_test_suite.py index 7a3c9e9..0fe8276 100644 --- a/tests/compatibility_tests/keras_comp_tests/compatibility_activation_load_model_test_suite.py +++ b/tests/compatibility_tests/keras_comp_tests/compatibility_activation_load_model_test_suite.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/keras_comp_tests/compatibility_activation_save_model_test_suite.py b/tests/compatibility_tests/keras_comp_tests/compatibility_activation_save_model_test_suite.py index 1661d47..f258b6e 100644 --- a/tests/compatibility_tests/keras_comp_tests/compatibility_activation_save_model_test_suite.py +++ b/tests/compatibility_tests/keras_comp_tests/compatibility_activation_save_model_test_suite.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/compatibility_tests/keras_comp_tests/compatibility_load_model_test.py b/tests/compatibility_tests/keras_comp_tests/compatibility_load_model_test.py index 492bb1a..6afe303 100644 --- a/tests/compatibility_tests/keras_comp_tests/compatibility_load_model_test.py +++ b/tests/compatibility_tests/keras_comp_tests/compatibility_load_model_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/keras_comp_tests/compatibility_save_model_test.py b/tests/compatibility_tests/keras_comp_tests/compatibility_save_model_test.py index 88b7a20..f46f4ac 100644 --- a/tests/compatibility_tests/keras_comp_tests/compatibility_save_model_test.py +++ b/tests/compatibility_tests/keras_comp_tests/compatibility_save_model_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/keras_comp_tests/compatibility_weights_load_model_test_suite.py b/tests/compatibility_tests/keras_comp_tests/compatibility_weights_load_model_test_suite.py index 473bd18..0c702c6 100644 --- a/tests/compatibility_tests/keras_comp_tests/compatibility_weights_load_model_test_suite.py +++ b/tests/compatibility_tests/keras_comp_tests/compatibility_weights_load_model_test_suite.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/keras_comp_tests/compatibility_weights_save_model_test_suite.py b/tests/compatibility_tests/keras_comp_tests/compatibility_weights_save_model_test_suite.py index d1f93b0..01c8766 100644 --- a/tests/compatibility_tests/keras_comp_tests/compatibility_weights_save_model_test_suite.py +++ b/tests/compatibility_tests/keras_comp_tests/compatibility_weights_save_model_test_suite.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/torch_comp_tests/__init__.py b/tests/compatibility_tests/torch_comp_tests/__init__.py index 2147ec2..b22d379 100644 --- a/tests/compatibility_tests/torch_comp_tests/__init__.py +++ b/tests/compatibility_tests/torch_comp_tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/compatibility_tests/torch_comp_tests/base_activation_compatibility_test.py b/tests/compatibility_tests/torch_comp_tests/base_activation_compatibility_test.py index 1682862..65bb152 100644 --- a/tests/compatibility_tests/torch_comp_tests/base_activation_compatibility_test.py +++ b/tests/compatibility_tests/torch_comp_tests/base_activation_compatibility_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/torch_comp_tests/base_weights_compatibility_test.py b/tests/compatibility_tests/torch_comp_tests/base_weights_compatibility_test.py index 219f41e..4303fd1 100644 --- a/tests/compatibility_tests/torch_comp_tests/base_weights_compatibility_test.py +++ b/tests/compatibility_tests/torch_comp_tests/base_weights_compatibility_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/torch_comp_tests/compatibility_activation_load_model_test_suite.py b/tests/compatibility_tests/torch_comp_tests/compatibility_activation_load_model_test_suite.py index 87d251a..4938b10 100644 --- a/tests/compatibility_tests/torch_comp_tests/compatibility_activation_load_model_test_suite.py +++ b/tests/compatibility_tests/torch_comp_tests/compatibility_activation_load_model_test_suite.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/torch_comp_tests/compatibility_activation_save_model_test_suite.py b/tests/compatibility_tests/torch_comp_tests/compatibility_activation_save_model_test_suite.py index 535ba4a..ad94024 100644 --- a/tests/compatibility_tests/torch_comp_tests/compatibility_activation_save_model_test_suite.py +++ b/tests/compatibility_tests/torch_comp_tests/compatibility_activation_save_model_test_suite.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/torch_comp_tests/compatibility_load_model_test.py b/tests/compatibility_tests/torch_comp_tests/compatibility_load_model_test.py index 42c0d6b..0726e25 100644 --- a/tests/compatibility_tests/torch_comp_tests/compatibility_load_model_test.py +++ b/tests/compatibility_tests/torch_comp_tests/compatibility_load_model_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/compatibility_tests/torch_comp_tests/compatibility_save_model_test.py b/tests/compatibility_tests/torch_comp_tests/compatibility_save_model_test.py index cb2fbd5..9be9716 100644 --- a/tests/compatibility_tests/torch_comp_tests/compatibility_save_model_test.py +++ b/tests/compatibility_tests/torch_comp_tests/compatibility_save_model_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/torch_comp_tests/compatibility_weights_load_model_test_suite.py b/tests/compatibility_tests/torch_comp_tests/compatibility_weights_load_model_test_suite.py index e58d71d..b798b01 100644 --- a/tests/compatibility_tests/torch_comp_tests/compatibility_weights_load_model_test_suite.py +++ b/tests/compatibility_tests/torch_comp_tests/compatibility_weights_load_model_test_suite.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/torch_comp_tests/compatibility_weights_save_model_test_suite.py b/tests/compatibility_tests/torch_comp_tests/compatibility_weights_save_model_test_suite.py index 2ce5f2c..926b42a 100644 --- a/tests/compatibility_tests/torch_comp_tests/compatibility_weights_save_model_test_suite.py +++ b/tests/compatibility_tests/torch_comp_tests/compatibility_weights_save_model_test_suite.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/compatibility_tests/torch_comp_tests/onnx_utils.py b/tests/compatibility_tests/torch_comp_tests/onnx_utils.py index c56dbb1..dd1059b 100644 --- a/tests/compatibility_tests/torch_comp_tests/onnx_utils.py +++ b/tests/compatibility_tests/torch_comp_tests/onnx_utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/keras_tests/__init__.py b/tests/keras_tests/__init__.py index 2147ec2..b22d379 100644 --- a/tests/keras_tests/__init__.py +++ b/tests/keras_tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
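The test hunks that follow all replace the old `np.random.rand(...) * 100 - 50` input with a deterministic sign mask. The intent, as the updated comments state, is to guarantee that every test input contains both positive and negative values, rather than relying on the random draw to happen to produce them. A standalone sketch of the same pattern, with the shape the tests use:

    import numpy as np

    shape = (1, 50, 50, 3)
    # np.indices(shape).sum(axis=0) is the per-element sum of all four
    # coordinates; its parity alternates between neighbouring elements,
    # yielding a checkerboard of +1/-1, so both signs are always present.
    signs = np.where(np.indices(shape).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)
    input_tensor = np.random.rand(*shape) * 50 * signs   # values in (-50, 50)
    assert (input_tensor > 0).any() and (input_tensor < 0).any()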
diff --git a/tests/keras_tests/quantizers_tests/test_activation_inferable_quantizers.py b/tests/keras_tests/quantizers_tests/test_activation_inferable_quantizers.py index 6158f0e..d91b7a3 100644 --- a/tests/keras_tests/quantizers_tests/test_activation_inferable_quantizers.py +++ b/tests/keras_tests/quantizers_tests/test_activation_inferable_quantizers.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -43,8 +43,10 @@ def test_symmetric_activation_quantizer(self): self.assertTrue(quantizer_config['threshold'] == thresholds) self.assertTrue(quantizer_config['signed'] == signed) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) # Quantize tensor quantized_tensor = quantizer(input_tensor) @@ -68,7 +70,7 @@ def test_symmetric_activation_quantizer(self): scale = thresholds[0] / (2 ** (num_bits - int(signed))) manually_quantized_tensor = tf.clip_by_value(np.round(input_tensor / scale), clip_value_min=-thresholds[0], clip_value_max=thresholds[0] - scale) - self.assertTrue(np.all(manually_quantized_tensor.numpy() == quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor.numpy(), quantized_tensor.numpy())) def test_unsigned_symmetric_activation_quantizer(self): thresholds = [4.] @@ -87,8 +89,10 @@ def test_unsigned_symmetric_activation_quantizer(self): self.assertTrue(quantizer_config['threshold'] == thresholds) self.assertTrue(quantizer_config['signed'] == signed) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) # Quantize tensor quantized_tensor = quantizer(input_tensor) @@ -111,7 +115,7 @@ def test_unsigned_symmetric_activation_quantizer(self): scale = thresholds[0] / (2 ** num_bits - int(signed)) manually_quantized_tensor = tf.clip_by_value(np.round(input_tensor / scale), clip_value_min=0, clip_value_max=thresholds[0] - scale) - self.assertTrue(np.all(manually_quantized_tensor.numpy() == quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor.numpy(), quantized_tensor.numpy())) def test_illegal_power_of_two_threshold(self): with self.assertRaises(Exception) as e: @@ -144,8 +148,10 @@ def test_power_of_two_activation_quantizer(self): self.assertTrue(np.all(quantizer.min_range == -1 * thresholds)) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. 
+ input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) fake_quantized_tensor = quantizer(input_tensor) self.assertTrue(np.max(fake_quantized_tensor) < thresholds[ @@ -166,7 +172,7 @@ def test_power_of_two_activation_quantizer(self): manually_quantized_tensor = np.round( tf.clip_by_value(input_tensor, clip_value_min=-thresholds, clip_value_max=thresholds - scale) / scale) * scale - self.assertTrue(np.all(manually_quantized_tensor == fake_quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor, fake_quantized_tensor.numpy())) def test_unsigned_power_of_two_activation_quantizer(self): thresholds = [1.] @@ -191,8 +197,10 @@ def test_unsigned_power_of_two_activation_quantizer(self): self.assertTrue(np.all(quantizer.min_range == [0])) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) fake_quantized_tensor = quantizer(input_tensor) self.assertTrue(np.max(fake_quantized_tensor) < thresholds[ @@ -213,7 +221,7 @@ def test_unsigned_power_of_two_activation_quantizer(self): manually_quantized_tensor = np.round( tf.clip_by_value(input_tensor, clip_value_min=0, clip_value_max=thresholds - scale) / scale) * scale - self.assertTrue(np.all(manually_quantized_tensor == fake_quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor, fake_quantized_tensor.numpy())) def test_uniform_activation_quantizer(self): min_range = [-10.] @@ -232,8 +240,10 @@ def test_uniform_activation_quantizer(self): self.assertTrue(quantizer_config['min_range'] == min_range) self.assertTrue(quantizer_config['max_range'] == max_range) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 4, 50) * 100 - 50, tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 4, 50) * 50 + signs = np.where(np.indices((1, 50, 4, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) fake_quantized_tensor = quantizer(input_tensor) # We expect tensor values values to be between min_range to max_range @@ -254,7 +264,7 @@ def test_uniform_activation_quantizer(self): manually_quantized_tensor = \ np.round((tf.clip_by_value(input_tensor, clip_value_min=min_range, clip_value_max=max_range) - min_range) / scale) * scale + min_range - self.assertTrue(np.all(manually_quantized_tensor == fake_quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor, fake_quantized_tensor.numpy())) def test_illegal_range_uniform_activation_quantizer(self): min_range = [3.] @@ -272,8 +282,10 @@ def test_illegal_range_uniform_activation_quantizer(self): # self.assertTrue(quantizer_config['min_range'] == min_range) # self.assertTrue(quantizer_config['max_range'] == max_range) - # Initialize a random input to quantize between -50 to 50. 
- input_tensor = tf.constant(np.random.rand(1, 50, 4, 50) * 100 - 50, tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 4, 50) * 50 + signs = np.where(np.indices((1, 50, 4, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) fake_quantized_tensor = quantizer(input_tensor) # We expect each channel values to be between min_range to max_range for each channel diff --git a/tests/keras_tests/quantizers_tests/test_activation_lut_pot_inferable_quantizer.py b/tests/keras_tests/quantizers_tests/test_activation_lut_pot_inferable_quantizer.py index a349e2a..50d8973 100644 --- a/tests/keras_tests/quantizers_tests/test_activation_lut_pot_inferable_quantizer.py +++ b/tests/keras_tests/quantizers_tests/test_activation_lut_pot_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -50,8 +50,10 @@ def test_lut_pot_signed_quantizer(self): self.assertTrue(quantizer_config['lut_values_bitwidth'] == lut_values_bitwidth) self.assertTrue(quantizer_config['eps'] == eps) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) quantized_tensor = quantizer(input_tensor) # Using a signed quantization, so we expect all values to be between -abs(max(threshold)) @@ -77,7 +79,7 @@ def test_lut_pot_signed_quantizer(self): quant_tensor_values = (lut_values / (2 ** (lut_values_bitwidth - int(signed)))) * thresholds - self.assertTrue(np.all(np.unique(quantized_tensor) == np.sort(quant_tensor_values))) + self.assertTrue(np.allclose(np.unique(quantized_tensor), np.sort(quant_tensor_values))) # Check quantized tensor assigned correctly tensor = tf.clip_by_value((input_tensor / (thresholds + eps)) * (2 ** (num_bits - 1)), @@ -87,7 +89,7 @@ def test_lut_pot_signed_quantizer(self): lut_values_assignments = tf.argmin(tf.abs(tensor - expanded_lut_values), axis=-1) centers = tf.gather(lut_values.flatten(), lut_values_assignments) - self.assertTrue(np.all(centers / (2 ** (lut_values_bitwidth - 1)) * thresholds == quantized_tensor), + self.assertTrue(np.allclose(centers / (2 ** (lut_values_bitwidth - 1)) * thresholds, quantized_tensor), "Quantized tensor values weren't assigned correctly") # Assert some values are negative (signed quantization) @@ -120,8 +122,10 @@ def test_lut_pot_unsigned_quantizer(self): self.assertTrue(quantizer_config['lut_values_bitwidth'] == lut_values_bitwidth) self.assertTrue(quantizer_config['eps'] == eps) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. 
+ input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) quantized_tensor = quantizer(input_tensor) # Using a unsigned quantization, so we expect all values to be between 0 @@ -146,7 +150,7 @@ def test_lut_pot_unsigned_quantizer(self): quant_tensor_values = (lut_values / (2 ** lut_values_bitwidth)) * thresholds - self.assertTrue(np.all(np.unique(quantized_tensor) == np.sort(quant_tensor_values))) + self.assertTrue(np.allclose(np.unique(quantized_tensor), np.sort(quant_tensor_values))) # Check quantized tensor assigned correctly tensor = tf.clip_by_value((input_tensor / (thresholds + eps)) * (2 ** lut_values_bitwidth), @@ -157,7 +161,7 @@ def test_lut_pot_unsigned_quantizer(self): lut_values_assignments = tf.argmin(tf.abs(tensor - expanded_lut_values), axis=-1) centers = tf.gather(lut_values.flatten(), lut_values_assignments) - self.assertTrue(np.all(centers / (2 ** lut_values_bitwidth) * thresholds == quantized_tensor), + self.assertTrue(np.allclose(centers / (2 ** lut_values_bitwidth) * thresholds, quantized_tensor), "Quantized tensor values weren't assigned correctly") # Assert all values are non-negative (unsigned quantization) diff --git a/tests/keras_tests/quantizers_tests/test_illegal_activation_lut_pot_inferable_quantizer.py b/tests/keras_tests/quantizers_tests/test_illegal_activation_lut_pot_inferable_quantizer.py index e445db5..6162f52 100644 --- a/tests/keras_tests/quantizers_tests/test_illegal_activation_lut_pot_inferable_quantizer.py +++ b/tests/keras_tests/quantizers_tests/test_illegal_activation_lut_pot_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/keras_tests/quantizers_tests/test_illegal_weights_inferable_quantizer.py b/tests/keras_tests/quantizers_tests/test_illegal_weights_inferable_quantizer.py index 5d5b357..361f721 100644 --- a/tests/keras_tests/quantizers_tests/test_illegal_weights_inferable_quantizer.py +++ b/tests/keras_tests/quantizers_tests/test_illegal_weights_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/keras_tests/quantizers_tests/test_illegal_weights_lut_inferable_quantizer.py b/tests/keras_tests/quantizers_tests/test_illegal_weights_lut_inferable_quantizer.py index bd9786f..56436a5 100644 --- a/tests/keras_tests/quantizers_tests/test_illegal_weights_lut_inferable_quantizer.py +++ b/tests/keras_tests/quantizers_tests/test_illegal_weights_lut_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
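Alongside the input change, every exact-equality assertion in these tests becomes `np.allclose`. Bit-exact `==` between a manually quantized reference and the quantizer's output is brittle: the two computation paths can round differently at float32 precision. `np.allclose(a, b)` instead accepts `|a - b| <= atol + rtol * |b|` element-wise, with defaults `rtol=1e-05` and `atol=1e-08`. A small illustration of the failure mode it avoids:

    import numpy as np

    a = np.float32(0.1) * 3    # accumulated float32 rounding error
    b = 0.3                    # float64 literal
    print(a == b)              # False: bit-exact comparison fails
    print(np.allclose(a, b))   # True: difference (~1.2e-8) is within tolerance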
@@ -168,8 +168,10 @@ def weights_inferable_quantizer_test(self, inferable_quantizer, num_bits, thresh perm_vec[channel_axis] = input_rank - 1 perm_vec[input_rank - 1] = channel_axis - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, dtype=tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) # change the input only when channel_axis is not the last axis input_tensor = tf.transpose(input_tensor, perm=perm_vec) @@ -213,7 +215,7 @@ def weights_inferable_quantizer_test(self, inferable_quantizer, num_bits, thresh self.assertTrue(len(np.unique(channel_slice_i)) <= 2 ** num_bits, f'Quantized tensor expected to have no more than {2 ** num_bits} unique values but has ' f'{len(np.unique(channel_slice_i))} unique values') - self.assertTrue(np.all(np.unique(channel_slice_i) == np.sort(channel_quant_tensor_values))) + self.assertTrue(np.allclose(np.unique(channel_slice_i), np.sort(channel_quant_tensor_values))) # Check quantized tensor assigned correctly tensor = tf.clip_by_value((input_tensor / (threshold[i] + eps)) * (2 ** (num_bits - 1)), @@ -224,14 +226,14 @@ def weights_inferable_quantizer_test(self, inferable_quantizer, num_bits, thresh centers = tf.gather(lut_values.flatten(), lut_values_assignments) self.assertTrue( - np.all(centers / (2 ** (lut_values_bitwidth - 1)) * threshold[i] == channel_slice_i), + np.allclose(centers / (2 ** (lut_values_bitwidth - 1)) * threshold[i], channel_slice_i), "Quantized tensor values weren't assigned correctly") else: quant_tensor_values = lut_values / (2 ** (lut_values_bitwidth - 1)) * threshold self.assertTrue(len(np.unique(quantized_tensor)) <= 2 ** num_bits, f'Quantized tensor expected to have no more than {2 ** num_bits} unique values but has ' f'{len(np.unique(quantized_tensor))} unique values') - self.assertTrue(np.all(np.unique(quantized_tensor) == np.sort(quant_tensor_values))) + self.assertTrue(np.allclose(np.unique(quantized_tensor), np.sort(quant_tensor_values))) # Check quantized tensor assigned correctly tensor = tf.clip_by_value((input_tensor / (threshold[0] + eps)) * (2 ** (num_bits - 1)), @@ -242,7 +244,7 @@ def weights_inferable_quantizer_test(self, inferable_quantizer, num_bits, thresh centers = tf.gather(lut_values.flatten(), lut_values_assignments) self.assertTrue( - np.all(centers / (2 ** (lut_values_bitwidth - 1)) * threshold[0] == quantized_tensor), + np.allclose(centers / (2 ** (lut_values_bitwidth - 1)) * threshold[0], quantized_tensor), "Quantized tensor values weren't assigned correctly") # Assert some values are negative (signed quantization) diff --git a/tests/keras_tests/quantizers_tests/test_weights_inferable_quantizer.py b/tests/keras_tests/quantizers_tests/test_weights_inferable_quantizer.py index d83f669..35fe7f2 100644 --- a/tests/keras_tests/quantizers_tests/test_weights_inferable_quantizer.py +++ b/tests/keras_tests/quantizers_tests/test_weights_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -42,8 +42,10 @@ def test_symmetric_weights_quantizer_per_tensor(self): self.assertTrue(quantizer_config['per_channel'] is False) self.assertTrue(quantizer_config['channel_axis'] is None) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, dtype=tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) # Quantize tensor quantized_tensor = quantizer(input_tensor) @@ -67,7 +69,7 @@ def test_symmetric_weights_quantizer_per_tensor(self): scale = thresholds[0] / (2 ** (num_bits - 1)) manually_quantized_tensor = tf.clip_by_value(np.round(input_tensor / scale), clip_value_min=-thresholds[0], clip_value_max=thresholds[0] - scale) - self.assertTrue(np.all(manually_quantized_tensor.numpy() == quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor.numpy(), quantized_tensor.numpy())) def test_symmetric_weights_quantizer_per_channel(self): thresholds = [3., 6., 2.] @@ -86,8 +88,10 @@ def test_symmetric_weights_quantizer_per_channel(self): self.assertTrue(quantizer_config['channel_axis'] == 3) thresholds = np.asarray(thresholds) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, dtype=tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) # Quantize tensor quantized_tensor = quantizer(input_tensor) @@ -114,7 +118,7 @@ def test_symmetric_weights_quantizer_per_channel(self): manually_quantized_tensor = np.round( tf.clip_by_value(input_tensor, clip_value_min=-thresholds, clip_value_max=thresholds - scale) / scale) * scale - self.assertTrue(np.all(manually_quantized_tensor == quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor, quantized_tensor.numpy())) def test_power_of_two_weights_quantizer_per_channel(self): thresholds = [2., 4., 1.] @@ -139,8 +143,10 @@ def test_power_of_two_weights_quantizer_per_channel(self): self.assertTrue(np.all(quantizer.min_range == -1 * thresholds)) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, dtype=tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. 
+ input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) fake_quantized_tensor = quantizer(input_tensor) # We expect each channel values to be between -threshold to threshold since it's a signed quantization @@ -165,7 +171,7 @@ def test_power_of_two_weights_quantizer_per_channel(self): manually_quantized_tensor = np.round( tf.clip_by_value(input_tensor, clip_value_min=-thresholds, clip_value_max=thresholds - scale) / scale) * scale - self.assertTrue(np.all(manually_quantized_tensor == fake_quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor, fake_quantized_tensor.numpy())) def test_power_of_two_weights_quantizer_per_tensor(self): thresholds = [1.] @@ -188,8 +194,10 @@ def test_power_of_two_weights_quantizer_per_tensor(self): np.log2(delta) == np.log2(delta).astype(int)) self.assertTrue(is_pot_delta, f'Expected delta to be POT but: {delta}') - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, dtype=tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) fake_quantized_tensor = quantizer(input_tensor) self.assertTrue(np.max(fake_quantized_tensor) < thresholds[ @@ -209,7 +217,7 @@ def test_power_of_two_weights_quantizer_per_tensor(self): manually_quantized_tensor = np.round( tf.clip_by_value(input_tensor, clip_value_min=-thresholds, clip_value_max=thresholds - scale) / scale) * scale - self.assertTrue(np.all(manually_quantized_tensor == fake_quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor, fake_quantized_tensor.numpy())) def test_uniform_weights_quantizer_per_channel(self): num_bits = 3 @@ -231,8 +239,10 @@ def test_uniform_weights_quantizer_per_channel(self): self.assertTrue(quantizer_config['per_channel'] is True) self.assertTrue(quantizer_config['channel_axis'] == channel_axis) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 4, 50, 50) * 100 - 50, dtype=tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. 
+ input_tensor = np.random.rand(1, 4, 50, 50) * 50 + signs = np.where(np.indices((1, 4, 50, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) fake_quantized_tensor = quantizer(input_tensor) min_range = np.asarray(min_range) @@ -264,7 +274,7 @@ def test_uniform_weights_quantizer_per_channel(self): scale = (max_range - min_range) / (2 ** num_bits - 1) manually_quantized_tensor = np.round((tf.clip_by_value( input_tensor, clip_value_min=min_range, clip_value_max=max_range) - min_range) / scale) * scale + min_range - self.assertTrue(np.all(manually_quantized_tensor == fake_quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor, fake_quantized_tensor.numpy())) def test_uniform_weights_quantizer_per_tensor(self): num_bits = 3 @@ -286,8 +296,10 @@ def test_uniform_weights_quantizer_per_tensor(self): self.assertTrue(quantizer_config['per_channel'] is False) self.assertTrue(quantizer_config['channel_axis'] == channel_axis) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 4, 50) * 100 - 50, dtype=tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 4, 50) * 50 + signs = np.where(np.indices((1, 50, 4, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) fake_quantized_tensor = quantizer(input_tensor) min_range = np.asarray(min_range) @@ -311,7 +323,7 @@ def test_uniform_weights_quantizer_per_tensor(self): scale = (max_range - min_range) / (2 ** num_bits - 1) manually_quantized_tensor = np.round((tf.clip_by_value( input_tensor, clip_value_min=min_range, clip_value_max=max_range) - min_range) / scale) * scale + min_range - self.assertTrue(np.all(manually_quantized_tensor == fake_quantized_tensor.numpy())) + self.assertTrue(np.allclose(manually_quantized_tensor, fake_quantized_tensor.numpy())) def test_uniform_weights_quantizer_zero_not_in_range(self): num_bits = 3 @@ -348,8 +360,10 @@ def test_uniform_weights_quantizer_zero_not_in_range(self): # self.assertTrue(quantizer_config['max_range'][i] == max_adj) # self.assertTrue(quantizer_config['min_range'][i] == min_adj) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 4, 50) * 100 - 50, dtype=tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 4, 50) * 50 + signs = np.where(np.indices((1, 50, 4, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) fake_quantized_tensor = quantizer(input_tensor) # We expect each channel values to be between min_range to max_range for each channel diff --git a/tests/keras_tests/quantizers_tests/test_weights_lut_inferable_quantizer.py b/tests/keras_tests/quantizers_tests/test_weights_lut_inferable_quantizer.py index 2f52b69..5d3165f 100644 --- a/tests/keras_tests/quantizers_tests/test_weights_lut_inferable_quantizer.py +++ b/tests/keras_tests/quantizers_tests/test_weights_lut_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -55,8 +55,10 @@ def _weights_lut_quantizer_test(self, inferable_quantizer, num_bits, threshold, perm_vec[channel_axis] = input_rank - 1 perm_vec[input_rank - 1] = channel_axis - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, dtype=tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) # change the input only when channel_axis is not the last axis input_tensor = tf.transpose(input_tensor, perm=perm_vec) @@ -99,7 +101,7 @@ def _weights_lut_quantizer_test(self, inferable_quantizer, num_bits, threshold, self.assertTrue(len(np.unique(channel_slice_i)) <= 2 ** num_bits, f'Quantized tensor expected to have no more than {2 ** num_bits} unique values but has ' f'{len(np.unique(channel_slice_i))} unique values') - self.assertTrue(np.all(np.unique(channel_slice_i) == np.sort(channel_quant_tensor_values))) + self.assertTrue(np.allclose(np.unique(channel_slice_i), np.sort(channel_quant_tensor_values))) # Check quantized tensor assigned correctly tensor = tf.clip_by_value((input_tensor / (threshold[i] + eps)) * (2 ** (num_bits - 1)), @@ -109,14 +111,14 @@ def _weights_lut_quantizer_test(self, inferable_quantizer, num_bits, threshold, lut_values_assignments = tf.argmin(tf.abs(tensor - expanded_lut_values), axis=-1) centers = tf.gather(lut_values.flatten(), lut_values_assignments) - self.assertTrue(np.all(centers / (2 ** (lut_values_bitwidth - 1)) * threshold[i] == channel_slice_i), + self.assertTrue(np.allclose(centers / (2 ** (lut_values_bitwidth - 1)) * threshold[i], channel_slice_i), "Quantized tensor values weren't assigned correctly") else: quant_tensor_values = lut_values / (2 ** (lut_values_bitwidth - 1)) * threshold self.assertTrue(len(np.unique(quantized_tensor)) <= 2 ** num_bits, f'Quantized tensor expected to have no more than {2 ** num_bits} unique values but has ' f'{len(np.unique(quantized_tensor))} unique values') - self.assertTrue(np.all(np.unique(quantized_tensor) == np.sort(quant_tensor_values))) + self.assertTrue(np.allclose(np.unique(quantized_tensor), np.sort(quant_tensor_values))) # Check quantized tensor assigned correctly tensor = tf.clip_by_value((input_tensor / (threshold[0] + eps)) * (2 ** (num_bits - 1)), @@ -126,7 +128,7 @@ def _weights_lut_quantizer_test(self, inferable_quantizer, num_bits, threshold, lut_values_assignments = tf.argmin(tf.abs(tensor - expanded_lut_values), axis=-1) centers = tf.gather(lut_values.flatten(), lut_values_assignments) - self.assertTrue(np.all(centers / (2 ** (lut_values_bitwidth - 1)) * threshold[0] == quantized_tensor), + self.assertTrue(np.allclose(centers / (2 ** (lut_values_bitwidth - 1)) * threshold[0], quantized_tensor), "Quantized tensor values weren't assigned correctly") # Assert some values are negative (signed quantization) diff --git a/tests/keras_tests/test_activation_quantizer_holder.py b/tests/keras_tests/test_activation_quantizer_holder.py index 41dd135..d42784e 100644 --- a/tests/keras_tests/test_activation_quantizer_holder.py +++ b/tests/keras_tests/test_activation_quantizer_holder.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, 
Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -40,8 +40,10 @@ def test_activation_quantization_holder_inference(self): signed=signed) model = keras.Sequential([KerasActivationQuantizationHolder(quantizer)]) - # Initialize a random input to quantize between -50 to 50. - input_tensor = tf.constant(np.random.rand(1, 50, 50, 3) * 100 - 50, tf.float32) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = np.random.rand(1, 50, 50, 3) * 50 + signs = np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8) + input_tensor = tf.constant(input_tensor * signs, dtype=tf.float32) # Quantize tensor quantized_tensor = model(input_tensor) diff --git a/tests/keras_tests/test_get_quantizers.py b/tests/keras_tests/test_get_quantizers.py index 89a2b6c..328918d 100644 --- a/tests/keras_tests/test_get_quantizers.py +++ b/tests/keras_tests/test_get_quantizers.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/keras_tests/test_keras_load_model.py b/tests/keras_tests/test_keras_load_model.py index 067aa42..8c9e7b6 100644 --- a/tests/keras_tests/test_keras_load_model.py +++ b/tests/keras_tests/test_keras_load_model.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -64,7 +64,7 @@ def _one_layer_model_save_and_load(self, layer_with_quantizer): os.remove(tmp_h5_file) loaded_pred = loaded_model(x) - self.assertTrue(np.all(loaded_pred == pred)) + self.assertTrue(np.allclose(loaded_pred, pred)) def test_save_and_load_activation_pot(self): num_bits = 3 diff --git a/tests/keras_tests/test_keras_quantization_wrapper.py b/tests/keras_tests/test_keras_quantization_wrapper.py index 00a0225..aca1546 100644 --- a/tests/keras_tests/test_keras_quantization_wrapper.py +++ b/tests/keras_tests/test_keras_quantization_wrapper.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -116,4 +116,4 @@ def test_weights_quantization_wrapper(self): x = sub_wrapper.call(x) x = mul_wrapper.call(x) wrappers_output = matmul_wrapper.call(x) - self.assertTrue((wrappers_output == model_output).numpy().all()) + self.assertTrue(np.allclose(wrappers_output.numpy(), model_output.numpy())) diff --git a/tests/pytorch_tests/__init__.py b/tests/pytorch_tests/__init__.py index 807f5e3..7df9c3f 100644 --- a/tests/pytorch_tests/__init__.py +++ b/tests/pytorch_tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/pytorch_tests/onnx_export_tests/__init__.py b/tests/pytorch_tests/onnx_export_tests/__init__.py index 2147ec2..b22d379 100644 --- a/tests/pytorch_tests/onnx_export_tests/__init__.py +++ b/tests/pytorch_tests/onnx_export_tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/pytorch_tests/onnx_export_tests/test_activation_quantizers.py b/tests/pytorch_tests/onnx_export_tests/test_activation_quantizers.py index 2871a6c..805b3e7 100644 --- a/tests/pytorch_tests/onnx_export_tests/test_activation_quantizers.py +++ b/tests/pytorch_tests/onnx_export_tests/test_activation_quantizers.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/pytorch_tests/onnx_export_tests/test_weight_quantizers.py b/tests/pytorch_tests/onnx_export_tests/test_weight_quantizers.py index 4c677dc..b476cfb 100644 --- a/tests/pytorch_tests/onnx_export_tests/test_weight_quantizers.py +++ b/tests/pytorch_tests/onnx_export_tests/test_weight_quantizers.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
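The switch from exact equality to np.allclose throughout these tests reflects how the compared values travel: ONNX initializers are typically stored as float32, so parameters that began life as Python floats or float64 arrays can shift by an ulp on the round trip. A hedged sketch using only the public onnx API (the repo's _get_qparams_* helpers are internal; read_initializers is a hypothetical stand-in):

import numpy as np
import onnx
from onnx import numpy_helper

def read_initializers(path):
    # Initializers come back as float32 numpy arrays.
    model = onnx.load(path)
    return {t.name: numpy_helper.to_array(t) for t in model.graph.initializer}

# Why allclose: float64 parameters round to float32 on export, so exact
# equality can fail by one ulp while the values are semantically identical.
thresholds = np.array([2.0, 0.5, 7.3])
assert np.allclose(thresholds, thresholds.astype(np.float32))
assert not np.all(thresholds == thresholds.astype(np.float32))  # 7.3 is inexact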
@@ -69,7 +69,7 @@ def test_onnx_weight_symmetric(self): assert onnx_nbits == num_bits, f'Expected num_bits in quantizer to be {num_bits} but found {onnx_nbits}' assert onnx_per_channel == per_channel, f'Expected per_channel in quantizer to be {per_channel} but found {onnx_per_channel}' - assert np.all(thresholds==onnx_threshold), f'Expected threshold in quantizer to be {thresholds} but found {onnx_threshold}' + assert np.allclose(thresholds, onnx_threshold), f'Expected threshold in quantizer to be {thresholds} but found {onnx_threshold}' assert onnx_channel_axis == channel_axis, f'Expected channel_axis in quantizer to be {channel_axis} but found ' f'{onnx_channel_axis}' onnx_signed = node_qparams['signed'] @@ -111,7 +111,7 @@ def test_onnx_weight_pot(self): assert onnx_nbits == num_bits, f'Expected num_bits in quantizer to be {num_bits} but found {onnx_nbits}' assert onnx_per_channel == per_channel, f'Expected per_channel in quantizer to be {per_channel} but found {onnx_per_channel}' - assert np.all(thresholds==onnx_threshold), f'Expected threshold in quantizer to be {thresholds} but found {onnx_threshold}' + assert np.allclose(thresholds, onnx_threshold), f'Expected threshold in quantizer to be {thresholds} but found {onnx_threshold}' assert onnx_channel_axis == channel_axis, f'Expected channel_axis in quantizer to be {channel_axis} but found ' f'{onnx_channel_axis}' @@ -155,8 +155,8 @@ def test_onnx_weight_uniform(self): assert onnx_nbits == num_bits, f'Expected num_bits in quantizer to be {num_bits} but found {onnx_nbits}' assert onnx_per_channel == per_channel, f'Expected per_channel in quantizer to be {per_channel} but found {onnx_per_channel}' - assert np.all(np.zeros(shape=(4,))==onnx_min_range), f'Expected min_range in quantizer to be zeros after range adjustment but found {onnx_min_range}' - assert np.all(max_range==onnx_max_range), f'Expected max_range in quantizer to be {max_range} but found {onnx_max_range}' + assert np.allclose(np.zeros(shape=(4,)), onnx_min_range), f'Expected min_range in quantizer to be zeros after range adjustment but found {onnx_min_range}' + assert np.allclose(max_range, onnx_max_range), f'Expected max_range in quantizer to be {max_range} but found {onnx_max_range}' assert onnx_channel_axis == channel_axis, f'Expected channel_axis in quantizer to be {channel_axis} but found {onnx_channel_axis}' onnx_signed = node_qparams['signed'] @@ -197,7 +197,7 @@ def test_onnx_weight_symmetric_per_tensor(self): assert onnx_nbits == num_bits, f'Expected num_bits in quantizer to be {num_bits} but found {onnx_nbits}' assert onnx_per_channel == per_channel, f'Expected per_channel in quantizer to be {per_channel} but found {onnx_per_channel}' - assert np.all(thresholds==onnx_threshold), f'Expected threshold in quantizer to be {thresholds} but found {onnx_threshold}' + assert np.allclose(thresholds, onnx_threshold), f'Expected threshold in quantizer to be {thresholds} but found {onnx_threshold}' onnx_signed = node_qparams['signed'] assert onnx_signed == True, f'Expected signed in weight quantizer to be True but is {onnx_signed}' @@ -234,7 +234,7 @@ def test_onnx_weight_pot_per_tensor(self): assert onnx_nbits == num_bits, f'Expected num_bits in quantizer to be {num_bits} but found {onnx_nbits}' assert onnx_per_channel == per_channel, f'Expected per_channel in quantizer to be {per_channel} but found {onnx_per_channel}' - assert np.all(thresholds==onnx_threshold), f'Expected threshold in quantizer to be {thresholds} but found {onnx_threshold}' + assert np.allclose(thresholds, onnx_threshold), f'Expected threshold in quantizer to be {thresholds} but found {onnx_threshold}' onnx_signed = node_qparams['signed'] assert onnx_signed == True, f'Expected signed in weight quantizer to be True but is {onnx_signed}'
@@ -276,8 +276,8 @@ def test_onnx_weight_uniform_per_tensor(self): assert onnx_nbits == num_bits, f'Expected num_bits in quantizer to be {num_bits} but found {onnx_nbits}' assert onnx_per_channel == per_channel, f'Expected per_channel in quantizer to be {per_channel} but found {onnx_per_channel}' - assert np.all(np.zeros(shape=(1,))==onnx_min_range), f'Expected min_range in quantizer to be zeros after range adjustment but found {onnx_min_range}' - assert np.all(max_range==onnx_max_range), f'Expected max_range in quantizer to be {max_range} but found {onnx_max_range}' + assert np.allclose(np.zeros(shape=(1,)), onnx_min_range), f'Expected min_range in quantizer to be zeros after range adjustment but found {onnx_min_range}' + assert np.allclose(max_range, onnx_max_range), f'Expected max_range in quantizer to be {max_range} but found {onnx_max_range}' onnx_signed = node_qparams['signed'] assert onnx_signed == True, f'Expected signed in weight quantizer to be True but is {onnx_signed}' assert node_qparams[MCTQ_VERSION] == mctq_version, f'Expected version to be {mctq_version} but is {node_qparams[MCTQ_VERSION]}' @@ -391,8 +391,8 @@ def test_onnx_weight_lut_sym(self, node_qparams = _get_qparams_from_input_tensors_for_single_quantizer(onnx_file_path, onnx_op_name) lut_values_onnx = node_qparams[0] threshold_onnx = node_qparams[1] - assert np.all(lut_values_onnx==lut_values), f'Expected lut_values in quantizer to be {lut_values} but found {lut_values_onnx}' - assert np.all(threshold_onnx==threshold), f'Expected threshold in quantizer to be {threshold} but found {threshold_onnx}' + assert np.allclose(lut_values_onnx, lut_values), f'Expected lut_values in quantizer to be {lut_values} but found {lut_values_onnx}' + assert np.allclose(threshold_onnx, threshold), f'Expected threshold in quantizer to be {threshold} but found {threshold_onnx}' node_qparams = _get_qparams_from_attributes_for_single_quantizer(onnx_file_path, onnx_op_name) onnx_nbits = node_qparams['num_bits'] diff --git a/tests/pytorch_tests/quantizers_tests/test_activation_lut_inferable_quantizer.py b/tests/pytorch_tests/quantizers_tests/test_activation_lut_inferable_quantizer.py index 6f05f8a..1e07009 100644 --- a/tests/pytorch_tests/quantizers_tests/test_activation_lut_inferable_quantizer.py +++ b/tests/pytorch_tests/quantizers_tests/test_activation_lut_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -38,8 +38,10 @@ def test_lut_pot_signed_quantizer(self): lut_values_bitwidth, threshold=thresholds) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 3, 3, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values.
+ input_tensor = torch.rand(1, 3, 3, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 3, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs fake_quantized_tensor = quantizer(input_tensor.to(get_working_device())) # Using a signed quantization, so we expect all values to be between -abs(max(threshold)) @@ -64,8 +66,7 @@ def test_lut_pot_signed_quantizer(self): self.assertTrue(len(np.unique(fake_quantized_tensor)) <= 2 ** num_bits, f'Quantized tensor expected to have no more than {2 ** num_bits} unique values but has ' f'{len(np.unique(fake_quantized_tensor))} unique values') - self.assertTrue(np.all(np.unique(fake_quantized_tensor) - == np.sort(quant_tensor_values))) + self.assertTrue(np.allclose(np.unique(fake_quantized_tensor), np.sort(quant_tensor_values))) # Check quantized tensor assigned correctly clip_max = 2 ** (lut_values_bitwidth - 1) - 1 @@ -78,8 +79,8 @@ def test_lut_pot_signed_quantizer(self): lut_values_assignments = torch.argmin(torch.abs(tensor - expanded_lut_values), dim=-1) centers = np.asarray(lut_values).flatten()[lut_values_assignments] - self.assertTrue(np.all(centers / (2 ** (lut_values_bitwidth - int(signed))) * thresholds == - fake_quantized_tensor), "Quantized tensor values weren't assigned correctly") + self.assertTrue(np.allclose(centers / (2 ** (lut_values_bitwidth - int(signed))) * thresholds, + fake_quantized_tensor), "Quantized tensor values weren't assigned correctly") # Assert some values are negative (signed quantization) self.assertTrue(np.any(fake_quantized_tensor < 0), @@ -99,8 +100,10 @@ def test_lut_pot_unsigned_quantizer(self): lut_values_bitwidth, threshold=thresholds) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 3, 3, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. 
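The LUT checks in these hunks recompute the assignment by hand: scale the input into the LUT's integer domain, snap each entry to the nearest LUT value, then rescale by threshold / 2**(bitwidth - 1). A self-contained sketch of that idiom with made-up values:

import torch

lut_values = torch.tensor([-25.0, 25.0])   # illustrative two-entry LUT
threshold, bitwidth = 4.0, 8
x = torch.randn(10) * threshold
# Project into the integer LUT domain and clip to the signed 8-bit range.
scaled = torch.clip(x / threshold * 2 ** (bitwidth - 1), -128, 127)
# Nearest-LUT-value assignment, the same argmin-over-expanded-values pattern
# the asserts above verify.
idx = torch.argmin(torch.abs(scaled.unsqueeze(-1) - lut_values), dim=-1)
fake_quant = lut_values[idx] / 2 ** (bitwidth - 1) * threshold
assert len(fake_quant.unique()) <= len(lut_values)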
+ input_tensor = torch.rand(1, 3, 3, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 3, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs fake_quantized_tensor = quantizer(input_tensor.to(get_working_device())) # Using a unsigned quantization, so we expect all values to be between 0 @@ -125,7 +128,7 @@ def test_lut_pot_unsigned_quantizer(self): f'Quantized tensor expected to have no more than {2 ** num_bits} unique values but has ' f'{len(np.unique(fake_quantized_tensor))} unique values') - self.assertTrue(np.all(np.unique(fake_quantized_tensor) == np.sort(quant_tensor_values))) + self.assertTrue(np.allclose(np.unique(fake_quantized_tensor), np.sort(quant_tensor_values))) # Check quantized tensor assigned correctly clip_max = 2 ** lut_values_bitwidth - 1 @@ -138,8 +141,8 @@ def test_lut_pot_unsigned_quantizer(self): lut_values_assignments = torch.argmin(torch.abs(tensor - expanded_lut_values), dim=-1) centers = np.asarray(lut_values).flatten()[lut_values_assignments] - self.assertTrue(np.all(centers / (2 ** (lut_values_bitwidth - int(signed))) * thresholds == - fake_quantized_tensor), "Quantized tensor values weren't assigned correctly") + self.assertTrue(np.allclose(centers / (2 ** (lut_values_bitwidth - int(signed))) * thresholds, + fake_quantized_tensor), "Quantized tensor values weren't assigned correctly") # Assert all values are non-negative (unsigned quantization) self.assertTrue(np.all(fake_quantized_tensor >= 0), diff --git a/tests/pytorch_tests/quantizers_tests/test_activations_inferable_quantizer.py b/tests/pytorch_tests/quantizers_tests/test_activations_inferable_quantizer.py index ec49b12..dc7b9f7 100644 --- a/tests/pytorch_tests/quantizers_tests/test_activations_inferable_quantizer.py +++ b/tests/pytorch_tests/quantizers_tests/test_activations_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -35,8 +35,10 @@ def test_symmetric_activation_quantizer(self): threshold=thresholds, signed=True) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 50, 50, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 50, 50, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs quantized_tensor = quantizer(input_tensor.to(get_working_device())) # The maximal threshold is 4 using a signed quantization, so we expect all values to be in this range @@ -59,7 +61,7 @@ def test_symmetric_activation_quantizer(self): scale = thresholds / (2 ** (num_bits - 1)) manually_quantized_tensor = torch.round( torch.clip(input_tensor.to(get_working_device()), -thresholds, thresholds - scale) / scale) * scale - self.assertTrue(torch.all(manually_quantized_tensor == quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, quantized_tensor)) def test_unsigned_symmetric_activation_quantizer(self): thresholds = [4] @@ -68,8 +70,10 @@ def test_unsigned_symmetric_activation_quantizer(self): threshold=thresholds, signed=False) - # Initialize a random input to quantize between -50 to 50. 
- input_tensor = torch.rand(1, 50, 50, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 50, 50, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs quantized_tensor = quantizer(input_tensor.to(get_working_device())) # The maximal threshold is 4 using a signed quantization, so we expect all values to be in this range @@ -91,7 +95,7 @@ def test_unsigned_symmetric_activation_quantizer(self): scale = thresholds / (2 ** num_bits) manually_quantized_tensor = torch.round( torch.clip(input_tensor.to(get_working_device()), 0, thresholds - scale) / scale) * scale - self.assertTrue(torch.all(manually_quantized_tensor == quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, quantized_tensor)) def test_illegal_power_of_two_threshold(self): with self.assertRaises(Exception) as e: @@ -116,8 +120,10 @@ def test_power_of_two_activation_quantizer(self): signed=True, threshold=thresholds) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 3, 3, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 3, 3, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 3, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs fake_quantized_tensor = quantizer(input_tensor.to(get_working_device())).dequantize() assert torch.max( @@ -137,7 +143,7 @@ def test_power_of_two_activation_quantizer(self): scale = thresholds / (2 ** (num_bits - 1)) manually_quantized_tensor = torch.round( torch.clip(input_tensor.to(get_working_device()), -thresholds, thresholds - scale) / scale) * scale - self.assertTrue(torch.all(manually_quantized_tensor == fake_quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, fake_quantized_tensor)) def test_unsigned_power_of_two_activation_quantizer(self): thresholds = [1] @@ -146,8 +152,10 @@ def test_unsigned_power_of_two_activation_quantizer(self): signed=False, threshold=thresholds) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 3, 3, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 3, 3, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 3, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs fake_quantized_tensor = quantizer(input_tensor.to(get_working_device())).dequantize() assert torch.max( @@ -163,7 +171,7 @@ def test_unsigned_power_of_two_activation_quantizer(self): scale = thresholds / (2 ** num_bits) manually_quantized_tensor = torch.round( torch.clip(input_tensor.to(get_working_device()), 0, thresholds - scale) / scale) * scale - self.assertTrue(torch.all(manually_quantized_tensor == fake_quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, fake_quantized_tensor)) def test_uniform_activation_quantizer(self): min_range = [-10] @@ -173,8 +181,10 @@ def test_uniform_activation_quantizer(self): min_range=min_range, max_range=max_range) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 50, 50, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. 
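The manual references these asserts rebuild are worth stating once in isolation: a signed symmetric n-bit grid steps by t / 2**(n-1) over [-t, t - scale], while the unsigned variant spends all 2**n codes on [0, t - scale]. A hedged paraphrase of that math, not the library implementation:

import torch

def fake_quant_symmetric(x, t, n, signed=True):
    # Signed grids reserve one bit for sign; unsigned grids use the full range.
    scale = t / (2 ** (n - 1)) if signed else t / (2 ** n)
    lo = -t if signed else 0.0
    return torch.round(torch.clip(x, lo, t - scale) / scale) * scale

x = torch.tensor([-5.0, -0.6, 0.0, 0.6, 5.0])
print(fake_quant_symmetric(x, t=4.0, n=8))                 # clips into [-4.0, 3.96875]
print(fake_quant_symmetric(x, t=4.0, n=8, signed=False))   # negatives collapse to 0.0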
+ input_tensor = torch.rand(1, 50, 50, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs quantized_tensor = quantizer(input_tensor.to(get_working_device())) # The maximal threshold is 4 using a signed quantization, so we expect all values to be in this range @@ -199,7 +209,7 @@ def test_uniform_activation_quantizer(self): manually_quantized_tensor = torch.round((torch.clip(input_tensor.to(get_working_device()), min_range, max_range) - min_range) / scale) * scale + min_range - self.assertTrue(torch.all(manually_quantized_tensor == quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, quantized_tensor)) def test_illegal_range_uniform_activation_quantizer(self): min_range = [3] @@ -209,8 +219,10 @@ def test_illegal_range_uniform_activation_quantizer(self): min_range=min_range, max_range=max_range) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 50, 50, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 50, 50, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs quantized_tensor = quantizer(input_tensor.to(get_working_device())) # The maximal threshold is 4 using a signed quantization, so we expect all values to be in this range @@ -235,4 +247,4 @@ def test_illegal_range_uniform_activation_quantizer(self): manually_quantized_tensor = torch.round((torch.clip(input_tensor.to(get_working_device()), min_range, max_range) - min_range) / scale) * scale + min_range - self.assertTrue(torch.all(manually_quantized_tensor == quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, quantized_tensor)) diff --git a/tests/pytorch_tests/quantizers_tests/test_illegal_activation_lut_inferable_quantizer.py b/tests/pytorch_tests/quantizers_tests/test_illegal_activation_lut_inferable_quantizer.py index 4c04c81..aed6616 100644 --- a/tests/pytorch_tests/quantizers_tests/test_illegal_activation_lut_inferable_quantizer.py +++ b/tests/pytorch_tests/quantizers_tests/test_illegal_activation_lut_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/pytorch_tests/quantizers_tests/test_illegal_weights_inferable_quantizer.py b/tests/pytorch_tests/quantizers_tests/test_illegal_weights_inferable_quantizer.py index 5687c43..af65203 100644 --- a/tests/pytorch_tests/quantizers_tests/test_illegal_weights_inferable_quantizer.py +++ b/tests/pytorch_tests/quantizers_tests/test_illegal_weights_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -61,8 +61,10 @@ def test_zero_not_in_range_uniform_quantizer(self): max_range=max_range, channel_axis=2) - # Initialize a random input to quantize between -50 to 50. 
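The uniform (asymmetric) reference in the hunks above differs only in its grid: with n bits over [min_range, max_range] the step is (max - min) / (2**n - 1), and values are shifted by min_range before rounding. A small worked sketch with illustrative numbers:

import torch

def fake_quant_uniform(x, lo=-10.0, hi=5.0, n=2):
    # 2**n - 1 equal steps between lo and hi; lo itself is always on the grid.
    scale = (hi - lo) / (2 ** n - 1)
    return torch.round((torch.clip(x, lo, hi) - lo) / scale) * scale + lo

x = torch.tensor([-50.0, -3.0, 0.0, 4.9])
print(fake_quant_uniform(x))  # tensor([-10., -5., 0., 5.]), a 4-level grid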
- input_tensor = torch.rand(1, 50, 4, 50) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 50, 4, 50) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 4, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs fake_quantized_tensor = quantizer(input_tensor.to(get_working_device())) # We expect each channel values to be between min_range to max_range for each channel diff --git a/tests/pytorch_tests/quantizers_tests/test_illegal_weights_lut_inferable_quantizer.py b/tests/pytorch_tests/quantizers_tests/test_illegal_weights_lut_inferable_quantizer.py index 20915bc..1585cf2 100644 --- a/tests/pytorch_tests/quantizers_tests/test_illegal_weights_lut_inferable_quantizer.py +++ b/tests/pytorch_tests/quantizers_tests/test_illegal_weights_lut_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/pytorch_tests/quantizers_tests/test_weights_inferable_quantizer.py b/tests/pytorch_tests/quantizers_tests/test_weights_inferable_quantizer.py index 724b259..fadb24b 100644 --- a/tests/pytorch_tests/quantizers_tests/test_weights_inferable_quantizer.py +++ b/tests/pytorch_tests/quantizers_tests/test_weights_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -35,8 +35,10 @@ def test_symmetric_weights_quantizer_per_tensor(self): per_channel=False, threshold=thresholds) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 50, 50, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 50, 50, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs # Quantize tensor quantized_tensor = quantizer(input_tensor.to(get_working_device())) @@ -59,7 +61,7 @@ def test_symmetric_weights_quantizer_per_tensor(self): scale = thresholds[0] / (2 ** (num_bits - 1)) manually_quantized_tensor = torch.clip(torch.round(input_tensor.to(get_working_device()) / scale), -thresholds[0], thresholds[0] - scale) - self.assertTrue(torch.all(manually_quantized_tensor == quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, quantized_tensor)) def test_symmetric_weights_quantizer_per_channel(self): thresholds = [3, 6, 2] @@ -69,8 +71,10 @@ def test_symmetric_weights_quantizer_per_channel(self): threshold=thresholds, channel_axis=3) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 50, 50, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. 
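test_zero_not_in_range_uniform_quantizer above and the ONNX asserts that expect "zeros after range adjustment" point at the same convention: a uniform grid should be able to represent 0.0 exactly, so a strictly positive (or strictly negative) range gets extended to include zero. One common scheme consistent with what those asserts expect (hedged; the exact adjustment the library applies may differ in detail):

def adjust_range(lo, hi):
    # Extend the range so 0.0 is exactly representable on the quantization grid.
    return min(lo, 0.0), max(hi, 0.0)

print(adjust_range(3.0, 10.0))   # (0.0, 10.0): min_range snaps to zero
print(adjust_range(-8.0, -2.0))  # (-8.0, 0.0)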
+ input_tensor = torch.rand(1, 50, 50, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs # Quantize tensor quantized_tensor = quantizer(input_tensor.to(get_working_device())) fake_quantized_tensor = quantized_tensor @@ -97,7 +101,7 @@ def test_symmetric_weights_quantizer_per_channel(self): scale = thresholds / (2 ** (num_bits - 1)) manually_quantized_tensor = torch.round( torch.clip(input_tensor.to(get_working_device()), -thresholds, thresholds - scale) / scale) * scale - self.assertTrue(torch.all(manually_quantized_tensor == quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, quantized_tensor)) def test_pot_weights_quantizer_per_channel(self): thresholds = [2, 4, 1] @@ -110,8 +114,10 @@ def test_pot_weights_quantizer_per_channel(self): is_pot_scales = torch.all(quantizer.scales.log2().int() == quantizer.scales.log2()) self.assertTrue(is_pot_scales, f'Expected scales to be POT but: {quantizer.scales}') - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 50, 50, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 50, 50, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs fake_quantized_tensor = quantizer(input_tensor.to(get_working_device())) # We expect each channel values to be between -threshold to threshold since it's a signed quantization @@ -136,7 +142,7 @@ def test_pot_weights_quantizer_per_channel(self): scale = thresholds / (2 ** (num_bits - 1)) manually_quantized_tensor = torch.round( torch.clip(input_tensor.to(get_working_device()), -thresholds, thresholds - scale) / scale) * scale - self.assertTrue(torch.all(manually_quantized_tensor == fake_quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, fake_quantized_tensor)) def test_pot_weights_quantizer_per_tensor(self): thresholds = [1] @@ -150,8 +156,10 @@ def test_pot_weights_quantizer_per_tensor(self): f'Expected to have one scale in per-tensor quantization but found ' f'{len(quantizer.scales)} scales') - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 50, 50, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 50, 50, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 50, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs fake_quantized_tensor = quantizer(input_tensor.to(get_working_device())) assert torch.max(fake_quantized_tensor) < thresholds[ @@ -170,7 +178,7 @@ def test_pot_weights_quantizer_per_tensor(self): scale = thresholds / (2 ** (num_bits - 1)) manually_quantized_tensor = torch.round( torch.clip(input_tensor.to(get_working_device()), -thresholds, thresholds - scale) / scale) * scale - self.assertTrue(torch.all(manually_quantized_tensor == fake_quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, fake_quantized_tensor)) def test_uniform_weights_quantizer_per_channel(self): num_bits = 3 @@ -182,8 +190,10 @@ def test_uniform_weights_quantizer_per_channel(self): max_range=max_range, channel_axis=2) - # Initialize a random input to quantize between -50 to 50. 
- input_tensor = torch.rand(1, 50, 4, 50) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 50, 4, 50) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 4, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs fake_quantized_tensor = quantizer(input_tensor.to(get_working_device())) # We expect each channel values to be between min_range to max_range for each channel @@ -211,7 +221,7 @@ def test_uniform_weights_quantizer_per_channel(self): scale = (max_range - min_range) / (2 ** num_bits - 1) manually_quantized_tensor = torch.round((torch.clip(input_tensor.to(get_working_device()), min_range, max_range) - min_range) / scale) * scale + min_range - self.assertTrue(torch.all(manually_quantized_tensor == fake_quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, fake_quantized_tensor)) def test_uniform_weights_quantizer_per_tensor(self): num_bits = 3 @@ -222,8 +232,10 @@ def test_uniform_weights_quantizer_per_tensor(self): min_range=min_range, max_range=max_range) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 50, 4, 50) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 50, 4, 50) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 50, 4, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs fake_quantized_tensor = quantizer(input_tensor.to(get_working_device())) # We expect tensor values to be between min_range to max_range @@ -245,7 +257,7 @@ def test_uniform_weights_quantizer_per_tensor(self): scale = (max_range - min_range) / (2 ** num_bits - 1) manually_quantized_tensor = torch.round((torch.clip(input_tensor.to(get_working_device()), min_range, max_range) - min_range) / scale) * scale + min_range - self.assertTrue(torch.all(manually_quantized_tensor == fake_quantized_tensor)) + self.assertTrue(torch.allclose(manually_quantized_tensor, fake_quantized_tensor)) def quantizer_reuse_test(self, quantizer): @@ -265,15 +277,15 @@ def quantizer_reuse_test(self, quantizer): quantized_tensor1 = quantizer(input_tensor) self.assertTrue(not quantizer.quantizer_first_run, f'Now quantizer_first_run should be false but got true') - self.assertTrue(torch.all(quantizer.resue_outputs == quantized_tensor1)) + self.assertTrue(torch.allclose(quantizer.resue_outputs, quantized_tensor1)) # Quantize tensor: second run quantized_tensor2 = quantizer(input_tensor) - self.assertTrue(torch.all(quantizer.resue_outputs == quantized_tensor2)) + self.assertTrue(torch.allclose(quantizer.resue_outputs, quantized_tensor2)) # Quantize tensor: third run quantized_tensor3 = quantizer(input_tensor) - self.assertTrue(torch.all(quantizer.resue_outputs == quantized_tensor3)) + self.assertTrue(torch.allclose(quantizer.resue_outputs, quantized_tensor3)) def test_symmetric_weights_quantizer_reuse(self): # Create quantizer
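For the per-channel variants above, the only new ingredient is broadcasting: one threshold per channel, shaped so it aligns with the channel axis. A hedged sketch with the channel axis last:

import torch

thresholds = torch.tensor([3.0, 6.0, 2.0])   # one threshold per channel
n = 8
scale = thresholds / 2 ** (n - 1)            # shape (3,) broadcasts over (..., 3)
w = torch.randn(1, 50, 50, 3) * 4
# Each channel is clipped and rounded on its own grid in one vectorized op.
w_q = torch.round(torch.clip(w, -thresholds, thresholds - scale) / scale) * scale
assert w_q.shape == w.shape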
diff --git a/tests/pytorch_tests/quantizers_tests/test_weights_lut_inferable_quantizer.py b/tests/pytorch_tests/quantizers_tests/test_weights_lut_inferable_quantizer.py index 534b80b..14df0dd 100644 --- a/tests/pytorch_tests/quantizers_tests/test_weights_lut_inferable_quantizer.py +++ b/tests/pytorch_tests/quantizers_tests/test_weights_lut_inferable_quantizer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -34,8 +34,10 @@ def _weights_lut_quantizer_test(self, inferable_quantizer, num_bits, threshold, lut_values_bitwidth=lut_values_bitwidth, input_rank=input_rank) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.rand(1, 3, 3, 3) * 100 - 50 + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 3, 3, 3) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 3, 3)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs fake_quantized_tensor = quantizer(input_tensor.to(get_working_device())) # Using a signed quantization, so we expect all values to be between -abs(max(threshold)) @@ -72,7 +74,7 @@ def _weights_lut_quantizer_test(self, inferable_quantizer, num_bits, threshold, self.assertTrue(len(np.unique(channel_slice_i)) <= 2 ** num_bits, f'Quantized tensor expected to have no more than {2 ** num_bits} unique values but has ' f'{len(np.unique(channel_slice_i))} unique values') - self.assertTrue(np.all(np.unique(channel_slice_i) == np.sort(channel_quant_tensor_values))) + self.assertTrue(np.allclose(np.unique(channel_slice_i), np.sort(channel_quant_tensor_values))) # Check quantized tensor assigned correctly tensor = torch.clip((input_tensor / threshold[i]) * (2 ** (lut_values_bitwidth - 1)), @@ -83,7 +85,7 @@ def _weights_lut_quantizer_test(self, inferable_quantizer, num_bits, threshold, centers = np.asarray(lut_values).flatten()[lut_values_assignments] self.assertTrue( - np.all(centers / (2 ** (lut_values_bitwidth - 1)) * threshold[i] == channel_slice_i), + np.allclose(centers / (2 ** (lut_values_bitwidth - 1)) * threshold[i], channel_slice_i), "Quantized tensor values weren't assigned correctly") else: @@ -91,8 +93,7 @@ def _weights_lut_quantizer_test(self, inferable_quantizer, num_bits, threshold, self.assertTrue(len(np.unique(fake_quantized_tensor)) <= 2 ** num_bits, f'Quantized tensor expected to have no more than {2 ** num_bits} unique values but has ' f'{len(np.unique(fake_quantized_tensor))} unique values') - self.assertTrue(np.all(np.unique(fake_quantized_tensor) - == np.sort(quant_tensor_values))) + self.assertTrue(np.allclose(np.unique(fake_quantized_tensor), np.sort(quant_tensor_values))) # Check quantized tensor assigned correctly tensor = torch.clip((input_tensor / np.asarray(threshold)) * (2 ** (lut_values_bitwidth - 1)), @@ -103,7 +104,7 @@ def _weights_lut_quantizer_test(self, inferable_quantizer, num_bits, threshold, centers = np.asarray(lut_values).flatten()[lut_values_assignments] self.assertTrue( - np.all(centers / (2 ** (lut_values_bitwidth - 1)) * threshold == fake_quantized_tensor), + np.allclose(centers / (2 ** (lut_values_bitwidth - 1)) * threshold, fake_quantized_tensor), "Quantized tensor values weren't assigned correctly") # Assert some values are negative (signed quantization)
diff --git a/tests/pytorch_tests/test_activation_quantizer_holder.py b/tests/pytorch_tests/test_activation_quantizer_holder.py index d9a3587..6ff55f6 100644 --- a/tests/pytorch_tests/test_activation_quantizer_holder.py +++ b/tests/pytorch_tests/test_activation_quantizer_holder.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,8 +36,10 @@ def test_activation_quantization_holder_inference(self): signed=signed) model = PytorchActivationQuantizationHolder(quantizer) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.from_numpy(np.random.rand(1, 3, 50, 50). astype(np.float32) * 100 - 50, ) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 3, 50, 50) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 50, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs # Quantize tensor quantized_tensor = model(input_tensor) @@ -70,8 +72,10 @@ def test_activation_quantization_holder_save_and_load(self): quantizer = quantizer_class(**quantizer_args) model = PytorchActivationQuantizationHolder(quantizer) - # Initialize a random input to quantize between -50 to 50. - x = torch.from_numpy(np.random.rand(1, 3, 50, 50). astype(np.float32) * 100 - 50, ) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + x = torch.rand(1, 3, 50, 50) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 50, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + x = x * signs exp_output_tensor = model(x) fx_model = symbolic_trace(model) @@ -81,7 +85,7 @@ try: torch.save(fx_model, tmp_pth_file) - loaded_model = torch.load(tmp_pth_file) + loaded_model = torch.load(tmp_pth_file, weights_only=False) output_tensor = loaded_model(x) # Output value is the same as the quantization holder before saving. diff --git a/tests/pytorch_tests/test_fln_activation_quantizer_holder.py b/tests/pytorch_tests/test_fln_activation_quantizer_holder.py index 9667459..424ff1b 100644 --- a/tests/pytorch_tests/test_fln_activation_quantizer_holder.py +++ b/tests/pytorch_tests/test_fln_activation_quantizer_holder.py @@ -1,4 +1,4 @@ -# Copyright 2025 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2025 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -51,8 +51,10 @@ def test_fln_activation_quantization_holder_inference(self): quantizer = quantizer_class(**quantizer_args) model = PytorchFLNActivationQuantizationHolder(quantizer, quantization_bypass) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.from_numpy(np.random.rand(1, 3, 50, 50). astype(np.float32) * 100 - 50, ) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 3, 50, 50) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 50, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs # Quantize tensor quantized_tensor = model(input_tensor) @@ -108,8 +110,10 @@ def test_fln_activation_quantization_holder_save_and_load(self): quantizer = quantizer_class(**quantizer_args) model = PytorchFLNActivationQuantizationHolder(quantizer, quantization_bypass) - # Initialize a random input to quantize between -50 to 50. - x = torch.from_numpy(np.random.rand(1, 3, 50, 50). astype(np.float32) * 100 - 50, ) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values.
+ x = torch.rand(1, 3, 50, 50) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 50, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + x = x * signs exp_output_tensor = model(x) fx_model = symbolic_trace(model) @@ -119,7 +123,7 @@ try: torch.save(fx_model, tmp_pth_file) - loaded_model = torch.load(tmp_pth_file) + loaded_model = torch.load(tmp_pth_file, weights_only=False) output_tensor = loaded_model(x) # Output value is the same as the quantization holder before saving. diff --git a/tests/pytorch_tests/test_get_quantizers.py b/tests/pytorch_tests/test_get_quantizers.py index f90d665..79a462d 100644 --- a/tests/pytorch_tests/test_get_quantizers.py +++ b/tests/pytorch_tests/test_get_quantizers.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/pytorch_tests/test_preserving_activation_quantizer_holder.py b/tests/pytorch_tests/test_preserving_activation_quantizer_holder.py index bb68496..829faa9 100644 --- a/tests/pytorch_tests/test_preserving_activation_quantizer_holder.py +++ b/tests/pytorch_tests/test_preserving_activation_quantizer_holder.py @@ -1,4 +1,4 @@ -# Copyright 2025 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2025 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -51,8 +51,10 @@ def test_preserving_activation_quantization_holder_inference(self): quantizer = quantizer_class(**quantizer_args) model = PytorchPreservingActivationQuantizationHolder(quantizer, quantization_bypass) - # Initialize a random input to quantize between -50 to 50. - input_tensor = torch.from_numpy(np.random.rand(1, 3, 50, 50). astype(np.float32) * 100 - 50, ) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + input_tensor = torch.rand(1, 3, 50, 50) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 50, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + input_tensor = input_tensor * signs # Quantize tensor quantized_tensor = model(input_tensor) @@ -108,8 +110,10 @@ def test_preserving_activation_quantization_holder_save_and_load(self): quantizer = quantizer_class(**quantizer_args) model = PytorchPreservingActivationQuantizationHolder(quantizer, quantization_bypass) - # Initialize a random input to quantize between -50 to 50. - x = torch.from_numpy(np.random.rand(1, 3, 50, 50). astype(np.float32) * 100 - 50, ) + # Initialize a random input to quantize between -50 to 50. Input includes positive and negative values. + x = torch.rand(1, 3, 50, 50) * 50 + signs = torch.from_numpy(np.where(np.indices((1, 3, 50, 50)).sum(axis=0) % 2 == 0, 1, -1).astype(np.int8)) + x = x * signs exp_output_tensor = model(x) fx_model = symbolic_trace(model) @@ -119,7 +123,7 @@ try: torch.save(fx_model, tmp_pth_file) - loaded_model = torch.load(tmp_pth_file) + loaded_model = torch.load(tmp_pth_file, weights_only=False) output_tensor = loaded_model(x) # Output value is the same as the quantization holder before saving.
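The added weights_only=False mirrors PyTorch 2.6, where torch.load's default flipped to weights_only=True and fully pickled modules stopped loading. A minimal round trip (only appropriate for checkpoints you trust, since it re-enables unpickling):

import os
import tempfile
import torch

model = torch.nn.Linear(4, 2)
fd, path = tempfile.mkstemp('.pth')
os.close(fd)
torch.save(model, path)                          # pickles the whole module
loaded = torch.load(path, weights_only=False)    # opt back in to unpickling
os.remove(path)
assert isinstance(loaded, torch.nn.Linear)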
diff --git a/tests/pytorch_tests/test_pytorch_load_model.py b/tests/pytorch_tests/test_pytorch_load_model.py index b62a0ea..16e27f5 100644 --- a/tests/pytorch_tests/test_pytorch_load_model.py +++ b/tests/pytorch_tests/test_pytorch_load_model.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -70,11 +70,11 @@ def _one_layer_model_save_and_load(self, layer_with_quantizer): torch.save(layer_with_quantizer, tmp_h5_file) - loaded_model = (pytorch_load_quantized_model(tmp_h5_file)) + loaded_model = (pytorch_load_quantized_model(tmp_h5_file, weights_only=False)) os.remove(tmp_h5_file) loaded_pred = loaded_model(x).detach().cpu().numpy() - self.assertTrue(np.all(loaded_pred == pred)) + self.assertTrue(np.allclose(loaded_pred, pred)) def test_save_and_load_activation_pot(self): num_bits = 3 @@ -231,7 +231,7 @@ def test_save_and_load_metadata(self): _, tmp_pt_file = tempfile.mkstemp('.pt') torch.save(model, tmp_pt_file) - loaded_model = pytorch_load_quantized_model(tmp_pt_file) + loaded_model = pytorch_load_quantized_model(tmp_pt_file, weights_only=False) os.remove(tmp_pt_file) self.assertTrue(get_metadata(loaded_model) == get_metadata(model)) diff --git a/tests/pytorch_tests/test_pytorch_quantization_wrapper.py b/tests/pytorch_tests/test_pytorch_quantization_wrapper.py index 8f2fd16..5cbb0b9 100644 --- a/tests/pytorch_tests/test_pytorch_quantization_wrapper.py +++ b/tests/pytorch_tests/test_pytorch_quantization_wrapper.py @@ -1,4 +1,4 @@ -# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved. +# Copyright 2023 Sony Semiconductor Solutions, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -97,7 +97,7 @@ def test_positional_weights_quantization_wrapper(self): self.assertTrue(isinstance(quantizer, ZeroWeightsQuantizer)) y = wrapper(self.inputs) # apply the wrapper on some random inputs self.assertTrue((0 == getattr(wrapper, f'{QUANTIZED_POSITIONAL_WEIGHT}_{name}')).all()) # check the weight are now quantized - self.assertTrue((y == self.layers[1](torch.zeros_like(self.sub_const), self.inputs)).all()) # check the wrapper's outputs are equal to biases + self.assertTrue(torch.allclose(y, self.layers[1](torch.zeros_like(self.sub_const), self.inputs))) # check the wrapper's outputs are equal to biases wrapper = PytorchQuantizationWrapper(self.layers[1], {0: ZeroWeightsQuantizer()}, weight_values={0: self.sub_const}) @@ -109,7 +109,7 @@ def test_positional_weights_quantization_wrapper(self): self.assertTrue(isinstance(quantizer, ZeroWeightsQuantizer)) y = wrapper(self.inputs) # apply the wrapper on some random inputs self.assertTrue((0 == getattr(wrapper, f'{QUANTIZED_POSITIONAL_WEIGHT}_{name}')).all()) # check the weight are now quantized - self.assertTrue((y == self.layers[1](torch.zeros_like(self.sub_const), self.inputs)).all()) # check the wrapper's outputs are equal to biases + self.assertTrue(torch.allclose(y, self.layers[1](torch.zeros_like(self.sub_const), self.inputs))) # check the wrapper's outputs are equal to biases wrapper = PytorchQuantizationWrapper(self.layers[2], {0: ZeroWeightsQuantizer(), 2: ZeroWeightsQuantizer()}, weight_values={0: self.cat_const1, 2: self.cat_const2}, @@ -123,7 +123,7 @@ def test_positional_weights_quantization_wrapper(self): y = wrapper(self.inputs) # apply the wrapper on some random inputs self.assertTrue((0 == getattr(wrapper, f'{QUANTIZED_POSITIONAL_WEIGHT}_0')).all()) # check the weight are now quantized self.assertTrue((0 == getattr(wrapper, f'{QUANTIZED_POSITIONAL_WEIGHT}_2')).all()) # check the weight are now quantized - self.assertTrue((y == self.layers[2]([torch.zeros_like(self.cat_const1), - self.inputs, - torch.zeros_like(self.cat_const2)], - **wrapper.op_call_kwargs)).all()) # check the wrapper's outputs are equal to biases + self.assertTrue(torch.allclose(y, self.layers[2]([torch.zeros_like(self.cat_const1), + self.inputs, + torch.zeros_like(self.cat_const2)], + **wrapper.op_call_kwargs))) # check the wrapper's outputs are equal to biases
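The torch.allclose swaps in this last file guard against the same failure mode as the numpy ones: float arithmetic that loses or reorders low-order bits can differ by an ulp, so exact elementwise equality is fragile even when two computations are mathematically identical. A tiny demonstration:

import torch

a = torch.tensor([1e-8])
b = (a + 1.0) - 1.0            # float32 cancellation: b is exactly 0.0
print(torch.equal(a, b))       # False, bitwise equality fails
print(torch.allclose(a, b))    # True, |1e-8 - 0| <= atol (default 1e-08)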