diff --git a/.github/actions/keras_application_test/action.yml b/.github/actions/keras_application_test/action.yml
index e22d0ad8e..668e9efff 100644
--- a/.github/actions/keras_application_test/action.yml
+++ b/.github/actions/keras_application_test/action.yml
@@ -24,18 +24,14 @@ runs:
python -m pip install --upgrade pip
pip install onnxconverter-common
pip install onnx==${{ inputs.onnx_version }}
- pip uninstall -y protobuf
- pip install "protobuf~=3.20"
- pip install h5py==3.7.0
+ pip install h5py
pip install parameterized
pip install timeout-decorator
pip install coloredlogs flatbuffers
- pip install tensorflow==${{ inputs.tf_version }}
pip install onnxruntime==${{ inputs.ort_version }}
pip install pillow
pip install opencv-python
pip install tqdm
- pip install keras-segmentation==0.2.0
git clone https://github.com/matterport/Mask_RCNN
cd Mask_RCNN
pip install -r requirements.txt
@@ -43,22 +39,28 @@ runs:
cd ..
pip install matplotlib
git clone https://github.com/qqwweee/keras-yolo3
- pip install keras-resnet
pip install git+https://www.github.com/keras-team/keras-contrib.git
pip install keras-tcn==2.8.3
pip install git+https://github.com/qubvel/efficientnet
- pip install keras-self-attention
pip install pytest pytest-cov pytest-runner
+ pip uninstall -y protobuf h5py
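+ # TF1 gets pinned legacy packages below; for newer TF2 we also install tf_keras so the tests can run with legacy Keras (see TF_USE_LEGACY_KERAS in the test step)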
if [[ ${{ inputs.tf_version }} == 1.* ]]; then
- pip install keras==2.3.1
- pip install transformers==4.2.0
- pip uninstall -y h5py
- pip install h5py==2.9.0
- pip install numpy==1.19.0
+ echo "-- install-1 TF1-KERAS ${{ inputs.tf_version }}"
+ pip install numpy==1.19.0 tensorflow==${{ inputs.tf_version }} protobuf h5py==2.9.0 transformers==4.2.0 keras
+ # deprecated packages
+ pip install keras-self-attention
+ pip install keras-segmentation==0.2.0
+ pip install keras-resnet
else
pip install transformers
- pip install "numpy<2"
+ if [[ "${{ inputs.tf_version }}" != "2.13.0" && "${{ inputs.tf_version }}" != "2.9.0" ]]; then
+ echo "-- install-1 TF-KERAS ${{ inputs.tf_version }}"
+ pip install tf_keras==${{ inputs.tf_version }} tensorflow==${{ inputs.tf_version }} protobuf
+ else
+ echo "-- install-1 TF ${{ inputs.tf_version }}"
+ pip install protobuf tensorflow==${{ inputs.tf_version }}
+ fi
fi
pip install -e .
@@ -71,5 +73,7 @@ runs:
run: |
python -c "import onnxruntime"
pytest tests/keras2onnx_unit_tests --doctest-modules --junitxml=junit/test-results.xml
+
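+ # Run the nightly application tests with legacy Keras (tf_keras) rather than Keras 3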
+ export TF_USE_LEGACY_KERAS=True
cd tests/keras2onnx_applications/nightly_build
python run_all_v2.py
diff --git a/.github/actions/keras_unit_test/action.yml b/.github/actions/keras_unit_test/action.yml
index 4c78945f1..d5d5171bd 100644
--- a/.github/actions/keras_unit_test/action.yml
+++ b/.github/actions/keras_unit_test/action.yml
@@ -24,19 +24,23 @@ runs:
python -m pip install --upgrade pip
pip install onnxconverter-common
pip install onnx==${{ inputs.onnx_version }}
- pip install h5py==3.7.0
pip install parameterized
pip install timeout-decorator
pip install coloredlogs flatbuffers
- pip install tensorflow==${{ inputs.tf_version }}
pip install pytest pytest-cov pytest-runner
pip install onnxruntime==${{ inputs.ort_version }}
- pip uninstall -y protobuf
- pip install "protobuf~=3.20"
+ pip uninstall -y protobuf h5py tensorflow
if [[ ${{ inputs.tf_version }} == 1.* ]]; then
- pip install numpy==1.19.0
+ echo "-- install-2 TF1-KERAS ${{ inputs.tf_version }}"
+ pip install numpy==1.19.0 tensorflow==${{ inputs.tf_version }} protobuf keras h5py
else
- pip install "numpy<2"
+ if [[ "${{ inputs.tf_version }}" != "2.13.0" && "${{ inputs.tf_version }}" != "2.9.0" ]]; then
+ echo "-- install-2 TF-KERAS ${{ inputs.tf_version }}"
+ pip install tf_keras==${{ inputs.tf_version }} tensorflow==${{ inputs.tf_version }} h5py protobuf
+ else
+ echo "-- install-2 TF ${{ inputs.tf_version }}"
+ pip install protobuf tensorflow==${{ inputs.tf_version }} h5py
+ fi
fi
pip install -e .
@@ -48,6 +52,7 @@ runs:
shell: bash
if: runner.os == 'Linux'
run: |
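+ # Use legacy Keras (tf_keras) for the unit tests when Keras 3 is the TF default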
+ export TF_USE_LEGACY_KERAS=True
python -c "import onnxruntime"
python -c "import onnxconverter_common"
pytest tests/keras2onnx_unit_tests --doctest-modules --junitxml=junit/test-results.xml
diff --git a/.github/actions/unit_test/action.yml b/.github/actions/unit_test/action.yml
index 00cb28386..ebcad6c80 100644
--- a/.github/actions/unit_test/action.yml
+++ b/.github/actions/unit_test/action.yml
@@ -46,6 +46,7 @@ runs:
export TF2ONNX_SKIP_TFLITE_TESTS=${{ inputs.skip_tflite }}
export TF2ONNX_SKIP_TFJS_TESTS=True
export TF2ONNX_SKIP_TF_TESTS=False
+ export TF_USE_LEGACY_KERAS=True
python -m pytest --cov=tf2onnx --cov-report=term --disable-pytest-warnings -r s tests --cov-append --junitxml=junit/test-results.xml
ls
@@ -58,5 +59,6 @@ runs:
set TF2ONNX_SKIP_TFLITE_TESTS=${{ inputs.skip_tflite }}
set TF2ONNX_SKIP_TFJS_TESTS=True
set TF2ONNX_SKIP_TF_TESTS=False
+ set TF_USE_LEGACY_KERAS=True
python -m pytest --cov=tf2onnx --cov-report=term --disable-pytest-warnings -r s tests --cov-append --junitxml=junit/test-results.xml
ls
diff --git a/.github/workflows/keras_application_test_ci.yml b/.github/workflows/keras_application_test_ci.yml
index ce9527158..91a4cd976 100644
--- a/.github/workflows/keras_application_test_ci.yml
+++ b/.github/workflows/keras_application_test_ci.yml
@@ -15,7 +15,7 @@ concurrency:
jobs:
- Test_min_py_with_min_tf: # Do not change this name because it is used in Ruleset of this repo.
+ Test1_py310_tf2_19: # Do not change this name because it is used in Ruleset of this repo.
strategy:
fail-fast: false
runs-on: ubuntu-latest
@@ -24,86 +24,60 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- - name: Run Tests (Py39-TF2.9)
+ - name: Run Tests (Py310-TF2.19)
uses: ./.github/actions/keras_application_test
with:
- tf_version: '2.9.0'
- python_version: '3.9'
- ort_version: '1.16.3'
- onnx_version: '1.16.1'
-
- - name: Upload Test Results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: Test Results (Py39-TF2.9-ubuntu)
- path: ./**/test-results-*.xml
-
- Test_max_py_with_latest_tf: # Do not change this name because it is used in Ruleset of this repo.
- strategy:
- fail-fast: false
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
-
- - name: Run Tests (Py310-TF2.15)
- uses: ./.github/actions/keras_application_test
- with:
- tf_version: '2.15.0'
+ tf_version: '2.19.0'
python_version: '3.10'
- ort_version: '1.16.3'
- onnx_version: '1.16.1'
-
- - name: Upload Test Results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: Test Results (Py310-TF2.15-ubuntu)
- path: ./**/test-results-*.xml
-
- Test_py37_with_tf1_15: # Do not change this name because it is used in Ruleset of this repo.
- strategy:
- fail-fast: false
- runs-on: ubuntu-22.04
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
-
- - name: Run Tests (Py37-TF1.15)
- uses: ./.github/actions/keras_application_test
- with:
- tf_version: '1.15.5'
- python_version: '3.7'
- ort_version: '1.14.1'
- onnx_version: '1.14.1'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
- name: Upload Test Results
if: always()
uses: actions/upload-artifact@v4
with:
- name: Test Results (Py37-TF1.15-ubuntu)
+ name: Test Results (Py310-TF2.19-ubuntu)
path: ./**/test-results-*.xml
- Extra_tests:
+ Extra_tests1:
strategy:
fail-fast: false
matrix:
name:
- 'py38-tf2.13'
- 'py39-tf2.15'
- os: ['ubuntu-latest', 'windows-2022']
- ort_version: ['1.16.3']
- onnx_version: ['1.16.1']
+ - 'py310-tf2.19'
+ - 'py311-tf2.19'
+ - 'py312-tf2.19'
+ os: ['ubuntu-latest', 'windows-latest']
include:
- name: 'py38-tf2.13'
tf_version: '2.13.0'
python_version: '3.8'
+ ort_version: '1.16.3'
+ onnx_version: '1.16.1'
+ os: 'ubuntu-latest'
- name: 'py39-tf2.15'
tf_version: '2.15.0'
python_version: '3.9'
+ ort_version: '1.16.3'
+ onnx_version: '1.16.1'
+ os: 'ubuntu-latest'
+ - name: 'py310-tf2.19'
+ tf_version: '2.19.0'
+ python_version: '3.10'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
+ - name: 'py311-tf2.19'
+ tf_version: '2.19.0'
+ python_version: '3.11'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
+ - name: 'py312-tf2.19'
+ tf_version: '2.19.0'
+ python_version: '3.12'
+ ort_version: '1.22.0'
+ onnx_version: '1.18.0'
runs-on: ${{ matrix.os }}
steps:
@@ -126,7 +100,7 @@ jobs:
publish-test-results:
name: "Publish Tests Results to Github"
- needs: [Test_min_py_with_min_tf, Test_max_py_with_latest_tf, Test_py37_with_tf1_15, Extra_tests]
+ needs: [Test1_py310_tf2_19, Extra_tests1]
runs-on: ubuntu-latest
permissions:
checks: write
diff --git a/.github/workflows/keras_unit_test_ci.yml b/.github/workflows/keras_unit_test_ci.yml
index 756e06373..393d37429 100644
--- a/.github/workflows/keras_unit_test_ci.yml
+++ b/.github/workflows/keras_unit_test_ci.yml
@@ -14,7 +14,7 @@ concurrency:
cancel-in-progress: true
jobs:
- Test_min_py_with_min_tf: # Do not change this name because it is used in Ruleset of this repo.
+ Test2_py39_tf2_9: # Do not change this name because it is used in Ruleset of this repo.
strategy:
fail-fast: false
runs-on: ubuntu-latest
@@ -38,7 +38,7 @@ jobs:
name: Test Results (Py39-TF2.9-ubuntu)
path: ./**/test-results-*.xml
- Test_max_py_with_latest_tf: # Do not change this name because it is used in Ruleset of this repo.
+ Test2_py310_tf2_19: # Do not change this name because it is used in Ruleset of this repo.
strategy:
fail-fast: false
runs-on: ubuntu-latest
@@ -47,62 +47,76 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- - name: Run Tests (Py310-TF2.15)
+ - name: Run Tests (Py310-TF2.19)
uses: ./.github/actions/keras_unit_test
with:
- tf_version: '2.15.0'
+ tf_version: '2.19.0'
python_version: '3.10'
- ort_version: '1.16.3'
- onnx_version: '1.16.1'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
- name: Upload Test Results
if: always()
uses: actions/upload-artifact@v4
with:
- name: Test Results (Py310-TF2.15-ubuntu)
+ name: Test Results (Py310-TF2.19-ubuntu)
path: ./**/test-results-*.xml
- Test_py37_with_tf1_15: # Do not change this name because it is used in Ruleset of this repo.
+ Test2_py311_tf2_19: # Do not change this name because it is used in Ruleset of this repo.
strategy:
fail-fast: false
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- - name: Run Tests (Py37-TF1.15)
+ - name: Run Tests (Py311-TF2.19)
uses: ./.github/actions/keras_unit_test
with:
- tf_version: '1.15.5'
- python_version: '3.7'
- ort_version: '1.14.1'
- onnx_version: '1.14.1'
+ tf_version: '2.19.0'
+ python_version: '3.11'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
- name: Upload Test Results
if: always()
uses: actions/upload-artifact@v4
with:
- name: Test Results (Py37-TF1.15-ubuntu)
+ name: Test Results (Py311-TF2.19-ubuntu)
path: ./**/test-results-*.xml
- Extra_tests:
+ Extra_tests2:
strategy:
fail-fast: false
matrix:
name:
- - 'py39-tf2.10'
+ - 'py38-tf2.13'
- 'py39-tf2.15'
+ - 'py310-tf2.19'
+ - 'py311-tf2.19'
os: ['ubuntu-latest', 'windows-2022']
- ort_version: ['1.16.3']
- onnx_version: ['1.16.1']
include:
- - name: 'py39-tf2.10'
- tf_version: '2.10.0'
- python_version: '3.9'
+ - name: 'py38-tf2.13'
+ tf_version: '2.13.0'
+ python_version: '3.8'
+ ort_version: '1.16.3'
+ onnx_version: '1.16.1'
- name: 'py39-tf2.15'
tf_version: '2.15.0'
python_version: '3.9'
+ ort_version: '1.16.3'
+ onnx_version: '1.16.1'
+ - name: 'py310-tf2.19'
+ tf_version: '2.19.0'
+ python_version: '3.10'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
+ - name: 'py311-tf2.19'
+ tf_version: '2.19.0'
+ python_version: '3.11'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
runs-on: ${{ matrix.os }}
steps:
@@ -125,7 +139,7 @@ jobs:
publish-test-results:
name: "Publish Tests Results to Github"
- needs: [Test_min_py_with_min_tf, Test_max_py_with_latest_tf, Test_py37_with_tf1_15, Extra_tests]
+ needs: [Test2_py39_tf2_9, Test2_py310_tf2_19, Test2_py311_tf2_19, Extra_tests2]
runs-on: ubuntu-latest
permissions:
checks: write
diff --git a/.github/workflows/pretrained_model_test_ci.yml b/.github/workflows/pretrained_model_test_ci.yml
index c6a7a8e03..4c0d16a01 100644
--- a/.github/workflows/pretrained_model_test_ci.yml
+++ b/.github/workflows/pretrained_model_test_ci.yml
@@ -15,7 +15,7 @@ concurrency:
jobs:
- Test_min_py_with_min_tf: # Do not change this name because it is used in 'publish-test-results' section below.
+ Test3_py39_tf2_9: # Do not change this name because it is used in 'publish-test-results' section below.
strategy:
fail-fast: false
runs-on: ubuntu-latest
@@ -42,7 +42,7 @@ jobs:
name: Test Results (Py39-TF2.9-18-ubuntu)
path: ./**/test-results-*.xml
- Test_max_py_with_latest_tf: # Do not change this name because it is used in 'publish-test-results' section below.
+ Test3_py310_tf2_19: # Do not change this name because it is used in 'publish-test-results' section below.
strategy:
fail-fast: false
runs-on: ubuntu-latest
@@ -51,14 +51,14 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- - name: Run Tests (Py310-TF2.15-18)
+ - name: Run Tests (Py310-TF2.19-18)
uses: ./.github/actions/pretrained_model_test
with:
os: 'ubuntu-latest'
- tf_version: '2.15.0'
+ tf_version: '2.19.0'
python_version: '3.10'
- ort_version: '1.16.3'
- onnx_version: '1.16.1'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
opset_version: '18'
skip_tflite: 'False'
@@ -66,54 +66,42 @@ jobs:
if: always()
uses: actions/upload-artifact@v4
with:
- name: Test Results (Py310-TF2.15-18-ubuntu)
- path: ./**/test-results-*.xml
-
- Test_py37_with_tf1_15: # Do not change this name because it is used in 'publish-test-results' section below.
- strategy:
- fail-fast: false
- runs-on: ubuntu-22.04
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
-
- - name: Run Tests (Py310-TF2.15-18)
- uses: ./.github/actions/pretrained_model_test
- with:
- tf_version: '1.15.5'
- python_version: '3.7'
- os: 'ubuntu-22.04' # Max ubuntu version supports python 3.7.
- opset_version: '15'
- ort_version: '1.14.1'
- onnx_version: '1.14.1'
-
- - name: Upload Test Results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: Test Results (Py37-TF1.15-15-ubuntu)
+ name: Test Results (Py310-TF2.19-18-ubuntu)
path: ./**/test-results-*.xml
- Extra_tests: # Do not change this name because it is used in 'publish-test-results' section below.
+ Extra_tests3: # Do not change this name because it is used in 'publish-test-results' section below.
strategy:
fail-fast: false
matrix:
name:
- 'py38-tf2.13'
- 'py39-tf2.15'
+ - 'py310-tf2.19'
+ - 'py311-tf2.19'
os: ['ubuntu-latest', 'windows-2022']
opset_version: ['18', '15']
- ort_version: ['1.16.3']
- onnx_version: ['1.16.1']
skip_tflite: ['False']
include:
- name: 'py38-tf2.13'
tf_version: '2.13.0'
python_version: '3.8'
+ ort_version: '1.16.3'
+ onnx_version: '1.16.1'
- name: 'py39-tf2.15'
tf_version: '2.15.0'
python_version: '3.9'
+ ort_version: '1.16.3'
+ onnx_version: '1.16.1'
+ - name: 'py310-tf2.19'
+ tf_version: '2.19.0'
+ python_version: '3.10'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
+ - name: 'py311-tf2.19'
+ tf_version: '2.19.0'
+ python_version: '3.11'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
runs-on: ${{ matrix.os }}
steps:
@@ -139,7 +127,7 @@ jobs:
publish-test-results:
name: "Publish Tests Results to Github"
- needs: [Test_min_py_with_min_tf, Test_max_py_with_latest_tf, Test_py37_with_tf1_15, Extra_tests]
+ needs: [Test3_py39_tf2_9, Test3_py310_tf2_19, Extra_tests3]
runs-on: ubuntu-latest
permissions:
checks: write
diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
index ac5631aa3..5e9377d2e 100644
--- a/.github/workflows/pylint.yml
+++ b/.github/workflows/pylint.yml
@@ -18,10 +18,10 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: 3.9 # Specify the desired Python version (e.g., 3.8, 3.9)
+ python-version: 3.11
- name: Install dependencies
- run: pip install pylint==2.4.4
+ run: pip install pylint
- name: Run pylint
run: |
diff --git a/.github/workflows/unit_test_ci.yml b/.github/workflows/unit_test_ci.yml
index 54646f68b..395b6bc6a 100644
--- a/.github/workflows/unit_test_ci.yml
+++ b/.github/workflows/unit_test_ci.yml
@@ -15,7 +15,7 @@ concurrency:
jobs:
- Test_min_py_with_min_tf: # Do not change this name because it is used in Ruleset of this repo.
+ Test4_py310_tf2_19: # Do not change this name because it is used in Ruleset of this repo.
strategy:
fail-fast: false
runs-on: ubuntu-latest
@@ -24,14 +24,14 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- - name: Run Tests (Py39-TF2.9-18)
+ - name: Run Tests (Py310-TF2.19-18)
uses: ./.github/actions/unit_test
with:
os: 'ubuntu-latest'
- tf_version: '2.9.0'
- python_version: '3.9'
- ort_version: '1.16.3'
- onnx_version: '1.16.1'
+ tf_version: '2.19.0'
+ python_version: '3.10'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
opset_version: '18'
skip_tflite: 'False'
@@ -39,10 +39,10 @@ jobs:
if: always()
uses: actions/upload-artifact@v4
with:
- name: Test Results (Py39-TF2.9-18-ubuntu)
+ name: Test Results (Py310-TF2.19-18-ubuntu)
path: ./**/test-results-*.xml
- Test_max_py_with_latest_tf: # Do not change this name because it is used in Ruleset of this repo.
+ Test4_py311_tf2_19: # Do not change this name because it is used in Ruleset of this repo.
strategy:
fail-fast: false
runs-on: ubuntu-latest
@@ -51,14 +51,14 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- - name: Run Tests (Py310-TF2.15-18)
+ - name: Run Tests (Py311-TF2.19-18)
uses: ./.github/actions/unit_test
with:
os: 'ubuntu-latest'
- tf_version: '2.15.0'
- python_version: '3.10'
- ort_version: '1.16.3'
- onnx_version: '1.16.1'
+ tf_version: '2.19.0'
+ python_version: '3.11'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
opset_version: '18'
skip_tflite: 'False'
@@ -66,32 +66,24 @@ jobs:
if: always()
uses: actions/upload-artifact@v4
with:
- name: Test Results (Py310-TF2.15-18-ubuntu)
+ name: Test Results (Py311-TF2.19-18-ubuntu)
path: ./**/test-results-*.xml
- Extra_tests:
+ Extra_tests4:
strategy:
fail-fast: false
matrix:
name:
- - 'py39-tf2.15'
+ - 'py312-tf2.19'
os: ['ubuntu-latest', 'windows-2022']
- opset_version: ['18', '15']
- ort_version: ['1.16.3']
- onnx_version: ['1.16.1']
+ opset_version: ['18']
skip_tflite: ['False']
include:
- - name: 'py39-tf2.15'
- tf_version: '2.15.0'
- python_version: '3.9'
- - name: 'py37-tf1.15'
- tf_version: '1.15.5'
- python_version: '3.7'
- os: 'ubuntu-22.04' # Max ubuntu version supports python 3.7.
- opset_version: '15'
- ort_version: '1.14.1'
- onnx_version: '1.14.1'
- skip_tflite: 'True'
+ - name: 'py312-tf2.19'
+ tf_version: '2.19.0'
+ python_version: '3.12'
+ ort_version: '1.20.1'
+ onnx_version: '1.17.0'
runs-on: ${{ matrix.os }}
@@ -118,7 +110,7 @@ jobs:
publish-test-results:
name: "Publish Tests Results to Github"
- needs: [Test_min_py_with_min_tf, Test_max_py_with_latest_tf, Extra_tests]
+ needs: [Test4_py310_tf2_19, Test4_py311_tf2_19, Extra_tests4]
runs-on: ubuntu-latest
permissions:
checks: write
diff --git a/README.md b/README.md
index 776b8e8f0..d69cff1bb 100644
--- a/README.md
+++ b/README.md
@@ -19,10 +19,10 @@ The common issues we run into we try to document here [Troubleshooting Guide](Tr
-| Build Type | OS | Python | TensorFlow | ONNX opset |
-| --- | - | --- | --- | --- |
-| Unit Test - Basic | Linux, Windows | 3.7-3.12 | 1.15, 2.9-2.15 | 14-18 |
-| Unit Test - Full | Linux, Windows | 3.7-3.12 | 1.15, 2.9-2.15 | 14-18 |
+| Build Type | OS | Python | TensorFlow | ONNX opset | Status |
+| --- | --- | --- | --- | --- | --- |
+| Unit Test - Basic | Linux, Windows | 3.8-3.12 | 2.9-2.19 | 14-18 | [](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_build/latest?definitionId=16&branchName=main) |
+| Unit Test - Full | Linux, Windows | 3.8-3.12 | 2.9-2.19 | 14-18 | [](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_build/latest?definitionId=18&branchName=main) |
## Supported Versions
@@ -46,7 +46,7 @@ You can install tf2onnx on top of tf-1.x or tf-2.x.
### Python
-We support Python ```3.7-3.12```.
+We support Python ```3.8-3.12```.
## Prerequisites
@@ -336,8 +336,6 @@ model_proto, external_tensor_storage = tf2onnx.convert.from_keras(model,
An ONNX model_proto and an external_tensor_storage dict.
```
-See [tutorials/keras-resnet50.ipynb](tutorials/keras-resnet50.ipynb) for an end to end example.
-
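+A minimal usage sketch (assuming `model` is a built Keras model):
+
+```
+import tensorflow as tf
+import tf2onnx
+
+spec = (tf.TensorSpec((None, 224, 224, 3), tf.float32, name="input"),)
+model_proto, _ = tf2onnx.convert.from_keras(model, input_signature=spec, opset=13, output_path="model.onnx")
+```
+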
### from_function (tf-2.0 and newer)
```
import tf2onnx
diff --git a/Troubleshooting.md b/Troubleshooting.md
index fd9ef4a6a..5aff57cc5 100644
--- a/Troubleshooting.md
+++ b/Troubleshooting.md
@@ -36,3 +36,7 @@ An example of this is the [ONNX Slice operator before opset-10](https://github.c
You can pass the options ```--fold_const```(removed after tf2onnx-1.9.3) in the tf2onnx command line that allows tf2onnx to apply more aggressive constant folding which will increase chances to find a constant.
If this doesn't work the model is most likely not to be able to convert to ONNX. We used to see this a lot of issue with the ONNX Slice op and in opset-10 was updated for exactly this reason.
+
+## cudaSetDevice() on GPU:0 failed. Status: CUDA-capable device(s) is/are busy or unavailable
+
+See [Regression: TF 2.18 crashes with cudaSetDevice failing due to GPU being busy](https://github.com/tensorflow/tensorflow/issues/78784).
diff --git a/examples/end2end_tfkeras.py b/examples/end2end_tfkeras.py
index 21ffd2d76..24a95f5dc 100644
--- a/examples/end2end_tfkeras.py
+++ b/examples/end2end_tfkeras.py
@@ -8,11 +8,9 @@
*onnxruntime*, *tensorflow* and *tensorflow.lite*.
"""
from onnxruntime import InferenceSession
-import os
import subprocess
import timeit
import numpy as np
-import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Input
diff --git a/examples/tf_custom_op/double_and_add_one_custom_op.py b/examples/tf_custom_op/double_and_add_one_custom_op.py
index 770324ceb..e8510c414 100644
--- a/examples/tf_custom_op/double_and_add_one_custom_op.py
+++ b/examples/tf_custom_op/double_and_add_one_custom_op.py
@@ -7,7 +7,6 @@
import os
from tf2onnx import utils
from tf2onnx.handler import tf_op
-from tf2onnx.tf_loader import tf_placeholder
DIR_PATH = os.path.realpath(os.path.dirname(__file__))
diff --git a/setup.py b/setup.py
index d074a1e80..02d61961f 100644
--- a/setup.py
+++ b/setup.py
@@ -82,7 +82,7 @@ def run(self):
author='ONNX',
author_email='onnx-technical-discuss@lists.lfaidata.foundation',
url='https://github.com/onnx/tensorflow-onnx',
- install_requires=['numpy>=1.14.1', 'onnx>=1.4.1', 'requests', 'six', 'flatbuffers>=1.12', 'protobuf~=3.20'],
+ install_requires=['numpy>=1.14.1', 'onnx>=1.4.1', 'requests', 'six', 'flatbuffers>=1.12'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
diff --git a/tests/keras2onnx_unit_tests/conftest.py b/tests/keras2onnx_unit_tests/conftest.py
index f3b518d44..ea6ab4441 100644
--- a/tests/keras2onnx_unit_tests/conftest.py
+++ b/tests/keras2onnx_unit_tests/conftest.py
@@ -13,6 +13,12 @@
K = keras.backend
+def is_keras_3():
+ if hasattr(keras, '__version__'):
+ return keras.__version__.startswith("3.")
+
+ return False
+
@pytest.fixture(scope='function')
def runner():
np.random.seed(42)
@@ -25,12 +31,19 @@ def runner():
def runner_func(*args, **kwargs):
return run_onnx_runtime(*args, model_files, **kwargs)
- # Ensure Keras layer naming is reset for each function
- if hasattr(K, "reset_uids"):
- # see https://github.com/onnx/tensorflow-onnx/issues/2370
- K.reset_uids()
- # Reset the TensorFlow session to avoid resource leaking between tests
- K.clear_session()
+ if is_keras_3():
+ import tf_keras
+ if hasattr(K, "reset_uids"):
+ # see https://github.com/onnx/tensorflow-onnx/issues/2370
+ K.reset_uids()
+ tf_keras.backend.clear_session()
+ else:
+ # Ensure Keras layer naming is reset for each function
+ if hasattr(K, "reset_uids"):
+ # see https://github.com/onnx/tensorflow-onnx/issues/2370
+ K.reset_uids()
+ # Reset the TensorFlow session to avoid resource leaking between tests
+ K.clear_session()
# Provide wrapped run_onnx_runtime function
yield runner_func
diff --git a/tests/keras2onnx_unit_tests/test_subclassing.py b/tests/keras2onnx_unit_tests/test_subclassing.py
index f4b8ea9d0..dd7e58adc 100644
--- a/tests/keras2onnx_unit_tests/test_subclassing.py
+++ b/tests/keras2onnx_unit_tests/test_subclassing.py
@@ -49,13 +49,41 @@ def call(self, inputs, **kwargs):
return output
-class SimpleWrapperModel(tf.keras.Model):
- def __init__(self, func):
- super(SimpleWrapperModel, self).__init__()
- self.func = func
-
- def call(self, inputs, **kwargs):
- return self.func(inputs)
+def get_save_spec(model, dynamic_batch=False):
+ """Returns the save spec of the subclassing keras model."""
+ from tensorflow.python.framework import tensor_spec
+ shapes_dict = getattr(model, '_build_shapes_dict', None)
+ # TODO: restore dynamic_batch
+ # assert not dynamic_batch, f"get_save_spec: dynamic_batch={dynamic_batch}, shapes_dict={shapes_dict}"
+ if not shapes_dict:
+ return None
+
+ if 'input_shape' not in shapes_dict:
+ raise ValueError(
+ 'Model {} cannot be saved because the input shapes have not been set.'.format(model.name)
+ )
+
+ input_shape = shapes_dict['input_shape']
+ if isinstance(input_shape, tuple):
+ shape = input_shape
+ shape = (None,) + shape[1:]
+ return tensor_spec.TensorSpec(
+ shape=shape, dtype=model.input_dtype
+ )
+ elif isinstance(input_shape, dict):
+ specs = {}
+ for key, shape in input_shape.items():
+ shape = (None,) + shape[1:]
+ specs[key] = tensor_spec.TensorSpec(
+ shape=shape, dtype=model.input_dtype, name=key
+ )
+ return specs
+ elif isinstance(input_shape, list):
+ specs = []
+ for shape in input_shape:
+ shape = (None,) + shape[1:]
+ specs.append(tensor_spec.TensorSpec(shape=shape, dtype=model.input_dtype))
+ return specs
def test_lenet(runner):
@@ -87,7 +115,11 @@ def op_func(arg_inputs):
x = x - tf.cast(tf.expand_dims(r, axis=0), tf.float32)
return x
- dm = SimpleWrapperModel(op_func)
+ class Model(tf.keras.Model):
+ def call(self, inputs, **kwargs):
+ return op_func(inputs)
+
+ dm = Model()
inputs = [tf.random.normal((3, 2, 20)), tf.random.normal((3, 2, 20))]
expected = dm.predict(inputs)
oxml = convert_keras(dm)
@@ -195,10 +227,17 @@ def _tf_where(input_0):
c = tf.logical_or(tf.cast(a, tf.bool), tf.cast(b, tf.bool))
return c
- swm = SimpleWrapperModel(_tf_where)
+ class Model(tf.keras.Model):
+ def call(self, inputs, **kwargs):
+ return _tf_where(inputs)
+
+ swm = Model()
const_in = [np.array([2, 4, 6, 8, 10]).astype(np.int32)]
expected = swm(const_in)
- swm._set_inputs(const_in)
+ if hasattr(swm, "_set_inputs"):
+ swm._set_inputs(const_in)
+ else:
+ swm.inputs_spec = const_in
oxml = convert_keras(swm)
assert runner('where_test', oxml, const_in, expected)
diff --git a/tests/run_pretrained_models.py b/tests/run_pretrained_models.py
index a6a952af3..7b74bbc61 100644
--- a/tests/run_pretrained_models.py
+++ b/tests/run_pretrained_models.py
@@ -474,7 +474,7 @@ def run_tflite():
for k in input_names:
v = self.input_names[k]
inputs[to_rename.get(k, k)] = tf.constant(self.make_input(v))
- tf_func = tf.function(concrete_func)
+ tf_func = tf.function(self.concrete_function)
logger.info("Running TF")
tf_results_d = tf_func(**inputs)
# If there is only a single output a dict might not be returned
@@ -492,7 +492,7 @@ def run_tflite():
tf.profiler.experimental.start(self.tf_profile)
while time.time() < stop:
for _ in range(PERF_STEP):
- _ = concrete_func(**inputs)
+ _ = self.concrete_function(**inputs)
n += PERF_STEP
if self.tf_profile is not None:
tf.profiler.experimental.stop()
diff --git a/tests/test_cudnn_compatible_gru.py b/tests/test_cudnn_compatible_gru.py
index ceab5a5ee..e01f08e6d 100644
--- a/tests/test_cudnn_compatible_gru.py
+++ b/tests/test_cudnn_compatible_gru.py
@@ -16,9 +16,7 @@
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,cell-var-from-loop
if is_tf2():
- MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
- dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
- bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
+ pass
else:
GRUBlockCell = tf.contrib.rnn.GRUBlockCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
diff --git a/tests/test_custom_rnncell.py b/tests/test_custom_rnncell.py
index b5286c348..8cff2d81e 100644
--- a/tests/test_custom_rnncell.py
+++ b/tests/test_custom_rnncell.py
@@ -16,13 +16,8 @@
# pylint: disable=abstract-method,arguments-differ
if is_tf2():
- BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
- LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
- GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell
- RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell
- MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
- dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
- bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
+ # no test for tf2 in this file
+ pass
else:
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
LSTMCell = tf.nn.rnn_cell.LSTMCell
@@ -32,6 +27,45 @@
dynamic_rnn = tf.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn
+ class GatedGRUCell(RNNCell):
+ def __init__(self, hidden_dim, reuse=None):
+ super().__init__(self, _reuse=reuse)
+ self._num_units = hidden_dim
+ self._activation = tf.tanh
+
+ @property
+ def state_size(self):
+ return self._num_units
+
+ @property
+ def output_size(self):
+ return self._num_units
+
+ def call(self, inputs, state):
+ # inputs shape: [batch size, time step, input size] = [1, 3, 2]
+ # num_units: 5
+ # W shape: [2, 3 * 5] = [2, 15]
+ # U shape: [5, 3 * 5] = [5, 15]
+ # b shape: [1, 3 * 5] = [1, 15]
+ # state shape: [batch size, state size] = [1, 5]
+
+ input_dim = inputs.get_shape()[-1]
+ assert input_dim is not None, "input dimension must be defined"
+ # W = tf.get_variable(name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32)
+ W = np.arange(30.0, dtype=np.float32).reshape((2, 15))
+ # U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32)
+ U = np.arange(75.0, dtype=np.float32).reshape((5, 15))
+ # b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32)
+ b = np.arange(15.0, dtype=np.float32).reshape((1, 15))
+
+ xw = tf.split(tf.matmul(inputs, W) + b, 3, 1)
+ hu = tf.split(tf.matmul(state, U), 3, 1)
+ r = tf.sigmoid(xw[0] + hu[0])
+ z = tf.sigmoid(xw[1] + hu[1])
+ h1 = self._activation(xw[2] + r * hu[2])
+ next_h = h1 * (1 - z) + state * z
+ return next_h, next_h
+
class CustomRnnCellTests(Tf2OnnxBackendTestBase):
@check_opset_min_version(8, "Scan")
@@ -376,45 +410,5 @@ def func(encoder_x, decoder_x, seq_length):
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
-class GatedGRUCell(RNNCell):
- def __init__(self, hidden_dim, reuse=None):
- super().__init__(self, _reuse=reuse)
- self._num_units = hidden_dim
- self._activation = tf.tanh
-
- @property
- def state_size(self):
- return self._num_units
-
- @property
- def output_size(self):
- return self._num_units
-
- def call(self, inputs, state):
- # inputs shape: [batch size, time step, input size] = [1, 3, 2]
- # num_units: 5
- # W shape: [2, 3 * 5] = [2, 15]
- # U shape: [5, 3 * 5] = [5, 15]
- # b shape: [1, 3 * 5] = [1, 15]
- # state shape: [batch size, state size] = [1, 5]
-
- input_dim = inputs.get_shape()[-1]
- assert input_dim is not None, "input dimension must be defined"
- # W = tf.get_variable(name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32)
- W = np.arange(30.0, dtype=np.float32).reshape((2, 15))
- # U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32)
- U = np.arange(75.0, dtype=np.float32).reshape((5, 15))
- # b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32)
- b = np.arange(15.0, dtype=np.float32).reshape((1, 15))
-
- xw = tf.split(tf.matmul(inputs, W) + b, 3, 1)
- hu = tf.split(tf.matmul(state, U), 3, 1)
- r = tf.sigmoid(xw[0] + hu[0])
- z = tf.sigmoid(xw[1] + hu[1])
- h1 = self._activation(xw[2] + r * hu[2])
- next_h = h1 * (1 - z) + state * z
- return next_h, next_h
-
-
if __name__ == '__main__':
unittest_main()
diff --git a/tests/test_gru.py b/tests/test_gru.py
index 88d1f7f7c..c127c57dc 100644
--- a/tests/test_gru.py
+++ b/tests/test_gru.py
@@ -34,10 +34,13 @@
if is_tf2():
# There is no LSTMBlockCell in tf-2.x
- BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
- LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
- GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell
- MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
+ try:
+ BasicLSTMCell = getattr(tf.compat.v1.nn.rnn_cell, "BasicLSTMCell", None)
+ LSTMCell = getattr(tf.compat.v1.nn.rnn_cell, "LSTMCell", None)
+ GRUCell = getattr(tf.compat.v1.nn.rnn_cell, "GRUCell", None)
+ MultiRNNCell = getattr(tf.compat.v1.nn.rnn_cell, "MultiRNNCell", None)
+ except (ImportError, AttributeError):
+ pass
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
diff --git a/tests/test_grublock.py b/tests/test_grublock.py
index ebd878680..7418fc128 100644
--- a/tests/test_grublock.py
+++ b/tests/test_grublock.py
@@ -17,7 +17,10 @@
# pylint: disable=invalid-name
if is_tf2():
- MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
+ try:
+ MultiRNNCell = getattr(tf.compat.v1.nn.rnn_cell, "MultiRNNCell", None)
+ except (ImportError, AttributeError):
+ pass
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
diff --git a/tests/test_lstm.py b/tests/test_lstm.py
index 3f9d41e5b..675298c44 100644
--- a/tests/test_lstm.py
+++ b/tests/test_lstm.py
@@ -21,9 +21,12 @@
if is_tf2():
# There is no LSTMBlockCell in tf-2.x
- BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
- LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
- MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
+ try:
+ BasicLSTMCell = getattr(tf.compat.v1.nn.rnn_cell, "BasicLSTMCell", None)
+ LSTMCell = getattr(tf.compat.v1.nn.rnn_cell, "LSTMCell", None)
+ MultiRNNCell = getattr(tf.compat.v1.nn.rnn_cell, "MultiRNNCell", None)
+ except (ImportError, AttributeError):
+ pass
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
diff --git a/tests/test_lstmblock.py b/tests/test_lstmblock.py
index d44e16ebd..0b8c9f8e1 100644
--- a/tests/test_lstmblock.py
+++ b/tests/test_lstmblock.py
@@ -16,9 +16,7 @@
if is_tf2():
# There is no LSTMBlockCell in tf-2.x
- MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
- dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
- bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
+ pass
else:
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
diff --git a/tests/test_seq2seq.py b/tests/test_seq2seq.py
index 4a6963467..51e1255be 100644
--- a/tests/test_seq2seq.py
+++ b/tests/test_seq2seq.py
@@ -13,13 +13,7 @@
# pylint: disable=invalid-name
if is_tf2():
- BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
- LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
- RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell
- MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
- dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
- bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
- LSTMStateTuple = tf.compat.v1.nn.rnn_cell.LSTMStateTuple
+ pass
else:
LSTMCell = tf.contrib.rnn.LSTMCell
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
diff --git a/tests/test_stacked_lstm.py b/tests/test_stacked_lstm.py
index 2cfef9bb1..2b3f877bc 100644
--- a/tests/test_stacked_lstm.py
+++ b/tests/test_stacked_lstm.py
@@ -16,8 +16,11 @@
# pylint: disable=invalid-name
if is_tf2():
- LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
- MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
+ try:
+ LSTMCell = getattr(tf.compat.v1.nn.rnn_cell, "LSTMCell", None)
+ MultiRNNCell = getattr(tf.compat.v1.nn.rnn_cell, "MultiRNNCell", None)
+ except (ImportError, AttributeError):
+ pass
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
else:
LSTMCell = tf.contrib.rnn.LSTMCell
diff --git a/tests/utils/setup_test_env.sh b/tests/utils/setup_test_env.sh
index a14828d05..0c643d854 100755
--- a/tests/utils/setup_test_env.sh
+++ b/tests/utils/setup_test_env.sh
@@ -16,17 +16,22 @@ echo "==== ONNXRuntime version: $ORT_VERSION"
echo "==== ONNX version: $ONNX_VERSION"
pip install pytest pytest-cov pytest-runner coverage graphviz requests pyyaml pillow pandas parameterized sympy coloredlogs flatbuffers timeout-decorator
-pip install onnx==$ONNX_VERSION
-pip install onnxruntime==$ORT_VERSION
-pip install "numpy<2"
-
-pip install onnxruntime-extensions
-pip install "tensorflow-text<=$TF_VERSION"
-
-pip uninstall -y tensorflow
-pip install tensorflow==$TF_VERSION
-pip uninstall -y protobuf
-pip install "protobuf~=3.20"
+pip uninstall -y tensorflow protobuf h5py
+pip install onnx==$ONNX_VERSION onnxruntime==$ORT_VERSION onnxruntime-extensions
+
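+# TF1 keeps the pinned legacy packages; newer TF2 also gets tf_keras so the legacy-Keras test path keeps working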
+if [[ $TF_VERSION == 1.* ]]; then
+ echo "-- install-3 TF1-KERAS $TF_VERSION"
+ pip install numpy==1.19.0 tensorflow==$TF_VERSION protobuf keras h5py
+else
+ pip uninstall -y protobuf
+ if [[ "$TF_VERSION" != "2.13.0" && "$TF_VERSION" != "2.9.0" ]]; then
+ echo "-- install-3 TF-KERAS $TF_VERSION"
+ pip install tensorflow==$TF_VERSION tf_keras==$TF_VERSION tensorflow-text
+ else
+ echo "-- install-3 TF $TF_VERSION"
+ pip install tensorflow-text tensorflow==$TF_VERSION protobuf
+ fi
+fi
python setup.py install
diff --git a/tf2onnx/convert.py b/tf2onnx/convert.py
index 6ee66c096..8ce11f9cc 100644
--- a/tf2onnx/convert.py
+++ b/tf2onnx/convert.py
@@ -20,7 +20,7 @@
from tf2onnx import constants, logging, utils, optimizer
from tf2onnx import tf_loader
from tf2onnx.graph import ExternalTensorStorage
-from tf2onnx.tf_utils import compress_graph_def, get_tf_version
+from tf2onnx.tf_utils import compress_graph_def, get_tf_version, get_keras_version
@@ -328,7 +328,8 @@ def _rename_duplicate_keras_model_names(model):
IMPORTANT: model may be edited. Assign model.output_names to old_out_names to restore.
"""
old_out_names = None
- if model.output_names and len(set(model.output_names)) != len(model.output_names):
+ if hasattr(model, "output_names") and model.output_names \
+ and len(set(model.output_names)) != len(model.output_names):
# In very rare cases, keras has a bug where it will give multiple outputs the same name
# We must edit the model or the TF trace will fail
old_out_names = model.output_names
@@ -408,6 +409,100 @@ def _from_keras_tf1(model, opset=None, custom_ops=None, custom_op_handlers=None,
return model_proto, external_tensor_storage
+def from_keras3(model, input_signature=None, opset=None, custom_ops=None, custom_op_handlers=None,
+ custom_rewriter=None, inputs_as_nchw=None, outputs_as_nchw=None, extra_opset=None, shape_override=None,
+ target=None, large_model=False, output_path=None, optimizers=None):
+ """
+ Convert a Keras 3 model to ONNX using tf2onnx.
+
+ Args:
+ model: Keras 3 Functional or Sequential model
+ input_signature: Optional list of tf.TensorSpec
+ opset: ONNX opset version
+ custom_ops: Dictionary of custom ops
+ custom_op_handlers: Dictionary of custom op handlers
+ custom_rewriter: List of graph rewriters
+ inputs_as_nchw: List of input names to convert to NCHW
+ extra_opset: Additional opset imports
+ shape_override: Dictionary to override input shapes
+ target: Target platforms (for workarounds)
+ large_model: Whether to use external tensor storage
+ output_path: Optional path to write ONNX model to file
+
+ Returns:
+ A tuple (model_proto, external_tensor_storage_dict)
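+
+ Example (a minimal sketch; assumes `model` is a built Keras 3 model):
+ import tf2onnx
+ model_proto, _ = tf2onnx.convert.from_keras3(model, opset=18, output_path="model.onnx")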
+ """
+
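+ # Build a default input signature from the model's symbolic inputs when none is supplied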
+ if not input_signature:
+ input_signature = [
+ tf.TensorSpec(tensor.shape, tensor.dtype, name=tensor.name.split(":")[0])
+ for tensor in model.inputs
+ ]
+
+ # Trace model
+ function = tf.function(model)
+ concrete_func = function.get_concrete_function(*input_signature)
+
+ # These inputs will be removed during freezing (includes resources, etc.)
+ if hasattr(concrete_func.graph, '_captures'):
+ graph_captures = concrete_func.graph._captures # pylint: disable=protected-access
+ captured_inputs = [t_name.name for _, t_name in graph_captures.values()]
+ else:
+ graph_captures = concrete_func.graph.function_captures.by_val_internal
+ captured_inputs = [t.name for t in graph_captures.values()]
+ input_names = [input_tensor.name for input_tensor in concrete_func.inputs
+ if input_tensor.name not in captured_inputs]
+ output_names = [output_tensor.name for output_tensor in concrete_func.outputs
+ if output_tensor.dtype != tf.dtypes.resource]
+
+ tensors_to_rename = tensor_names_from_structed(concrete_func, input_names, output_names)
+ reverse_lookup = {v: k for k, v in tensors_to_rename.items()}
+
+ valid_names = []
+ for out in [t.name for t in model.outputs]:
+ if out in reverse_lookup:
+ valid_names.append(reverse_lookup[out])
+ else:
+ print(f"Warning: Output name '{out}' not found in reverse_lookup.")
+ # Fallback: use the TensorFlow output names directly
+ valid_names = [t.name for t in concrete_func.outputs if t.dtype != tf.dtypes.resource]
+ break
+ output_names = valid_names
+
+ with tf.device("/cpu:0"):
+ frozen_graph, initialized_tables = \
+ tf_loader.from_trackable(model, concrete_func, input_names, output_names, large_model)
+ model_proto, external_tensor_storage = _convert_common(
+ frozen_graph,
+ name=model.name,
+ continue_on_error=True,
+ target=target,
+ opset=opset,
+ custom_ops=custom_ops,
+ custom_op_handlers=custom_op_handlers,
+ optimizers=optimizers,
+ custom_rewriter=custom_rewriter,
+ extra_opset=extra_opset,
+ shape_override=shape_override,
+ input_names=input_names,
+ output_names=output_names,
+ inputs_as_nchw=inputs_as_nchw,
+ outputs_as_nchw=outputs_as_nchw,
+ large_model=large_model,
+ tensors_to_rename=tensors_to_rename,
+ initialized_tables=initialized_tables,
+ output_path=output_path)
+ return model_proto, external_tensor_storage
def from_keras(model, input_signature=None, opset=None, custom_ops=None, custom_op_handlers=None,
custom_rewriter=None, inputs_as_nchw=None, outputs_as_nchw=None, extra_opset=None, shape_override=None,
@@ -438,6 +533,10 @@ def from_keras(model, input_signature=None, opset=None, custom_ops=None, custom_
if get_tf_version() < Version("2.0"):
return _from_keras_tf1(model, opset, custom_ops, custom_op_handlers, custom_rewriter, inputs_as_nchw,
outputs_as_nchw, extra_opset, shape_override, target, large_model, output_path)
+ if get_keras_version() >= Version("3.0"):
+ return from_keras3(model, input_signature, opset, custom_ops, custom_op_handlers,
+ custom_rewriter, inputs_as_nchw, outputs_as_nchw, extra_opset, shape_override,
+ target, large_model, output_path, optimizers)
old_out_names = _rename_duplicate_keras_model_names(model)
from tensorflow.python.keras.saving import saving_utils as _saving_utils # pylint: disable=import-outside-toplevel
@@ -446,7 +545,7 @@ def from_keras(model, input_signature=None, opset=None, custom_ops=None, custom_
function = _saving_utils.trace_model_call(model, input_signature)
try:
concrete_func = function.get_concrete_function()
- except TypeError as e:
+ except (TypeError, AttributeError) as e:
# Legacy keras models don't accept the training arg tf provides so we hack around it
if "got an unexpected keyword argument 'training'" not in str(e):
raise e
diff --git a/tf2onnx/onnx_opset/nn.py b/tf2onnx/onnx_opset/nn.py
index a06f02cec..1d10bbd66 100644
--- a/tf2onnx/onnx_opset/nn.py
+++ b/tf2onnx/onnx_opset/nn.py
@@ -1793,6 +1793,7 @@ def version_11(cls, ctx, node, **kwargs):
node.type = "Identity"
ctx.replace_inputs(node, [data])
return
+ cond = None
if len(conditions) == 1:
cond = conditions[0]
if len(conditions) == 2:
diff --git a/tf2onnx/tf_utils.py b/tf2onnx/tf_utils.py
index 16cb76344..bbbe9db30 100644
--- a/tf2onnx/tf_utils.py
+++ b/tf2onnx/tf_utils.py
@@ -10,6 +10,7 @@
import numpy as np
import tensorflow as tf
+import keras
from tensorflow.core.framework import types_pb2, tensor_pb2, graph_pb2
from tensorflow.python.framework import tensor_util
@@ -124,6 +125,9 @@ def get_tf_node_attr(node, name):
def get_tf_version():
return Version(tf.__version__)
+def get_keras_version():
+ return Version(keras.__version__)
+
def compress_graph_def(graph_def):
"""
Remove large const values from graph. This lets us import the graph and run shape inference without TF crashing.
@@ -351,9 +355,9 @@ def read_tf_node_def_attrs(node_def, input_dtypes, input_shapes):
# ignore the following attributes
TF_IGNORED_NODE_ATTRS = {
"T", "unknown_rank", "_class", "Tshape", "use_cudnn_on_gpu", "Index", "Tpaddings",
- "TI", "Tparams", "Tindices", "Tlen", "Tdim", "Tin", "dynamic_size", "Tmultiples",
+ "TI", "Tparams", "Tindices", "Tlen", "Tdim", "dynamic_size", "Tmultiples",
"Tblock_shape", "Tcrops", "index_type", "Taxis", "U", "maxval",
- "Tout", "Tlabels", "Tindex", "element_shape", "Targmax", "Tperm", "Tcond",
+ "Tlabels", "Tindex", "element_shape", "Targmax", "Tperm", "Tcond",
"T_threshold", "shape_type", "_lower_using_switch_merge",
"parallel_iterations", "_num_original_outputs", "output_types", "output_shapes",
"key_dtype", "value_dtype", "Tin", "Tout", "capacity", "component_types", "shapes",
diff --git a/tf2onnx/utils.py b/tf2onnx/utils.py
index 7f2f53daa..f6749eb7a 100644
--- a/tf2onnx/utils.py
+++ b/tf2onnx/utils.py
@@ -38,6 +38,7 @@
onnx_pb.TensorProto.FLOAT: np.float32,
onnx_pb.TensorProto.FLOAT16: np.float16,
onnx_pb.TensorProto.DOUBLE: np.float64,
+ onnx_pb.TensorProto.INT64: np.int64,
onnx_pb.TensorProto.INT32: np.int32,
onnx_pb.TensorProto.INT16: np.int16,
onnx_pb.TensorProto.INT8: np.int8,
@@ -45,8 +46,6 @@
onnx_pb.TensorProto.UINT16: np.uint16,
onnx_pb.TensorProto.UINT32: np.uint32,
onnx_pb.TensorProto.UINT64: np.uint64,
- onnx_pb.TensorProto.INT64: np.int64,
- onnx_pb.TensorProto.UINT64: np.uint64,
onnx_pb.TensorProto.BOOL: bool,
onnx_pb.TensorProto.COMPLEX64: np.complex64,
onnx_pb.TensorProto.COMPLEX128: np.complex128,
diff --git a/tools/pylintrc b/tools/pylintrc
index 955e35304..645cf5c29 100644
--- a/tools/pylintrc
+++ b/tools/pylintrc
@@ -8,9 +8,6 @@
# pygtk.require().
#init-hook=
-# Profiled execution.
-profile=no
-
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
@@ -39,11 +36,7 @@ enable=indexing-exception,old-raise-syntax
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,no-member,no-name-in-module,import-error,unsubscriptable-object,unbalanced-tuple-unpacking,undefined-variable,not-context-manager,useless-object-inheritance
-
-
-# Set the cache size for astng objects.
-cache-size=500
+disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,no-member,no-name-in-module,import-error,unsubscriptable-object,unbalanced-tuple-unpacking,undefined-variable,not-context-manager,useless-object-inheritance,consider-using-f-string,unspecified-encoding,unnecessary-lambda-assignment,use-dict-literal,consider-using-with,wrong-import-order,consider-iterating-dictionary,use-maxsplit-arg,possibly-used-before-assignment,super-with-arguments,redundant-u-string-prefix,use-list-literal,f-string-without-interpolation,condition-evals-to-constant,global-variable-not-assigned,consider-using-enumerate,unused-argument,superfluous-parens,use-sequence-for-iteration,useless-return,use-a-generator,raise-missing-from,overgeneral-exceptions,consider-using-generator,use-yield-from,consider-using-min-builtin
[REPORTS]
@@ -53,11 +46,6 @@ cache-size=500
# mypackage.mymodule.MyReporterClass.
output-format=text
-# Put messages in a separate file for each module / package specified on the
-# command line instead of printing them on stdout. Reports (if any) will be
-# written in a file name "pylint_global.[txt|html]".
-files-output=no
-
# Tells whether to display a full report or only the messages
reports=no
@@ -68,10 +56,6 @@ reports=no
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-# Add a comment according to your evaluation note. This is used by the global
-# evaluation report (RP0004).
-comment=no
-
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
@@ -87,10 +71,6 @@ ignore-mixin-members=yes
# (useful for classes with attributes dynamically set).
ignored-classes=SQLObject
-# When zope mode is activated, add a predefined set of Zope acquired attributes
-# to generated-members.
-zope=no
-
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E0201 when accessed. Python regular
# expressions are accepted.
@@ -117,17 +97,6 @@ additional-builtins=
[BASIC]
-# Required attributes for module, separated by a comma
-required-attributes=
-
-# List of builtins function names that should not be used, separated by a comma
-bad-functions=apply,input,reduce
-
-
-# Disable the report(s) with the given id(s).
-# All non-Google reports are disabled by default.
-disable-report=R0001,R0002,R0003,R0004,R0101,R0102,R0201,R0202,R0220,R0401,R0402,R0701,R0801,R0901,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,R0921,R0922,R0923
-
# Regular expression which should only match correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
@@ -187,9 +156,6 @@ ignore-long-lines=^\s*(# )??$
# else.
single-line-if-stmt=y
-# List of optional constructs for which whitespace checking is disabled
-no-space-check=
-
# Maximum number of lines in a module
max-module-lines=99999
@@ -239,10 +205,6 @@ int-import-graph=
[CLASSES]
-# List of interface methods to ignore, separated by a comma. This is used for
-# instance to not check methods defines in Zope's Interface base class.
-ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
-
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
@@ -286,33 +248,6 @@ min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "Exception"
-overgeneral-exceptions=Exception,StandardError,BaseException
-
-
-[AST]
-
-# Maximum line length for lambdas
-short-func-length=1
-
-# List of module members that should be marked as deprecated.
-# All of the string functions are listed in 4.1.4 Deprecated string functions
-# in the Python 2.4 docs.
-deprecated-members=string.atof,string.atoi,string.atol,string.capitalize,string.expandtabs,string.find,string.rfind,string.index,string.rindex,string.count,string.lower,string.split,string.rsplit,string.splitfields,string.join,string.joinfields,string.lstrip,string.rstrip,string.strip,string.swapcase,string.translate,string.upper,string.ljust,string.rjust,string.center,string.zfill,string.replace,sys.exitfunc
-
-
-[DOCSTRING]
-
-# List of exceptions that do not need to be mentioned in the Raises section of
-# a docstring.
-ignore-exceptions=AssertionError,NotImplementedError,StopIteration,TypeError
-
-
-
[TOKENS]
# Number of spaces of indent required when the last token on the preceding line
diff --git a/tutorials/README.md b/tutorials/README.md
index 352552fac..4e92b49e4 100644
--- a/tutorials/README.md
+++ b/tutorials/README.md
@@ -9,8 +9,6 @@ The following tutorials show how to convert various models to ONNX.
[efficientnet-lite](https://github.com/onnx/tensorflow-onnx/blob/main/tutorials/efficientnet-lite.ipynb)
-[keras-resnet50](https://github.com/onnx/tensorflow-onnx/blob/main/tutorials/keras-resnet50.ipynb) - shows how to convert a keras model via python api
-
## Object Detectors
[ssd-mobilenet](https://github.com/onnx/tensorflow-onnx/blob/main/tutorials/ConvertingSSDMobilenetToONNX.ipynb)
diff --git a/tutorials/keras-resnet50.ipynb b/tutorials/keras-resnet50.ipynb
deleted file mode 100755
index ac3c6b203..000000000
--- a/tutorials/keras-resnet50.ipynb
+++ /dev/null
@@ -1,252 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Conversion to ONNX from keras model using tf2onnx python api "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# get image\n",
- "!wget -q https://raw.githubusercontent.com/onnx/tensorflow-onnx/main/tests/ade20k.jpg"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "!pip install tensorflow tf2onnx onnxruntime"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [],
- "source": [
- "import os\n",
- "import tensorflow as tf\n",
- "from tensorflow.keras.applications.resnet50 import ResNet50\n",
- "from tensorflow.keras.preprocessing import image\n",
- "from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions\n",
- "import numpy as np\n",
- "import onnxruntime\n",
- "\n",
- "img_path = 'ade20k.jpg'\n",
- "\n",
- "img = image.load_img(img_path, target_size=(224, 224))\n",
- "\n",
- "x = image.img_to_array(img)\n",
- "x = np.expand_dims(x, axis=0)\n",
- "x = preprocess_input(x)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Run the keras model"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Keras Predicted: [('n04285008', 'sports_car', 0.34311807), ('n02974003', 'car_wheel', 0.28819188), ('n03100240', 'convertible', 0.10169428)]\n",
- "INFO:tensorflow:Assets written to: /tmp/resnet50/assets\n"
- ]
- }
- ],
- "source": [
- "model = ResNet50(weights='imagenet')\n",
- "\n",
- "preds = model.predict(x)\n",
- "print('Keras Predicted:', decode_predictions(preds, top=3)[0])\n",
- "model.save(os.path.join(\"/tmp\", model.name))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Convert to ONNX using the Python API"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "WARNING:tensorflow:From /home/ipython/.local/lib/python3.7/site-packages/tf2onnx/tf_loader.py:558: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
- "Instructions for updating:\n",
- "Use `tf.compat.v1.graph_util.extract_sub_graph`\n"
- ]
- }
- ],
- "source": [
- "import tf2onnx\n",
- "import onnxruntime as rt\n",
- "\n",
- "spec = (tf.TensorSpec((None, 224, 224, 3), tf.float32, name=\"input\"),)\n",
- "output_path = model.name + \".onnx\"\n",
- "\n",
- "model_proto, _ = tf2onnx.convert.from_keras(model, input_signature=spec, opset=13, output_path=output_path)\n",
- "output_names = [n.name for n in model_proto.graph.output]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Run the ONNX model"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "ONNX Predicted: [('n04285008', 'sports_car', 0.34311718), ('n02974003', 'car_wheel', 0.2881928), ('n03100240', 'convertible', 0.10169421)]\n"
- ]
- }
- ],
- "source": [
- "providers = ['CPUExecutionProvider']\n",
- "m = rt.InferenceSession(output_path, providers=providers)\n",
- "onnx_pred = m.run(output_names, {\"input\": x})\n",
- "\n",
- "print('ONNX Predicted:', decode_predictions(onnx_pred[0], top=3)[0])\n",
- "\n",
- "# make sure ONNX and keras have the same results\n",
- "np.testing.assert_allclose(preds, onnx_pred[0], rtol=1e-5)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Convert to ONNX using the command line"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {
- "scrolled": false
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "/opt/anaconda3/lib/python3.7/runpy.py:125: RuntimeWarning: 'tf2onnx.convert' found in sys.modules after import of package 'tf2onnx', but prior to execution of 'tf2onnx.convert'; this may result in unpredictable behaviour\n",
- " warn(RuntimeWarning(msg))\n",
- "2021-02-22 16:19:56,658 - WARNING - '--tag' not specified for saved_model. Using --tag serve\n",
- "2021-02-22 16:20:01,809 - INFO - Signatures found in model: [serving_default].\n",
- "2021-02-22 16:20:01,809 - WARNING - '--signature_def' not specified, using first signature: serving_default\n",
- "2021-02-22 16:20:01,810 - INFO - Output names: ['predictions']\n",
- "WARNING:tensorflow:From /home/ipython/.local/lib/python3.7/site-packages/tf2onnx/tf_loader.py:558: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
- "Instructions for updating:\n",
- "Use `tf.compat.v1.graph_util.extract_sub_graph`\n",
- "2021-02-22 16:20:03,659 - WARNING - From /home/ipython/.local/lib/python3.7/site-packages/tf2onnx/tf_loader.py:558: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
- "Instructions for updating:\n",
- "Use `tf.compat.v1.graph_util.extract_sub_graph`\n",
- "2021-02-22 16:20:04,540 - INFO - Using tensorflow=2.4.1, onnx=1.8.1, tf2onnx=1.9.0/ed89ef\n",
- "2021-02-22 16:20:04,540 - INFO - Using opset \n",
- "2021-02-22 16:20:07,401 - INFO - Computed 0 values for constant folding\n",
- "2021-02-22 16:20:13,830 - INFO - Optimizing ONNX model\n",
- "2021-02-22 16:20:15,152 - INFO - After optimization: Add -1 (18->17), BatchNormalization -53 (53->0), Cast -1 (1->0), Concat +1 (0->1), Const -162 (271->109), Identity -57 (57->0), Reshape -1 (1->0), Split +1 (0->1), Transpose -213 (215->2)\n",
- "2021-02-22 16:20:15,259 - INFO - \n",
- "2021-02-22 16:20:15,259 - INFO - Successfully converted TensorFlow model /tmp/resnet50 to ONNX\n",
- "2021-02-22 16:20:15,259 - INFO - Model inputs: ['input_1:0']\n",
- "2021-02-22 16:20:15,259 - INFO - Model outputs: ['predictions']\n",
- "2021-02-22 16:20:15,259 - INFO - ONNX model is saved at /tmp/resnet50.onnx\n"
- ]
- }
- ],
- "source": [
- "!python -m tf2onnx.convert --opset 13 \\\n",
- " --saved-model {os.path.join(\"/tmp\", model.name)} \\\n",
- " --output {os.path.join(\"/tmp\", model.name + \".onnx\")}"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.3"
- },
- "varInspector": {
- "cols": {
- "lenName": 16,
- "lenType": 16,
- "lenVar": 40
- },
- "kernels_config": {
- "python": {
- "delete_cmd_postfix": "",
- "delete_cmd_prefix": "del ",
- "library": "var_list.py",
- "varRefreshCmd": "print(var_dic_list())"
- },
- "r": {
- "delete_cmd_postfix": ") ",
- "delete_cmd_prefix": "rm(",
- "library": "var_list.r",
- "varRefreshCmd": "cat(var_dic_list()) "
- }
- },
- "types_to_exclude": [
- "module",
- "function",
- "builtin_function_or_method",
- "instance",
- "_Feature"
- ],
- "window_display": false
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}