
Commit 679f59c

Set default_batch_size to 'None' (#339)
* Set default_batch_size to 'None'
1 parent 060a71e commit 679f59c

File tree

15 files changed: +36, -37 lines changed

.azure-pipelines/linux-CI-nightly.yml

Lines changed: 1 addition & 1 deletion
@@ -54,7 +54,7 @@ jobs:
       python -c "import svmutil"
       python -c "import onnxconverter_common"
       test '$(python.version)' != '2.7' && python -c "import onnxruntime"
-      pytest tests --doctest-modules --junitxml=junit/test-results.xml
+      pytest tests --ignore=tests/sparkml --doctest-modules --junitxml=junit/test-results.xml
     displayName: 'pytest - onnxmltools'

   - task: PublishTestResults@2

.azure-pipelines/linux-conda-CI.yml

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ jobs:
       python -c "import svmutil"
       python -c "import onnxconverter_common"
       test '$(python.version)' != '2.7' && python -c "import onnxruntime"
-      pytest tests --doctest-modules --junitxml=junit/test-results.xml
+      pytest tests --ignore=tests/sparkml --doctest-modules --junitxml=junit/test-results.xml
     displayName: 'pytest - onnxmltools'

   - script: |

.azure-pipelines/win32-CI-nightly.yml

Lines changed: 1 addition & 1 deletion
@@ -58,7 +58,7 @@ jobs:
       call activate py$(python.version)
       set PYTHONPATH=libsvm\python;%PYTHONPATH%
       pip install -e .
-      pytest tests --doctest-modules --junitxml=junit/test-results.xml
+      pytest tests --ignore=tests/sparkml --doctest-modules --junitxml=junit/test-results.xml
     displayName: 'pytest - onnxmltools'

   - task: PublishTestResults@2

.azure-pipelines/win32-conda-CI.yml

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@ jobs:
       call activate py$(python.version)
       set PYTHONPATH=libsvm\python;%PYTHONPATH%
       pip install -e .
-      pytest tests --doctest-modules --junitxml=junit/test-results.xml
+      pytest tests --ignore=tests/sparkml --doctest-modules --junitxml=junit/test-results.xml
     displayName: 'pytest - onnxmltools'

   - task: PublishTestResults@2

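All four pipeline definitions receive the same one-line change: the spark-ml tests are excluded from the default pytest run. For reference, the command as it now appears in CI (the junit XML path only matters inside the pipeline and can be dropped when running locally):

    pytest tests --ignore=tests/sparkml --doctest-modules --junitxml=junit/test-results.xml
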
onnxmltools/convert/coreml/_parse.py

Lines changed: 1 addition & 2 deletions
@@ -448,8 +448,7 @@ def parse_coreml(model, initial_types=None, target_opset=None, custom_conversion

     # Determine the batch size for parsing CoreML model's input and output features. Note that batch size is always
     # missing in all CoreML models.
-    default_batch_size = 1 if model.WhichOneof('Type') not in \
-        ['neuralNetworkClassifier', 'neuralNetworkRegressor', 'neuralNetwork'] else 'None'
+    default_batch_size = 'None'

     # Topology is shared by both of CoreML and scikit-learn conversion frameworks, so we have a wrapper class,
     # CoremlModelContainer, to make sure our topology-related functions can seamlessly handle both of CoreML and

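Previously parse_coreml fixed the batch size to 1 for everything except the neural-network model types; it now always uses the symbolic value 'None'. A minimal sketch of what that symbolic dimension means for declared tensor types, assuming FloatTensorType is imported as in the library's test helpers (the variable names are illustrative, not part of the commit):

    from onnxmltools.convert.common.data_types import FloatTensorType

    # A fixed batch dimension pins the ONNX input to exactly one row.
    fixed_input = FloatTensorType([1, 2])

    # The string 'None' is kept as a symbolic dimension (a dim_param in the
    # exported graph), so the model accepts any number of rows at inference time.
    dynamic_input = FloatTensorType(['None', 2])

    print(fixed_input.shape)    # [1, 2]
    print(dynamic_input.shape)  # ['None', 2]
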
onnxmltools/convert/libsvm/_parse.py

Lines changed: 1 addition & 1 deletion
@@ -58,7 +58,7 @@ def parse_libsvm(model, initial_types=None, target_opset=None,

     # Declare a computational graph. It will become a representation of
     # the input scikit-learn model after parsing.
-    topology = Topology(raw_model_container,
+    topology = Topology(raw_model_container, default_batch_size='None',
                         initial_types=initial_types,
                         target_opset=target_opset,
                         custom_conversion_functions=custom_conversion_functions,

onnxmltools/convert/lightgbm/_parse.py

Lines changed: 1 addition & 1 deletion
@@ -104,7 +104,7 @@ def parse_lightgbm(model, initial_types=None, target_opset=None,
                    custom_conversion_functions=None, custom_shape_calculators=None):

     raw_model_container = LightGbmModelContainer(model)
-    topology = Topology(raw_model_container,
+    topology = Topology(raw_model_container, default_batch_size='None',
                         initial_types=initial_types, target_opset=target_opset,
                         custom_conversion_functions=custom_conversion_functions,
                         custom_shape_calculators=custom_shape_calculators)

onnxmltools/convert/sparkml/_parse.py

Lines changed: 1 addition & 1 deletion
@@ -96,7 +96,7 @@ def parse_sparkml(spark, model, initial_types=None, target_opset=None,
     raw_model_container = SparkmlModelContainer(model)

     # Declare a computational graph. It will become a representation of the input spark-ml model after parsing.
-    topology = Topology(raw_model_container,
+    topology = Topology(raw_model_container, default_batch_size='None',
                         initial_types=initial_types,
                         target_opset=target_opset,
                         custom_conversion_functions=custom_conversion_functions,

onnxmltools/convert/xgboost/_parse.py

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ def parse_xgboost(model, initial_types=None, target_opset=None,
                   custom_conversion_functions=None, custom_shape_calculators=None):

     raw_model_container = XGBoostModelContainer(model)
-    topology = Topology(raw_model_container,
+    topology = Topology(raw_model_container, default_batch_size='None',
                         initial_types=initial_types, target_opset=target_opset,
                         custom_conversion_functions=custom_conversion_functions,
                         custom_shape_calculators=custom_shape_calculators)

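The libsvm, lightgbm, spark-ml, and xgboost parsers all gain the same explicit argument, so each Topology is built with a symbolic batch size rather than whatever the Topology constructor would otherwise default to. A sketch of the constructor call now shared by these parsers (raw_model_container stands for the framework-specific container built just above each call):

    topology = Topology(raw_model_container, default_batch_size='None',
                        initial_types=initial_types,
                        target_opset=target_opset,
                        custom_conversion_functions=custom_conversion_functions,
                        custom_shape_calculators=custom_shape_calculators)
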
onnxmltools/utils/tests_helper.py

Lines changed: 5 additions & 5 deletions
@@ -224,7 +224,7 @@ def dump_one_class_classification(model, suffix="", folder=None, allow_failure=N
     X = numpy.array(X, dtype=numpy.float32)
     y = [1, 1, 1]
     model.fit(X, y)
-    model_onnx, prefix = convert_model(model, 'one_class', [('input', FloatTensorType([1, 2]))])
+    model_onnx, prefix = convert_model(model, 'one_class', [('input', FloatTensorType(['None', 2]))])
     return dump_data_and_model(X, model, model_onnx, folder=folder, allow_failure=allow_failure,
                                basename=prefix + "One" + model.__class__.__name__ + suffix)

@@ -250,7 +250,7 @@ def dump_binary_classification(model, suffix="", folder=None, allow_failure=None
     X = numpy.array(X, dtype=numpy.float32)
     y = [0, 1, 0]
     model.fit(X, y)
-    model_onnx, prefix = convert_model(model, 'tree-based binary classifier', [('input', FloatTensorType([1, 2]))])
+    model_onnx, prefix = convert_model(model, 'tree-based binary classifier', [('input', FloatTensorType(['None', 2]))])
     dump_data_and_model(X, model, model_onnx, folder=folder, allow_failure=allow_failure,
                         basename=prefix + "Bin" + model.__class__.__name__ + suffix,
                         verbose=verbose)
@@ -275,7 +275,7 @@ def dump_multiple_classification(model, suffix="", folder=None, allow_failure=No
     X = numpy.array(X, dtype=numpy.float32)
     y = [0, 1, 2, 1, 1, 2]
     model.fit(X, y)
-    model_onnx, prefix = convert_model(model, 'tree-based multi-output regressor', [('input', FloatTensorType([1, 2]))])
+    model_onnx, prefix = convert_model(model, 'tree-based multi-output regressor', [('input', FloatTensorType(['None', 2]))])
     dump_data_and_model(X, model, model_onnx, folder=folder, allow_failure=allow_failure,
                         basename=prefix + "Mcl" + model.__class__.__name__ + suffix)

@@ -300,7 +300,7 @@ def dump_multiple_regression(model, suffix="", folder=None, allow_failure=None):
     X = numpy.array(X, dtype=numpy.float32)
     y = numpy.array([[100, 50], [100, 49], [100, 99]], dtype=numpy.float32)
     model.fit(X, y)
-    model_onnx, prefix = convert_model(model, 'tree-based multi-output regressor', [('input', FloatTensorType([1, 2]))])
+    model_onnx, prefix = convert_model(model, 'tree-based multi-output regressor', [('input', FloatTensorType(['None', 2]))])
     dump_data_and_model(X, model, model_onnx, folder=folder, allow_failure=allow_failure,
                         basename=prefix + "MRg" + model.__class__.__name__ + suffix)

@@ -326,7 +326,7 @@ def dump_single_regression(model, suffix="", folder=None, allow_failure=None):
     X = numpy.array(X, dtype=numpy.float32)
     y = numpy.array([100, -10, 50], dtype=numpy.float32)
     model.fit(X, y)
-    model_onnx, prefix = convert_model(model, 'tree-based regressor', [('input', FloatTensorType([1, 2]))])
+    model_onnx, prefix = convert_model(model, 'tree-based regressor', [('input', FloatTensorType(['None', 2]))])
     dump_data_and_model(X, model, model_onnx, folder=folder, allow_failure=allow_failure,
                         basename=prefix + "Reg" + model.__class__.__name__ + suffix)