
Commit 5f2b51e

ooples and claude committed
fix: correct onnx field numbers and address pr review comments
CRITICAL: Fix ONNX TensorProto field number compliance:
- OnnxProto.cs: Change field 3 → 8 for tensor name per ONNX spec
- OnnxToCoreMLConverter.cs: Fix all TensorProto fields (1=dims, 2=data_type, 8=name, 9=raw_data)
- Previous incorrect field numbers would cause empty tensor names and broken shape inference

Additional fixes:
- CoreMLExporter.cs: Fix QuantizationBits mapping (Int8→8, Float16→16, default→32)
- TensorRTConfiguration.cs: Use ArgumentException instead of ArgumentNullException for whitespace validation
- ModelExporterBase.cs: Remove redundant null check (IsNullOrWhiteSpace handles null)

Addresses PR #486 review comments #1, #2, #4, #5, #6

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <[email protected]>
1 parent 4365614 commit 5f2b51e

5 files changed (+16, -12 lines changed)

src/Deployment/Export/ModelExporterBase.cs

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ public virtual void Export(IFullModel<T, TInput, TOutput> model, string outputPa
         // Ensure output directory exists
         // Path.GetDirectoryName can return null for root paths or relative filenames
         var directory = Path.GetDirectoryName(outputPath);
-        if (directory is not null && !string.IsNullOrWhiteSpace(directory))
+        if (!string.IsNullOrWhiteSpace(directory))
         {
             if (!Directory.Exists(directory))
             {

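The ModelExporterBase change relies on string.IsNullOrWhiteSpace already returning true for null, so the separate is-not-null check added nothing. A minimal sketch of the directory-guard pattern in isolation (the helper class and method names are illustrative, not the repository's exact code):

using System.IO;

static class ExportPathHelper
{
    // Path.GetDirectoryName returns null for root paths and "" for bare file
    // names; IsNullOrWhiteSpace covers null, empty, and whitespace in one call.
    public static void EnsureOutputDirectory(string outputPath)
    {
        var directory = Path.GetDirectoryName(outputPath);
        if (!string.IsNullOrWhiteSpace(directory))
        {
            Directory.CreateDirectory(directory); // no-op if it already exists
        }
    }
}
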
src/Deployment/Export/Onnx/OnnxProto.cs

Lines changed: 2 additions & 2 deletions
@@ -353,8 +353,8 @@ private static byte[] CreateTensorProto<T>(string name, Vector<T> data)
         writer.WriteTag(2, WireFormat.WireType.Varint);
         writer.WriteInt32(GetDataTypeValue(typeof(T)));

-        // Field 3: name
-        writer.WriteTag(3, WireFormat.WireType.LengthDelimited);
+        // Field 8: name (per ONNX TensorProto specification)
+        writer.WriteTag(8, WireFormat.WireType.LengthDelimited);
         writer.WriteString(name);

         // Field 9: raw_data

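The tensor-name fix matters because a protobuf field tag encodes the field number in its upper bits: tag = (field_number << 3) | wire_type. Writing the name under field 3 produces a tag byte that readers looking for TensorProto.name (field 8) never match, which is why downstream tooling saw empty tensor names. A small illustrative check of the two tag values (standalone demo, not repository code):

using System;

static class TensorProtoTagDemo
{
    // Protobuf tag layout: (field_number << 3) | wire_type.
    // Wire type 2 = length-delimited (strings, bytes).
    static uint MakeTag(int fieldNumber, int wireType) => (uint)((fieldNumber << 3) | wireType);

    static void Main()
    {
        // Old (incorrect) write: field 3, length-delimited -> tag 0x1A.
        Console.WriteLine($"field 3 tag: 0x{MakeTag(3, 2):X2}");
        // Corrected write: ONNX TensorProto.name is field 8 -> tag 0x42,
        // which is the tag ONNX readers actually look for.
        Console.WriteLine($"field 8 tag: 0x{MakeTag(8, 2):X2}");
    }
}
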
src/Deployment/Mobile/CoreML/CoreMLExporter.cs

Lines changed: 6 additions & 1 deletion
@@ -84,7 +84,12 @@ private byte[] ConvertOnnxToCoreML(byte[] onnxBytes, ExportConfiguration config)
             ModelName = config.ModelName,
             ModelDescription = config.ModelDescription,
             OptimizeForSize = true,
-            QuantizationBits = config.QuantizationMode == QuantizationMode.Float16 ? 16 : 32
+            QuantizationBits = config.QuantizationMode switch
+            {
+                QuantizationMode.Int8 => 8,
+                QuantizationMode.Float16 => 16,
+                _ => 32
+            }
         };
     }

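With the switch expression, Int8 exports now request 8-bit weights instead of silently falling back to 32. A quick sketch of the mapping in isolation (the enum members Int8 and Float16 come from the diff; the None member and standalone method are assumptions for the demo):

using System;

enum QuantizationMode { None, Float16, Int8 }

static class QuantizationBitsDemo
{
    // Same mapping the exporter now uses: Int8 -> 8, Float16 -> 16, anything else -> 32.
    static int ToBits(QuantizationMode mode) => mode switch
    {
        QuantizationMode.Int8 => 8,
        QuantizationMode.Float16 => 16,
        _ => 32
    };

    static void Main()
    {
        Console.WriteLine(ToBits(QuantizationMode.Int8));    // 8 (previously fell through to 32)
        Console.WriteLine(ToBits(QuantizationMode.Float16)); // 16
        Console.WriteLine(ToBits(QuantizationMode.None));    // 32
    }
}
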
src/Deployment/Mobile/CoreML/OnnxToCoreMLConverter.cs

Lines changed: 5 additions & 5 deletions
@@ -145,14 +145,14 @@ private static OnnxTensor ParseTensor(byte[] tensorBytes)

             switch (fieldNumber)
             {
-                case 3: // name
-                    name = reader.ReadString();
+                case 1: // dims (repeated) - ONNX TensorProto field 1
+                    dims.Add(reader.ReadInt64());
                     break;
-                case 5: // data_type
+                case 2: // data_type - ONNX TensorProto field 2
                     dataType = reader.ReadInt32();
                     break;
-                case 7: // dims (repeated)
-                    dims.Add(reader.ReadInt64());
+                case 8: // name - ONNX TensorProto field 8
+                    name = reader.ReadString();
                     break;
                 case 9: // raw_data
                     var rawBytes = reader.ReadBytes().ToByteArray();

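The corrected case labels follow the ONNX TensorProto layout: field 1 = dims (repeated int64), field 2 = data_type, field 8 = name, field 9 = raw_data. A minimal sketch of a tag-driven reader keyed on those numbers, assuming Google.Protobuf's CodedInputStream (which matches the reader calls in the diff); variable names are illustrative:

using System.Collections.Generic;
using Google.Protobuf;

static class TensorProtoReaderSketch
{
    // Reads the TensorProto fields this converter cares about, skipping the rest.
    static (List<long> dims, int dataType, string name, byte[] rawData) ReadTensor(byte[] tensorBytes)
    {
        var dims = new List<long>();
        int dataType = 0;
        string name = "";
        byte[] rawData = System.Array.Empty<byte>();

        var reader = new CodedInputStream(tensorBytes);
        uint tag;
        while ((tag = reader.ReadTag()) != 0)
        {
            switch ((int)(tag >> 3)) // upper bits of the tag hold the field number
            {
                case 1: dims.Add(reader.ReadInt64()); break;                // dims, one varint per tag (unpacked), as in the converter
                case 2: dataType = reader.ReadInt32(); break;               // data_type
                case 8: name = reader.ReadString(); break;                  // name
                case 9: rawData = reader.ReadBytes().ToByteArray(); break;  // raw_data
                default: reader.SkipLastField(); break;                     // ignore all other fields
            }
        }
        return (dims, dataType, name, rawData);
    }
}
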
src/Deployment/TensorRT/TensorRTConfiguration.cs

Lines changed: 2 additions & 3 deletions
@@ -157,12 +157,11 @@ public static TensorRTConfiguration ForHighThroughput(int batchSize = 64, string
     /// Creates a configuration with INT8 quantization.
     /// </summary>
     /// <param name="calibrationDataPath">Path to calibration data file (required for INT8 quantization)</param>
-    /// <exception cref="ArgumentNullException">Thrown when calibrationDataPath is null or whitespace</exception>
+    /// <exception cref="ArgumentException">Thrown when calibrationDataPath is null or whitespace</exception>
     public static TensorRTConfiguration ForInt8(string calibrationDataPath)
    {
         if (string.IsNullOrWhiteSpace(calibrationDataPath))
-            throw new ArgumentNullException(nameof(calibrationDataPath),
-                "Calibration data path is required for INT8 quantization");
+            throw new ArgumentException("Calibration data path cannot be null or whitespace", nameof(calibrationDataPath));

         return new TensorRTConfiguration
         {

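ArgumentException is the better fit here because the guard rejects whitespace as well as null; ArgumentNullException only describes the null case. A short usage sketch under that assumption (the caller-side handling is illustrative, not from the repository):

using System;

class ForInt8UsageSketch
{
    static void Main()
    {
        try
        {
            // Whitespace-only path: previously raised ArgumentNullException,
            // which misleadingly implied the argument was null.
            var config = TensorRTConfiguration.ForInt8("   ");
        }
        catch (ArgumentException ex) // also catches ArgumentNullException, a subclass
        {
            Console.WriteLine($"Rejected: {ex.ParamName} - {ex.Message}");
        }
    }
}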