Skip to content

Commit e794342

Browse files
Load onnx model from Stream working (dotnet#7254)
1 parent 1843bcd commit e794342

File tree

4 files changed

+421
-7
lines changed

4 files changed

+421
-7
lines changed

src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs

Lines changed: 222 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44

55
using System;
66
using System.Collections.Generic;
7+
using System.IO;
78
using Microsoft.ML.Data;
89
using Microsoft.ML.Transforms;
910
using Microsoft.ML.Transforms.Onnx;
@@ -40,6 +41,34 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
4041
return new OnnxScoringEstimator(env, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse);
4142
}
4243

44+
/// <summary>
/// Creates an <see cref="OnnxScoringEstimator"/> that scores data with a pre-trained ONNX model
/// supplied as a <see cref="System.IO.Stream"/>. Input and output columns are inferred from the
/// inputs and outputs declared by the model itself. Refer to <see cref="OnnxScoringEstimator"/>
/// for the required native dependencies and for running on a GPU.
/// </summary>
/// <remarks>
/// Input column names/types must exactly match the ONNX model inputs; the produced output columns
/// mirror the name/type of the ONNX model outputs.
/// When <paramref name="gpuDeviceId"/> is <see langword="null" />, the <see cref="P:MLContext.GpuDeviceId"/>
/// value is used instead, provided it is not <see langword="null" />.
/// </remarks>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="modelBytes">The <see cref="System.IO.Stream"/> containing the model bytes.</param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <example>
/// <format type="text/markdown">
/// <![CDATA[
/// [!code-csharp[ApplyOnnxModel](~/../docs/samples/docs/samples/Microsoft.ML.Samples/Dynamic/Transforms/ApplyOnnxModel.cs)]
/// ]]>
/// </format>
/// </example>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog, Stream modelBytes, int? gpuDeviceId = null, bool fallbackToCpu = false)
{
    // Resolve the effective device/fallback settings (may come from MLContext.GpuDeviceId).
    var (host, deviceIdToUse, cpuFallbackToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
    return new OnnxScoringEstimator(host, modelBytes, deviceIdToUse, cpuFallbackToUse);
}
71+
4372
/// <summary>
4473
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the input column.
4574
/// Input/output columns are determined based on the input/output columns of the provided ONNX model.
@@ -76,6 +105,42 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
76105
return new OnnxScoringEstimator(env, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
77106
}
78107

108+
/// <summary>
/// Creates an <see cref="OnnxScoringEstimator"/> that scores data with a pre-trained ONNX model
/// supplied as a <see cref="System.IO.Stream"/>, with caller-provided tensor shapes. Input and
/// output columns are inferred from the inputs and outputs declared by the model itself.
/// Refer to <see cref="OnnxScoringEstimator"/> for the required native dependencies and for
/// running on a GPU.
/// </summary>
/// <remarks>
/// Input column names/types must exactly match the ONNX model inputs; the produced output columns
/// mirror the name/type of the ONNX model outputs.
/// When <paramref name="gpuDeviceId"/> is <see langword="null" />, the <see cref="P:MLContext.GpuDeviceId"/>
/// value is used instead, provided it is not <see langword="null" />.
/// </remarks>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="modelBytes">The <see cref="System.IO.Stream"/> containing the model bytes.</param>
/// <param name="shapeDictionary">ONNX shapes to be used over those loaded from <paramref name="modelBytes"/>.
/// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
/// is particularly useful for working with variable dimension inputs and outputs.
/// </param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <example>
/// <format type="text/markdown">
/// <![CDATA[
/// [!code-csharp[ApplyOnnxModel](~/../docs/samples/docs/samples/Microsoft.ML.Samples/Dynamic/Transforms/ApplyOnnxModel.cs)]
/// ]]>
/// </format>
/// </example>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
    Stream modelBytes,
    IDictionary<string, int[]> shapeDictionary,
    int? gpuDeviceId = null,
    bool fallbackToCpu = false)
{
    // Resolve the effective device/fallback settings (may come from MLContext.GpuDeviceId).
    var (host, deviceIdToUse, cpuFallbackToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
    return new OnnxScoringEstimator(host, modelBytes, deviceIdToUse, cpuFallbackToUse, shapeDictionary: shapeDictionary);
}
143+
79144
/// <summary>
80145
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnName"/> column.
81146
/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
@@ -108,6 +173,38 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
108173
return new OnnxScoringEstimator(env, new[] { outputColumnName }, new[] { inputColumnName }, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse);
109174
}
110175

176+
/// <summary>
/// Creates an <see cref="OnnxScoringEstimator"/> that applies a pre-trained ONNX model, supplied
/// as a <see cref="System.IO.Stream"/>, to the <paramref name="inputColumnName"/> column.
/// Refer to <see cref="OnnxScoringEstimator"/> for the required native dependencies and for
/// running on a GPU.
/// </summary>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="outputColumnName">The output column resulting from the transformation.</param>
/// <param name="inputColumnName">The input column.</param>
/// <param name="modelBytes">The <see cref="System.IO.Stream"/> containing the model bytes.</param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <remarks>
/// When <paramref name="gpuDeviceId"/> is <see langword="null" />, the <see cref="P:MLContext.GpuDeviceId"/>
/// value is used instead, provided it is not <see langword="null" />.
/// </remarks>
/// <example>
/// <format type="text/markdown">
/// <![CDATA[
/// [!code-csharp[ApplyOnnxModel](~/../docs/samples/docs/samples/Microsoft.ML.Samples/Dynamic/Transforms/ApplyONNXModelWithInMemoryImages.cs)]
/// ]]>
/// </format>
/// </example>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
    string outputColumnName,
    string inputColumnName,
    Stream modelBytes,
    int? gpuDeviceId = null,
    bool fallbackToCpu = false)
{
    // Resolve the effective device/fallback settings (may come from MLContext.GpuDeviceId).
    var (host, deviceIdToUse, cpuFallbackToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
    return new OnnxScoringEstimator(host, new[] { outputColumnName }, new[] { inputColumnName }, modelBytes, deviceIdToUse, cpuFallbackToUse);
}
207+
111208
/// <summary>
112209
/// Create a <see cref="OnnxScoringEstimator"/> using the specified <see cref="OnnxOptions"/>.
113210
/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
@@ -163,6 +260,44 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
163260
modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
164261
}
165262

263+
/// <summary>
/// Creates an <see cref="OnnxScoringEstimator"/> that applies a pre-trained ONNX model, supplied
/// as a <see cref="System.IO.Stream"/>, to the <paramref name="inputColumnName"/> column, with
/// caller-provided tensor shapes. Refer to <see cref="OnnxScoringEstimator"/> for the required
/// native dependencies and for running on a GPU.
/// </summary>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="outputColumnName">The output column resulting from the transformation.</param>
/// <param name="inputColumnName">The input column.</param>
/// <param name="modelBytes">The <see cref="System.IO.Stream"/> containing the model bytes.</param>
/// <param name="shapeDictionary">ONNX shapes to be used over those loaded from <paramref name="modelBytes"/>.
/// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
/// is particularly useful for working with variable dimension inputs and outputs.
/// </param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <remarks>
/// When <paramref name="gpuDeviceId"/> is <see langword="null" />, the <see cref="P:MLContext.GpuDeviceId"/>
/// value is used instead, provided it is not <see langword="null" />.
/// </remarks>
/// <example>
/// <format type="text/markdown">
/// <![CDATA[
/// [!code-csharp[ApplyOnnxModel](~/../docs/samples/docs/samples/Microsoft.ML.Samples/Dynamic/Transforms/ApplyONNXModelWithInMemoryImages.cs)]
/// ]]>
/// </format>
/// </example>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
    string outputColumnName,
    string inputColumnName,
    Stream modelBytes,
    IDictionary<string, int[]> shapeDictionary,
    int? gpuDeviceId = null,
    bool fallbackToCpu = false)
{
    // Resolve the effective device/fallback settings (may come from MLContext.GpuDeviceId).
    var (host, deviceIdToUse, cpuFallbackToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
    return new OnnxScoringEstimator(host, new[] { outputColumnName }, new[] { inputColumnName },
        modelBytes, deviceIdToUse, cpuFallbackToUse, shapeDictionary: shapeDictionary);
}
300+
166301
/// <summary>
167302
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnNames"/> columns.
168303
/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
@@ -188,6 +323,31 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
188323
return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse);
189324
}
190325

326+
/// <summary>
/// Creates an <see cref="OnnxScoringEstimator"/> that applies a pre-trained ONNX model, supplied
/// as a <see cref="System.IO.Stream"/>, to the <paramref name="inputColumnNames"/> columns.
/// Refer to <see cref="OnnxScoringEstimator"/> for the required native dependencies and for
/// running on a GPU.
/// </summary>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="outputColumnNames">The output columns resulting from the transformation.</param>
/// <param name="inputColumnNames">The input columns.</param>
/// <param name="modelBytes">The <see cref="System.IO.Stream"/> containing the model bytes.</param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <remarks>
/// When <paramref name="gpuDeviceId"/> is <see langword="null" />, the <see cref="P:MLContext.GpuDeviceId"/>
/// value is used instead, provided it is not <see langword="null" />.
/// </remarks>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
    string[] outputColumnNames,
    string[] inputColumnNames,
    Stream modelBytes,
    int? gpuDeviceId = null,
    bool fallbackToCpu = false)
{
    // Resolve the effective device/fallback settings (may come from MLContext.GpuDeviceId).
    var (host, deviceIdToUse, cpuFallbackToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
    return new OnnxScoringEstimator(host, outputColumnNames, inputColumnNames, modelBytes, deviceIdToUse, cpuFallbackToUse);
}
350+
191351
/// <summary>
192352
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnNames"/> columns.
193353
/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
@@ -218,6 +378,36 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
218378
return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
219379
}
220380

381+
/// <summary>
/// Creates an <see cref="OnnxScoringEstimator"/> that applies a pre-trained ONNX model, supplied
/// as a <see cref="System.IO.Stream"/>, to the <paramref name="inputColumnNames"/> columns, with
/// caller-provided tensor shapes. Refer to <see cref="OnnxScoringEstimator"/> for the required
/// native dependencies and for running on a GPU.
/// </summary>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="outputColumnNames">The output columns resulting from the transformation.</param>
/// <param name="inputColumnNames">The input columns.</param>
/// <param name="modelBytes">The <see cref="System.IO.Stream"/> containing the model bytes.</param>
/// <param name="shapeDictionary">ONNX shapes to be used over those loaded from <paramref name="modelBytes"/>.
/// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
/// is particularly useful for working with variable dimension inputs and outputs.
/// </param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <remarks>
/// When <paramref name="gpuDeviceId"/> is <see langword="null" />, the <see cref="P:MLContext.GpuDeviceId"/>
/// value is used instead, provided it is not <see langword="null" />.
/// </remarks>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
    string[] outputColumnNames,
    string[] inputColumnNames,
    Stream modelBytes,
    IDictionary<string, int[]> shapeDictionary,
    int? gpuDeviceId = null,
    bool fallbackToCpu = false)
{
    // Resolve the effective device/fallback settings (may come from MLContext.GpuDeviceId).
    var (host, deviceIdToUse, cpuFallbackToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
    return new OnnxScoringEstimator(host, outputColumnNames, inputColumnNames, modelBytes, deviceIdToUse, cpuFallbackToUse, shapeDictionary: shapeDictionary);
}
410+
221411
/// <summary>
222412
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnNames"/> columns.
223413
/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
@@ -250,6 +440,38 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
250440
return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary, recursionLimit);
251441
}
252442

443+
/// <summary>
/// Creates an <see cref="OnnxScoringEstimator"/> that applies a pre-trained ONNX model, supplied
/// as a <see cref="System.IO.Stream"/>, to the <paramref name="inputColumnNames"/> columns, with
/// caller-provided tensor shapes and a configurable Protobuf recursion limit.
/// Refer to <see cref="OnnxScoringEstimator"/> for the required native dependencies and for
/// running on a GPU.
/// </summary>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="outputColumnNames">The output columns resulting from the transformation.</param>
/// <param name="inputColumnNames">The input columns.</param>
/// <param name="modelBytes">The <see cref="System.IO.Stream"/> containing the model bytes.</param>
/// <param name="shapeDictionary">ONNX shapes to be used over those loaded from <paramref name="modelBytes"/>.
/// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
/// is particularly useful for working with variable dimension inputs and outputs.
/// </param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <param name="recursionLimit">Optional, specifies the Protobuf CodedInputStream recursion limit. Default value is 100.</param>
/// <remarks>
/// When <paramref name="gpuDeviceId"/> is <see langword="null" />, the <see cref="P:MLContext.GpuDeviceId"/>
/// value is used instead, provided it is not <see langword="null" />.
/// </remarks>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
    string[] outputColumnNames,
    string[] inputColumnNames,
    Stream modelBytes,
    IDictionary<string, int[]> shapeDictionary,
    int? gpuDeviceId = null,
    bool fallbackToCpu = false,
    int recursionLimit = 100)
{
    // Resolve the effective device/fallback settings (may come from MLContext.GpuDeviceId).
    var (host, deviceIdToUse, cpuFallbackToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
    return new OnnxScoringEstimator(host, outputColumnNames, inputColumnNames, modelBytes, deviceIdToUse, cpuFallbackToUse, shapeDictionary: shapeDictionary, recursionLimit);
}
474+
253475
/// <summary>
254476
/// Create <see cref="DnnImageFeaturizerEstimator"/>, which applies one of the pre-trained DNN models in
255477
/// <see cref="DnnImageModelSelector"/> to featurize an image.

0 commit comments

Comments (0)