Commit 6415f7f

Update OnnxTransformer Docs (#5296)
1 parent f87a3bb commit 6415f7f

2 files changed (+30 −8 lines)


src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs

Lines changed: 24 additions & 3 deletions
@@ -15,6 +15,8 @@ public static class OnnxCatalog
 /// <summary>
 /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the input column.
 /// Input/output columns are determined based on the input/output columns of the provided ONNX model.
+/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+/// and how to run it on a GPU.
 /// </summary>
 /// <remarks>
 /// The name/type of input columns must exactly match name/type of the ONNX model inputs.
@@ -40,14 +42,19 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
 /// <summary>
 /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the input column.
 /// Input/output columns are determined based on the input/output columns of the provided ONNX model.
+/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+/// and how to run it on a GPU.
 /// </summary>
 /// <remarks>
 /// The name/type of input columns must exactly match name/type of the ONNX model inputs.
 /// The name/type of the produced output columns will match name/type of the ONNX model outputs.
 /// </remarks>
 /// <param name="catalog">The transform's catalog.</param>
 /// <param name="modelFile">The path of the file containing the ONNX model.</param>
-/// <param name="shapeDictionary">ONNX shape should be used to over those loaded from <paramref name="modelFile"/>.</param>
+/// <param name="shapeDictionary">ONNX shapes to be used over those loaded from <paramref name="modelFile"/>.
+/// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
+/// is particularly useful for working with variable dimension inputs and outputs.
+/// </param>
 /// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
 /// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
 /// <example>
@@ -67,6 +74,8 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
 
 /// <summary>
 /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnName"/> column.
+/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+/// and how to run it on a GPU.
 /// </summary>
 /// <param name="catalog">The transform's catalog.</param>
 /// <param name="outputColumnName">The output column resulting from the transformation.</param>
@@ -92,12 +101,17 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
 
 /// <summary>
 /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnName"/> column.
+/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+/// and how to run it on a GPU.
 /// </summary>
 /// <param name="catalog">The transform's catalog.</param>
 /// <param name="outputColumnName">The output column resulting from the transformation.</param>
 /// <param name="inputColumnName">The input column.</param>
 /// <param name="modelFile">The path of the file containing the ONNX model.</param>
-/// <param name="shapeDictionary">ONNX shape should be used to over those loaded from <paramref name="modelFile"/>.</param>
+/// <param name="shapeDictionary">ONNX shapes to be used over those loaded from <paramref name="modelFile"/>.
+/// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
+/// is particularly useful for working with variable dimension inputs and outputs.
+/// </param>
 /// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
 /// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
 /// <example>
@@ -119,6 +133,8 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
 
 /// <summary>
 /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnNames"/> columns.
+/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+/// and how to run it on a GPU.
 /// </summary>
 /// <param name="catalog">The transform's catalog.</param>
 /// <param name="outputColumnNames">The output columns resulting from the transformation.</param>
@@ -137,12 +153,17 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
 
 /// <summary>
 /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnNames"/> columns.
+/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+/// and how to run it on a GPU.
 /// </summary>
 /// <param name="catalog">The transform's catalog.</param>
 /// <param name="outputColumnNames">The output columns resulting from the transformation.</param>
 /// <param name="inputColumnNames">The input columns.</param>
 /// <param name="modelFile">The path of the file containing the ONNX model.</param>
-/// <param name="shapeDictionary">ONNX shape should be used to over those loaded from <paramref name="modelFile"/>.</param>
+/// <param name="shapeDictionary">ONNX shapes to be used over those loaded from <paramref name="modelFile"/>.
+/// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
+/// is particularly useful for working with variable dimension inputs and outputs.
+/// </param>
 /// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
 /// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
 public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
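The shapeDictionary parameter documented in the diff above can be exercised as in the following minimal C# sketch. It is not part of the commit; the model file, tensor names, and shapes are made-up placeholders for an image-style model whose ONNX graph declares a dynamic batch dimension. The keys of the dictionary must match the tensor names stated in the ONNX model.

// Sketch only (not from the commit): override the shapes loaded from the ONNX
// file by passing a shapeDictionary to ApplyOnnxModel. "model.onnx" and the
// "input"/"output" names and sizes are hypothetical.
using System.Collections.Generic;
using Microsoft.ML;

var mlContext = new MLContext();

// Keys must match the tensor names declared in the ONNX model.
var shapeDictionary = new Dictionary<string, int[]>
{
    { "input",  new[] { 1, 3, 224, 224 } },  // pin a variable batch/size dimension
    { "output", new[] { 1, 1000 } }
};

var pipeline = mlContext.Transforms.ApplyOnnxModel(
    outputColumnNames: new[] { "output" },
    inputColumnNames: new[] { "input" },
    modelFile: "model.onnx",
    shapeDictionary: shapeDictionary,
    gpuDeviceId: null,        // run on CPU; set 0 (or 1) to request a GPU device
    fallbackToCpu: false);

Passing explicit shapes this way is what the new doc text means by working with variable dimension inputs and outputs: the estimator uses the supplied sizes instead of the dynamic ones loaded from the model file.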

src/Microsoft.ML.OnnxTransformer/OnnxTransform.cs

Lines changed: 6 additions & 5 deletions
@@ -36,6 +36,8 @@ namespace Microsoft.ML.Transforms.Onnx
 {
 /// <summary>
 /// <see cref="ITransformer"/> resulting from fitting an <see cref="OnnxScoringEstimator"/>.
+/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+/// and how to run it on a GPU.
 /// </summary>
 public sealed class OnnxTransformer : RowToRowTransformerBase
 {
@@ -719,25 +721,24 @@ public NamedOnnxValue GetNamedOnnxValue()
 /// | Required NuGet in addition to Microsoft.ML | Microsoft.ML.OnnxTransformer (always), either Microsoft.ML.OnnxRuntime 1.3.0 (for CPU processing) or Microsoft.ML.OnnxRuntime.Gpu 1.3.0 (for GPU processing if GPU is available) |
 /// | Exportable to ONNX | No |
 ///
+/// To create this estimator use the following APIs:
+/// [ApplyOnnxModel](xref:Microsoft.ML.OnnxCatalog.ApplyOnnxModel*)
+///
 /// Supports inferencing of models in ONNX 1.6 format (opset 11), using the [Microsoft.ML.OnnxRuntime](https://www.nuget.org/packages/Microsoft.ML.OnnxRuntime/) library.
 /// Models are scored on CPU if the project references Microsoft.ML.OnnxRuntime and on the GPU if the project references Microsoft.ML.OnnxRuntime.Gpu.
 /// Every project using the OnnxScoringEstimator must reference one of the above two packages.
 ///
 /// To run on a GPU, use the
 /// NuGet package [Microsoft.ML.OnnxRuntime.Gpu](https://www.nuget.org/packages/Microsoft.ML.OnnxRuntime.Gpu/) instead of the Microsoft.ML.OnnxRuntime nuget (which is for CPU processing). Microsoft.ML.OnnxRuntime.Gpu
 /// requires a [CUDA supported GPU](https://developer.nvidia.com/cuda-gpus#compute), the [CUDA 10.1 Toolkit](https://developer.nvidia.com/cuda-downloads), and [cuDNN 7.6.5](https://developer.nvidia.com/cudnn) (as indicated on [Onnxruntime's documentation](https://github.com/Microsoft/onnxruntime#default-gpu-cuda)).
-/// Set parameter 'gpuDeviceId' to a valid non-negative integer. Typical device ID values are 0 or 1.
+/// When creating the estimator through [ApplyOnnxModel](xref:Microsoft.ML.OnnxCatalog.ApplyOnnxModel*), set the parameter 'gpuDeviceId' to a valid non-negative integer. Typical device ID values are 0 or 1. If the GPU device isn't found but 'fallbackToCpu = true', then the estimator will run on the CPU. If the GPU device isn't found and 'fallbackToCpu = false', then the estimator will throw an exception.
 ///
 /// The inputs and outputs of the ONNX models must be Tensor type. Sequence and Maps are not yet supported.
 ///
 /// OnnxRuntime works on Windows, MacOS and Ubuntu 16.04 Linux 64-bit platforms.
 /// Visit [ONNX Models](https://github.com/onnx/models) to see a list of readily available models to get started with.
 /// Refer to [ONNX](http://onnx.ai) for more information.
 ///
-/// To create this estimator use the following:
-/// [ApplyOnnxModel](xref:Microsoft.ML.OnnxCatalog.ApplyOnnxModel*)
-///
-/// Check the See Also section for links to usage examples.
 /// ]]>
 /// </format>
 /// </remarks>
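As a rough illustration of the GPU notes in the remarks above, the following sketch (not part of the commit) wires gpuDeviceId and fallbackToCpu into a pipeline and scores data with the fitted OnnxTransformer. The model path, tensor names, and input size are hypothetical, and the project is assumed to reference Microsoft.ML.OnnxRuntime.Gpu together with the CUDA 10.1 / cuDNN 7.6.5 dependencies listed there.

// Sketch only: GPU scoring with fallback to CPU. Requires Microsoft.ML.OnnxTransformer
// plus Microsoft.ML.OnnxRuntime.Gpu (instead of Microsoft.ML.OnnxRuntime).
// "model.onnx" and the "input"/"output" tensor names/sizes are hypothetical.
using Microsoft.ML;
using Microsoft.ML.Data;

var mlContext = new MLContext();

var pipeline = mlContext.Transforms.ApplyOnnxModel(
    outputColumnName: "output",
    inputColumnName: "input",
    modelFile: "model.onnx",
    gpuDeviceId: 0,          // first CUDA device; typical values are 0 or 1
    fallbackToCpu: true);    // score on CPU instead of throwing if no GPU is found

// Fitting an OnnxScoringEstimator only validates the input schema; the resulting
// OnnxTransformer is what scores rows.
IDataView data = mlContext.Data.LoadFromEnumerable(new[]
{
    new ModelInput { Input = new float[3 * 224 * 224] }
});
var transformer = pipeline.Fit(data);
IDataView scored = transformer.Transform(data);

// Hypothetical input schema; the vector size must match the model's "input" tensor.
class ModelInput
{
    [VectorType(3 * 224 * 224)]
    [ColumnName("input")]
    public float[] Input { get; set; }
}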
