Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions src/Microsoft.ML.Core/Data/IHostEnvironment.cs
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,16 @@ internal interface IHostEnvironmentInternal : IHostEnvironment
/// The location for the temp files created by ML.NET
/// </summary>
string TempFilePath { get; set; }

/// <summary>
/// Allow falling back to running on the CPU if execution could not run on the GPU.
/// </summary>
bool FallbackToCpu { get; set; }

/// <summary>
/// GPU device ID to run execution on, <see langword="null" /> to run on CPU.
/// </summary>
int? GpuDeviceId { get; set; }
}

/// <summary>
Expand Down
4 changes: 4 additions & 0 deletions src/Microsoft.ML.Core/Environment/HostEnvironmentBase.cs
Original file line number Diff line number Diff line change
Expand Up @@ -330,6 +330,10 @@ public void RemoveListener(Action<IMessageSource, TMessage> listenerFunc)
public string TempFilePath { get; set; } = System.IO.Path.GetTempPath();
#pragma warning restore MSML_NoInstanceInitializers

public int? GpuDeviceId { get; set; }

public bool FallbackToCpu { get; set; }

protected readonly TEnv Root;
// This is non-null iff this environment was a fork of another. Disposing a fork
// doesn't free temp files. That is handled when the master is disposed.
Expand Down
18 changes: 18 additions & 0 deletions src/Microsoft.ML.Data/MLContext.cs
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,24 @@ public string TempFilePath
set { _env.TempFilePath = value; }
}

/// <summary>
/// Allow falling back to running on the CPU if execution could not run on the GPU.
/// </summary>
public bool FallbackToCpu
{
    get { return _env.FallbackToCpu; }
    set => _env.FallbackToCpu = value;
}

/// <summary>
/// GPU device ID to run execution on, <see langword="null" /> to run on CPU.
/// </summary>
public int? GpuDeviceId
{
    get { return _env.GpuDeviceId; }
    set => _env.GpuDeviceId = value;
}

/// <summary>
/// Create the ML context.
/// </summary>
Expand Down
86 changes: 67 additions & 19 deletions src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ public static class OnnxCatalog
/// <remarks>
/// The name/type of input columns must exactly match name/type of the ONNX model inputs.
/// The name/type of the produced output columns will match name/type of the ONNX model outputs.
/// If the gpuDeviceId value is <see langword="null" />, the <see cref="P:MLContext.GpuDeviceId"/> value will be used instead, provided it is not <see langword="null" />.
/// </remarks>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="modelFile">The path of the file containing the ONNX model.</param>
Expand All @@ -33,11 +34,11 @@ public static class OnnxCatalog
/// ]]>
/// </format>
/// </example>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
string modelFile,
int? gpuDeviceId = null,
bool fallbackToCpu = false)
=> new OnnxScoringEstimator(CatalogUtils.GetEnvironment(catalog), modelFile, gpuDeviceId, fallbackToCpu);
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog, string modelFile, int? gpuDeviceId = null, bool fallbackToCpu = false)
{
    // Resolve the effective device settings: an explicit gpuDeviceId argument takes
    // precedence; otherwise the environment-level GPU settings are consulted.
    var (environment, resolvedDeviceId, resolvedFallback) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
    return new OnnxScoringEstimator(environment, modelFile, resolvedDeviceId, resolvedFallback);
}

/// <summary>
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the input column.
Expand All @@ -48,6 +49,7 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
/// <remarks>
/// The name/type of input columns must exactly match name/type of the ONNX model inputs.
/// The name/type of the produced output columns will match name/type of the ONNX model outputs.
/// If the gpuDeviceId value is <see langword="null" /> the <see cref="P:MLContext.GpuDeviceId"/> value will be used if it is not <see langword="null" />.
/// </remarks>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="modelFile">The path of the file containing the ONNX model.</param>
Expand All @@ -69,8 +71,10 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
IDictionary<string, int[]> shapeDictionary,
int? gpuDeviceId = null,
bool fallbackToCpu = false)
=> new OnnxScoringEstimator(CatalogUtils.GetEnvironment(catalog), modelFile, gpuDeviceId, fallbackToCpu,
shapeDictionary: shapeDictionary);
{
var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
return new OnnxScoringEstimator(env, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
}

/// <summary>
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnName"/> column.
Expand All @@ -83,6 +87,9 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
/// <param name="modelFile">The path of the file containing the ONNX model.</param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If a GPU error occurs, whether to fall back to the CPU instead of raising an exception.</param>
/// <remarks>
/// If the gpuDeviceId value is <see langword="null" /> the <see cref="P:MLContext.GpuDeviceId"/> value will be used if it is not <see langword="null" />.
/// </remarks>
/// <example>
/// <format type="text/markdown">
/// <![CDATA[
Expand All @@ -96,19 +103,27 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
string modelFile,
int? gpuDeviceId = null,
bool fallbackToCpu = false)
=> new OnnxScoringEstimator(CatalogUtils.GetEnvironment(catalog), new[] { outputColumnName }, new[] { inputColumnName },
modelFile, gpuDeviceId, fallbackToCpu);
{
var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
return new OnnxScoringEstimator(env, new[] { outputColumnName }, new[] { inputColumnName }, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse);
}

/// <summary>
/// Create a <see cref="OnnxScoringEstimator"/> using the specified <see cref="OnnxOptions"/>.
/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
/// and how to run it on a GPU.
/// </summary>
/// <remarks>
/// If the options.GpuDeviceId value is <see langword="null" /> the <see cref="P:MLContext.GpuDeviceId"/> value will be used if it is not <see langword="null" />.
/// </remarks>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="options">Options for the <see cref="OnnxScoringEstimator"/>.</param>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog, OnnxOptions options)
=> new OnnxScoringEstimator(CatalogUtils.GetEnvironment(catalog), options.OutputColumns, options.InputColumns, options.ModelFile,
options.GpuDeviceId, options.FallbackToCpu, options.ShapeDictionary, options.RecursionLimit, options.InterOpNumThreads, options.IntraOpNumThreads);
{
var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, options.GpuDeviceId, options.FallbackToCpu);
return new OnnxScoringEstimator(env, options.OutputColumns, options.InputColumns, options.ModelFile,
gpuDeviceIdToUse, fallbackToCpuToUse, options.ShapeDictionary, options.RecursionLimit, options.InterOpNumThreads, options.IntraOpNumThreads);
}

/// <summary>
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnName"/> column.
Expand All @@ -125,6 +140,9 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
/// </param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <remarks>
/// If the gpuDeviceId value is <see langword="null" /> the <see cref="P:MLContext.GpuDeviceId"/> value will be used if it is not <see langword="null" />.
/// </remarks>
/// <example>
/// <format type="text/markdown">
/// <![CDATA[
Expand All @@ -139,8 +157,11 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
IDictionary<string, int[]> shapeDictionary,
int? gpuDeviceId = null,
bool fallbackToCpu = false)
=> new OnnxScoringEstimator(CatalogUtils.GetEnvironment(catalog), new[] { outputColumnName }, new[] { inputColumnName },
modelFile, gpuDeviceId, fallbackToCpu, shapeDictionary: shapeDictionary);
{
var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
return new OnnxScoringEstimator(env, new[] { outputColumnName }, new[] { inputColumnName },
modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
}

/// <summary>
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnNames"/> columns.
Expand All @@ -153,14 +174,19 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
/// <param name="modelFile">The path of the file containing the ONNX model.</param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <remarks>
/// If the gpuDeviceId value is <see langword="null" /> the <see cref="P:MLContext.GpuDeviceId"/> value will be used if it is not <see langword="null" />.
/// </remarks>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
string[] outputColumnNames,
string[] inputColumnNames,
string modelFile,
int? gpuDeviceId = null,
bool fallbackToCpu = false)
=> new OnnxScoringEstimator(CatalogUtils.GetEnvironment(catalog), outputColumnNames, inputColumnNames,
modelFile, gpuDeviceId, fallbackToCpu);
{
var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse);
}

/// <summary>
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnNames"/> columns.
Expand All @@ -177,15 +203,20 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
/// </param>
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <remarks>
/// If the gpuDeviceId value is <see langword="null" /> the <see cref="P:MLContext.GpuDeviceId"/> value will be used if it is not <see langword="null" />.
/// </remarks>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
string[] outputColumnNames,
string[] inputColumnNames,
string modelFile,
IDictionary<string, int[]> shapeDictionary,
int? gpuDeviceId = null,
bool fallbackToCpu = false)
=> new OnnxScoringEstimator(CatalogUtils.GetEnvironment(catalog), outputColumnNames, inputColumnNames,
modelFile, gpuDeviceId, fallbackToCpu, shapeDictionary: shapeDictionary);
{
var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
}

/// <summary>
/// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnNames"/> columns.
Expand All @@ -203,6 +234,9 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
/// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
/// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
/// <param name="recursionLimit">Optional, specifies the Protobuf CodedInputStream recursion limit. Default value is 100.</param>
/// <remarks>
/// If the gpuDeviceId value is <see langword="null" /> the <see cref="P:MLContext.GpuDeviceId"/> value will be used if it is not <see langword="null" />.
/// </remarks>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
string[] outputColumnNames,
string[] inputColumnNames,
Expand All @@ -211,8 +245,10 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
int? gpuDeviceId = null,
bool fallbackToCpu = false,
int recursionLimit = 100)
=> new OnnxScoringEstimator(CatalogUtils.GetEnvironment(catalog), outputColumnNames, inputColumnNames,
modelFile, gpuDeviceId, fallbackToCpu, shapeDictionary: shapeDictionary, recursionLimit);
{
var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary, recursionLimit);
}

/// <summary>
/// Create <see cref="DnnImageFeaturizerEstimator"/>, which applies one of the pre-trained DNN models in
Expand All @@ -237,5 +273,17 @@ public static DnnImageFeaturizerEstimator DnnFeaturizeImage(this TransformsCatal
Func<DnnImageFeaturizerInput, EstimatorChain<ColumnCopyingTransformer>> modelFactory,
string inputColumnName = null)
=> new DnnImageFeaturizerEstimator(CatalogUtils.GetEnvironment(catalog), outputColumnName, modelFactory, inputColumnName);

/// <summary>
/// Resolves the host environment together with the effective GPU device ID and
/// CPU-fallback flag to use for an ONNX estimator. When the caller did not supply
/// an explicit <paramref name="gpuDeviceId"/> and the environment implements
/// <see cref="Runtime.IHostEnvironmentInternal"/> with a non-null device ID, the
/// environment-level GPU settings are used instead.
/// </summary>
private static (Runtime.IHostEnvironment, int?, bool) GetGpuDeviceId(TransformsCatalog catalog, int? gpuDeviceId, bool fallbackToCpu)
{
    var env = CatalogUtils.GetEnvironment(catalog);

    // An explicit device ID always wins; only fall back to the environment-wide
    // settings when no device was requested by the caller.
    if (gpuDeviceId is null && env is Runtime.IHostEnvironmentInternal internalEnv && internalEnv.GpuDeviceId is not null)
    {
        gpuDeviceId = internalEnv.GpuDeviceId;
        fallbackToCpu = internalEnv.FallbackToCpu;
    }

    return (env, gpuDeviceId, fallbackToCpu);
}
}
}
7 changes: 7 additions & 0 deletions src/Microsoft.ML.OnnxTransformer/OnnxTransform.cs
Original file line number Diff line number Diff line change
Expand Up @@ -215,6 +215,13 @@ private static OnnxTransformer Create(IHostEnvironment env, ModelLoadContext ctx
RecursionLimit = recursionLimit
};

IHostEnvironmentInternal localEnvironment = env as IHostEnvironmentInternal;
Comment thread
tarekgh marked this conversation as resolved.
if (localEnvironment is not null)
{
options.GpuDeviceId = localEnvironment.GpuDeviceId;
options.FallbackToCpu = localEnvironment.FallbackToCpu;
}

return new OnnxTransformer(env, options, modelBytes);
}

Expand Down
2 changes: 2 additions & 0 deletions test/Microsoft.ML.OnnxTransformerTest/OnnxTransformTests.cs
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,8 @@ private float[] GetSampleArrayData()

public OnnxTransformTests(ITestOutputHelper output) : base(output)
{
ML.GpuDeviceId = _gpuDeviceId;
ML.FallbackToCpu = _fallbackToCpu;
}

[OnnxTheory]
Expand Down
2 changes: 2 additions & 0 deletions test/Microsoft.ML.TestFramework/GlobalBase.cs
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,9 @@ private static void AssertHandler(string msg, IExceptionContext ectx)
}
else
#endif
{
Assert.True(false, $"Assert failed: {msg}");
}
}

public static void AssertHandlerTest()
Expand Down