Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions src/Baballonia.Desktop/Baballonia.Desktop.csproj
Original file line number Diff line number Diff line change
Expand Up @@ -45,10 +45,9 @@
<!-- Linux-specific -->
<PackageReference Include="Project-Babble.OpenCvSharp4.mini.runtime.ubuntu.22.04-x64" Version="4.11.0.1" Condition="$([MSBuild]::IsOSPlatform('Linux')) And
!($([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture.ToString().Contains('Arm')))" />

<PackageReference Include="Project-Babble.OpenCvSharp4.mini.runtime.ubuntu.22.04-arm64" Version="4.11.0.1" Condition="$([MSBuild]::IsOSPlatform('Linux')) And
$([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture.ToString().Contains('Arm'))" />
<PackageReference Include="Microsoft.ML.OnnxRuntime" Version="1.22.0" Condition="$([MSBuild]::IsOSPlatform('Linux'))" />
<PackageReference Include="Microsoft.ML.OnnxRuntime.Managed" Version="1.22.0" Condition="$([MSBuild]::IsOSPlatform('Linux'))" />
<PackageReference Include="SkiaSharp.NativeAssets.Linux" Version="3.119.0" Condition="$([MSBuild]::IsOSPlatform('Linux'))" />
<PackageReference Include="Xaml.Behaviors.Interactions.Draggable" Version="11.3.9.5" />
<ProjectReference Include="..\Baballonia.LibV4L2Capture\Baballonia.LibV4L2Capture.csproj" Condition="$([MSBuild]::IsOSPlatform('Linux'))" />
Expand Down
53 changes: 31 additions & 22 deletions src/Baballonia/Services/DefaultInferenceRunner.cs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Security.Cryptography;

namespace Baballonia.Services;

Expand All @@ -31,11 +32,12 @@ public void Setup(string modelPath, bool useGpu = true)
if (!File.Exists(modelPath))
throw new FileNotFoundException($"{modelPath} does not exist");

_logger = loggerFactory.CreateLogger(this.GetType().Name + "." + Path.GetFileName(modelPath));
var modelName = Path.GetFileName(modelPath);
_logger = loggerFactory.CreateLogger(GetType().Name + "." + modelName);

SessionOptions sessionOptions = SetupSessionOptions();
if (useGpu)
ConfigurePlatformSpecificGpu(sessionOptions, modelPath);
ConfigurePlatformSpecificGpu(sessionOptions, modelName);
else
sessionOptions.AppendExecutionProvider_CPU();

Expand All @@ -56,7 +58,7 @@ public void Setup(string modelPath, bool useGpu = true)
/// </summary>
private void InitializeModelMetadata()
{
_isOldEyeModel = _session.ModelMetadata.CustomMetadataMap.Count() == 0;
_isOldEyeModel = _session.ModelMetadata.CustomMetadataMap.Count == 0;

if (!_isOldEyeModel)
{
Expand Down Expand Up @@ -98,7 +100,7 @@ private void ConfigurePlatformSpecificGpu(SessionOptions sessionOptions, string
if (OperatingSystem.IsWindows())
{
// If DirectML is supported on the user's system, try using it first.
// This has support for both AMD and Nvidia GPUs, and uses less memory in my testing
// This has support for both AMD and Nvidia GPUs, and generally works OOB without any weird setup
try
{
sessionOptions.AppendExecutionProvider_DML();
Expand All @@ -107,13 +109,33 @@ private void ConfigurePlatformSpecificGpu(SessionOptions sessionOptions, string
}
catch (Exception)
{
_logger.LogWarning("Failed to create DML Execution Provider on Windows. Falling back to CUDA...");
_logger.LogInformation("Failed to create DML Execution Provider on Windows. Falling back to CUDA...");
}
}

if (OperatingSystem.IsLinux())
{
try
{
// MIGraphX is AMD's graph inference library, built on top of ROCm and
// available only on Linux; you'll need to install it per your distro's
// package manager instructions.
// Every time we load a model we'll hitch, but it *works*
// https://onnxruntime.ai/docs/execution-providers/MIGraphX-ExecutionProvider.html
// For some reason we can't load/save compiled graphs!?
sessionOptions.AppendExecutionProvider_MIGraphX();
_logger.LogInformation("Initialized ExecutionProvider: MIGraphX for {ModelName}", modelName);
return;
}
catch (Exception)
{
_logger.LogInformation("Failed to create MIGraphX Execution Provider.");
}
}

// If the user's system does not support DirectML (for whatever reason —
// it ships with Windows 10, version 1903 (10.0; Build 18362) and later)
// Fallback on good ol' CUDA
// or MiGraphX, try CUDA. This requires manual setup, see:
// https://github.com/Project-Babble/Baballonia/pull/247
try
{
sessionOptions.AppendExecutionProvider_CUDA();
Expand All @@ -122,20 +144,7 @@ private void ConfigurePlatformSpecificGpu(SessionOptions sessionOptions, string
}
catch (Exception)
{
_logger.LogWarning("Failed to create CUDA Execution Provider.");
}

// And, if CUDA fails (or we have an AMD card)
// Try one more time with MiGraphX
try
{
sessionOptions.AppendExecutionProvider_MIGraphX();
_logger.LogInformation("Initialized ExecutionProvider: MIGraphX for {ModelName}", modelName);
return;
}
catch (Exception)
{
_logger.LogWarning("Failed to create MIGraphX Execution Provider.");
_logger.LogInformation("Failed to create CUDA Execution Provider.");
}

// Finally, try OpenVINO (for Intel CPUs/GPUs)
Expand All @@ -147,10 +156,10 @@ private void ConfigurePlatformSpecificGpu(SessionOptions sessionOptions, string
}
catch (Exception)
{
_logger.LogWarning("Failed to create OpenVINO Execution Provider.");
_logger.LogInformation("Failed to create OpenVINO Execution Provider.");
}

_logger.LogWarning("No GPU acceleration will be applied.");
_logger.LogInformation("No GPU acceleration will be applied.");
sessionOptions.AppendExecutionProvider_CPU();
}

Expand Down
Loading