diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 3f040e54a..3e9c01a83 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -8,6 +8,7 @@ __Bug Fixes__: #1426 Sequential.eval() does not put model into eval mode
`torch.optim.lr_scheduler.LinearLR` `end_factor` default has been corrected, is now 1.0.
Update package version of SixLabors.ImageSharp to avoid security vulnerability
+Updated native DLL dependency loading to avoid relying on hardcoded version strings
__API Changes__: diff --git a/src/TorchSharp/Torch.cs b/src/TorchSharp/Torch.cs index 728fa9ccd..aa930ec3b 100644 --- a/src/TorchSharp/Torch.cs +++ b/src/TorchSharp/Torch.cs @@ -54,6 +54,26 @@ public static partial class torch static bool nativeBackendCudaLoaded = false; public static string __version__ => libtorchPackageVersion; + public static string NormalizeNuGetVersion(string versionString) + { + if (string.IsNullOrWhiteSpace(versionString)) + throw new ArgumentException($"Invalid NuGet version: {versionString}. Version string is null, empty or only contains whitespaces"); + + string[] parts = versionString.Split('+')[0].Split(new[] { '-' }, 2); + string[] versionParts = parts[0].Split('.'); + + if (versionParts.Length < 2 || versionParts.Length > 4 || !versionParts.All(v => int.TryParse(v, out _))) + throw new ArgumentException($"Invalid NuGet version: {versionString}. Please check: https://learn.microsoft.com/en-us/nuget/concepts/package-versioning"); + + string normalizedVersion = versionParts[0] + "." + versionParts[1]; + if (versionParts.Length > 2) normalizedVersion += "." + versionParts[2]; + if (versionParts.Length > 3 && int.Parse(versionParts[3]) != 0) normalizedVersion += "." + versionParts[3]; + + if (parts.Length > 1) + normalizedVersion += "-" + parts[1]; + + return normalizedVersion; + } internal static bool TryLoadNativeLibraryFromFile(string path, StringBuilder trace) { bool ok; @@ -168,14 +188,14 @@ private static void LoadNativeBackend(bool useCudaBackend, out StringBuilder? 
tr if (torchsharpLoc!.Contains("torchsharp") && torchsharpLoc.Contains("lib") && Directory.Exists(packagesDir) && Directory.Exists(torchsharpHome)) { - var torchSharpVersion = Path.GetFileName(torchsharpHome); // really GetDirectoryName - + var torchSharpVersion = NormalizeNuGetVersion(Path.GetFileName(torchsharpHome)); + var normalizedLibtorchPackageVersion = NormalizeNuGetVersion(libtorchPackageVersion); if (useCudaBackend) { var consolidatedDir = Path.Combine(torchsharpLoc, $"cuda-{cudaVersion}"); trace.AppendLine($" Trying dynamic load for .NET/F# Interactive by consolidating native {cudaRootPackage}-* binaries to {consolidatedDir}..."); - var cudaOk = CopyNativeComponentsIntoSingleDirectory(packagesDir, $"{cudaRootPackage}-*", libtorchPackageVersion, consolidatedDir, trace); + var cudaOk = CopyNativeComponentsIntoSingleDirectory(packagesDir, $"{cudaRootPackage}-*", normalizedLibtorchPackageVersion, consolidatedDir, trace); if (cudaOk) { cudaOk = CopyNativeComponentsIntoSingleDirectory(packagesDir, "torchsharp", torchSharpVersion, consolidatedDir, trace); if (cudaOk) { @@ -193,7 +213,7 @@ private static void LoadNativeBackend(bool useCudaBackend, out StringBuilder? 
tr trace.AppendLine($" Trying dynamic load for .NET/F# Interactive by consolidating native {cpuRootPackage}-* binaries to {consolidatedDir}..."); - var cpuOk = CopyNativeComponentsIntoSingleDirectory(packagesDir, cpuRootPackage, libtorchPackageVersion, consolidatedDir, trace); + var cpuOk = CopyNativeComponentsIntoSingleDirectory(packagesDir, cpuRootPackage, normalizedLibtorchPackageVersion, consolidatedDir, trace); if (cpuOk) { cpuOk = CopyNativeComponentsIntoSingleDirectory(packagesDir, "torchsharp", torchSharpVersion, consolidatedDir, trace); if (cpuOk) { diff --git a/test/TorchSharpTest/TestTorchSharp.cs b/test/TorchSharpTest/TestTorchSharp.cs index 88929cbfe..549b8f131 100644 --- a/test/TorchSharpTest/TestTorchSharp.cs +++ b/test/TorchSharpTest/TestTorchSharp.cs @@ -450,6 +450,17 @@ public void AllowFP16ReductionCuBLAS() Assert.False(torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction); } + [Fact] + public void CheckVersionStrings() + { + Assert.Equal("2.5.1", torch.NormalizeNuGetVersion("2.5.1.0")); + Assert.Equal("0.105.0", torch.NormalizeNuGetVersion("0.105.0.0")); + Assert.Equal("0.1.0-alpha", torch.NormalizeNuGetVersion("0.1.0-alpha")); + Assert.Equal("0.1.0", torch.NormalizeNuGetVersion("0.1.0")); + Assert.Throws<ArgumentException>(() => torch.NormalizeNuGetVersion("")); + Assert.Throws<ArgumentException>(() => torch.NormalizeNuGetVersion("1.2.3.4.5")); + } + // Because some of the tests mess with global state, and are run in parallel, we need to // acquire a lock before testing setting the default RNG see. private static object _lock = new object();