Merge pull request #443 from smoothdeveloper/minor-torchsharp-update
updating to latest TorchSharp
dsyme committed Oct 3, 2023
2 parents 1bfe5b6 + 8926222 commit afee7e9
Showing 10 changed files with 35 additions and 37 deletions.
7 changes: 2 additions & 5 deletions DEVGUIDE.md
```diff
@@ -78,15 +78,12 @@ By default in-branch testing is only done on CPU. To enable on GPU/CUDA you must:

 1. Make sure you have a device eligible for CUDA 11.1 and all device drivers installed (e.g. install the appropriate NVIDIA CUDA SDK)

-2. Manually enable Torch CUDA binaries in `DiffSharp.Tests.fsproj` or set the `DIFFSHARP_TESTGPU` environment variable to `true` (e.g. `dotnet test /p:DIFFSHARP_TESTGPU=true`)
-
-3. Verify that `dsharp.isCudaEnabled()` is returning true and GPU testing is enabled in `TestUtil.fs`.
+2. Use `dotnet test /p:DIFFSHARP_TESTGPU=true`
+
+3. Verify that `dsharp.isCudaEnabled(Device.GPU)` is returning true and GPU testing is enabled in `TestUtil.fs`.

 ## Micro Performance Benchmarking

 Python numbers must be collected in a separate run, they are currently injected back into source code (ugh)
 to get figures in one report. There are better ways to do this.
```
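The GPU testing workflow now reduces to one environment switch plus a runtime check. A minimal F# sketch of that check, assuming the `dsharp.isCudaEnabled` and `Device.GPU` entry points named in the guide (the `dsharp.config` call is illustrative, not part of this commit):

```fsharp
// Sketch only: confirm the Torch backend can see a CUDA device
// before kicking off a GPU test run.
open DiffSharp

dsharp.config(backend=Backend.Torch)

if dsharp.isCudaEnabled(Device.GPU) then
    printfn "CUDA detected: GPU tests will run"
else
    printfn "CUDA not detected: tests will run on CPU only"
```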
10 changes: 2 additions & 8 deletions Directory.Build.props
```diff
@@ -2,12 +2,12 @@

   <PropertyGroup>

-    <TorchSharpVersion>0.96.8</TorchSharpVersion>
+    <TorchSharpVersion>0.100.3</TorchSharpVersion>
     <FSharpCoreVersion>6.0.3</FSharpCoreVersion>

     <!-- Standard nuget.org location -->
     <RestoreSources>https://api.nuget.org/v3/index.json</RestoreSources>
-    <DIFFSHARP_TESTGPU Condition="'$(COMPUTERNAME)' == 'MSRC-3617253'">true</DIFFSHARP_TESTGPU>
+    <DIFFSHARP_TESTGPU Condition="'$(COMPUTERNAME)' == 'DESKTOP-RSKK5GT'">true</DIFFSHARP_TESTGPU>

     <!-- TorchSharp local build packages -->
     <RestoreSources Condition="Exists('$(MSBuildThisFileDirectory)../TorchSharp/bin/packages/Release')">
@@ -19,12 +19,6 @@
       $(RestoreSources);$(MSBuildThisFileDirectory)../TorchSharp/bin/packages/Debug;
     </RestoreSources>

-    <!-- TorchSharp CI build packages
-    <RestoreSources>
-      $(RestoreSources);
-      https://donsyme.pkgs.visualstudio.com/TorchSharp/_packaging/packages2/nuget/v3/index.json
-    </RestoreSources>
-    -->
     <!-- turn on unused variable warnings -->
     <OtherFlags>--warnon:1182 $(OtherFlags)</OtherFlags>
     <!-- turn on documentation warnings -->
```
17 changes: 9 additions & 8 deletions src/DiffSharp.Backends.Torch/Torch.RawTensor.fs
```diff
@@ -11,6 +11,7 @@ open DiffSharp.Backends
 open DiffSharp.Util
 open TorchSharp

+type torch_cuda = torch.cuda
 type TorchShape = int64[]
 type TorchDevice = Torch.Device
 type Device = DiffSharp.Device
@@ -293,7 +294,7 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
     override t.SplitT(sizes, dim) =
         let shape = t.Shape
         let outShapes = Shape.checkCanSplit shape sizes dim
-        let results = tt.split(int64s sizes, dim)
+        let results = tt.split(int64s sizes, int64 dim)
         (results, outShapes) ||> Array.map2 (fun rvalues outShape ->
             t.MakeLike(rvalues, shape=outShape))

@@ -477,7 +478,7 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         t1.MakeLike(result, dtype=Dtype.Bool)

     override t.MaxReduceT(dim, keepDim) =
-        let (struct (maxValues, indexes)) = tt.max(int64 dim, keepDim=keepDim)
+        let (struct (maxValues, indexes)) = tt.max(int64 dim, keepdim=keepDim)
         let newShape = Shape.checkCanMinMaxReduce dim keepDim t.Shape
         let maxValuesResult = t.MakeLike(maxValues, shape=newShape)
         let indexesResult = t.MakeLike(indexes, shape=newShape, dtype=Dtype.Int64).Cast(Dtype.Int32)
@@ -515,7 +516,7 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         res |> Array.map int32

     override t.MinReduceT(dim, keepDim) =
-        let (struct (minValues, indexes)) = tt.min(int64 dim, keepDim=keepDim)
+        let (struct (minValues, indexes)) = tt.min(int64 dim, keepdim=keepDim)
         let newShape = Shape.checkCanMinMaxReduce dim keepDim t.Shape
         let minValuesResult = t.MakeLike(minValues, shape=newShape)
         let indexesResult = t.MakeLike(indexes, shape=newShape, dtype=Dtype.Int64).Cast(Dtype.Int32)
@@ -1440,8 +1441,8 @@ type TorchBackendTensorStatics() =

             match deviceType with
             | None | Some DiffSharp.DeviceType.CUDA ->
-                if torch.cuda.is_available() then
-                    let ncuda = torch.cuda.device_count()
+                if torch_cuda.is_available() then
+                    let ncuda = torch_cuda.device_count()
                     for i in 0 .. ncuda - 1 do
                         yield (DiffSharp.Device(DiffSharp.DeviceType.CUDA, i))
             | _ -> ()
@@ -1453,12 +1454,12 @@
     override _.IsDeviceTypeAvailable (deviceType) =
         match deviceType with
         | DiffSharp.DeviceType.CPU -> true
-        | DiffSharp.DeviceType.CUDA -> torch.cuda.is_available()
+        | DiffSharp.DeviceType.CUDA -> torch_cuda.is_available()
         | _ -> isSupported deviceType

     override _.Seed(seed) =
-        // TODO (important): we need to do *both* this Torch.SetSeed and CUDA SetSeed when device is GPU. CPU seed and CUDA seed are handled separately in torch and libtorch.
-        // However at the point of writing this comment, Cuda SetSeed was not available in TorchSharp
+        if torch_cuda.is_available() then
+            torch_cuda.manual_seed(int64 seed) |> ignore
         torch.random.manual_seed(int64 seed) |> ignore

     override _.Zero(dtype, device) =
```
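Besides the `torch_cuda` abbreviation for TorchSharp's `torch.cuda` static class and the renamed `keepdim`/`int64 dim` arguments, the substantive change is in `Seed`: libtorch tracks CPU and CUDA random state separately, so both RNGs must now be seeded. A minimal standalone sketch of that pattern, assuming the TorchSharp 0.100.x API surface used above:

```fsharp
// Sketch of the seeding pattern adopted in Seed above.
open TorchSharp

let seedAll (seed: int64) =
    // Seed the CUDA RNGs first when a GPU is visible; manual_seed
    // returns a value we do not need, hence the ignore.
    if torch.cuda.is_available() then
        torch.cuda.manual_seed(seed) |> ignore
    // Then seed the CPU RNG (returns a generator, also discarded).
    torch.random.manual_seed(seed) |> ignore

seedAll 123L
```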
7 changes: 3 additions & 4 deletions src/DiffSharp.Core/Op.BMM.fs
```diff
@@ -8,10 +8,9 @@ namespace DiffSharp
 [<AutoOpen>]
 module OpBMMExtensions =

-    /// <summary>Batched matrix product of two tensors. Tensors <paramref name="a" /> and <paramref name="b" /> must be 3d tensors each containing the same number of matrices. If <paramref name="a" /> is a \(b \times n \times m\) tensor, <paramref name="b" /> is a \(b \times m \times p\) tensor, the result will be a \(b \times n \times p\) tensor.</summary>
-    /// <param name="a">The first tensor.</param>
-    /// <param name="b">The second tensor.</param>
     type Tensor with
+        /// <summary>Batched matrix product of two tensors. Tensors <paramref name="b" /> must be 3d tensors each containing the same number of matrices. If the tensor is a \(b \times n \times m\) tensor, and <paramref name="b" /> is a \(b \times m \times p\) tensor, the result will be a \(b \times n \times p\) tensor.</summary>
+        /// <param name="b">The second tensor.</param>
         member a.bmm(b:Tensor) =
             Shape.checkCanBMM a.shape b.shape |> ignore
             Tensor.Op
@@ -25,7 +24,7 @@ module OpBMMExtensions =
             (a,b)

     type dsharp with
-        /// <summary>Batched matrix product of two tensors.</summary>
+        /// <summary>Batched matrix product of two tensors. Tensors <paramref name="a" /> and <paramref name="b" /> must be 3d tensors each containing the same number of matrices. If <paramref name="a" /> is a \(b \times n \times m\) tensor, <paramref name="b" /> is a \(b \times m \times p\) tensor, the result will be a \(b \times n \times p\) tensor.</summary>
         /// <param name="a">The first tensor.</param>
         /// <param name="b">The second tensor.</param>
         static member bmm(a:Tensor, b:Tensor) = a.bmm(b)
```
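The relocated doc comments spell out the shape contract. A short usage sketch (the shapes and values are illustrative): for 3d inputs of shapes \(b \times n \times m\) and \(b \times m \times p\), `bmm` yields a \(b \times n \times p\) result:

```fsharp
open DiffSharp

// 2x3x4 batched with 2x4x5 gives 2x3x5: one matrix product per batch entry.
let a = dsharp.randn([2; 3; 4])
let b = dsharp.randn([2; 4; 5])
let c = dsharp.bmm(a, b)   // equivalent to a.bmm(b)
printfn "%A" c.shape       // 2x3x5
```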
5 changes: 5 additions & 0 deletions src/DiffSharp.Core/Op.Outer.fs
```diff
@@ -9,11 +9,16 @@ namespace DiffSharp
 module OpOuterExtensions =

     type Tensor with
+        /// <summary>Outer product of two tensors.</summary>
+        /// <param name="b">The second tensor.</param>
         member a.outer(b:Tensor) =
             match a.dim, b.dim with
             | 1, 1 -> a.unsqueeze(1).matmul(b.unsqueeze(0))
             | 2, 2 when a.shape[0] = b.shape[0] -> a.unsqueeze(2).bmm(b.unsqueeze(1)) // Batched outer product
             | _ -> failwithf "Outer product unsupported for tensor shapes %A %A" a.shape b.shape

     type dsharp with
+        /// <summary>Outer product of two tensors.</summary>
+        /// <param name="a">The first tensor.</param>
+        /// <param name="b">The second tensor.</param>
         static member outer(a:Tensor, b:Tensor) = a.outer(b)
```
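A short sketch of the two cases the implementation above supports: 1d inputs give an ordinary outer-product matrix, and matching 2d inputs give a batched outer product (values are illustrative):

```fsharp
open DiffSharp

// 1d x 1d: result has shape 3x2, with m.[i,j] = u.[i] * v.[j].
let u = dsharp.tensor([1.0; 2.0; 3.0])
let v = dsharp.tensor([10.0; 20.0])
let m = dsharp.outer(u, v)

// 2d x 2d with matching leading dimension: one outer product per row,
// so 4x3 with 4x2 gives a 4x3x2 result.
let ub = dsharp.randn([4; 3])
let vb = dsharp.randn([4; 2])
let mb = ub.outer(vb)
```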
1 change: 1 addition & 0 deletions tests/DiffSharp.Tests/TestDiffSharp.fs
```diff
@@ -127,6 +127,7 @@ type TestDiffSharp () =
     [<Test>]
     member this.TestSeed () =
         for combo in Combos.FloatingPointExcept16s do
+            printfn "%A" (combo.device, combo.backend, combo.dtype)
             use _holder = dsharp.useConfig(combo.dtype, combo.device, combo.backend)
             dsharp.seed(123)
             let t = combo.randint(0,10,[25])
```
1 change: 1 addition & 0 deletions tests/DiffSharp.Tests/TestDistributions.fs
```diff
@@ -108,6 +108,7 @@ type TestDistributions () =
     [<Test>]
     member _.TestDistributionsUniformBatched () =
         for combo in Combos.AllDevicesAndBackendsFloat32 do
+            printfn "%A" (combo.device, combo.backend, combo.dtype)
             let lowCorrect = combo.tensor([0.5, 0., -5.])
             let highCorrect = combo.tensor([10.5, 1., 5.])
             let rangeCorrect = highCorrect - lowCorrect
```
2 changes: 1 addition & 1 deletion tests/DiffSharp.Tests/TestOp.BMM.fs
```diff
@@ -13,7 +13,7 @@ open DiffSharp
 type TestTensorBMM () =
     [<Test>]
     member _.TestTensorBMM () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            let t1 = combo.tensor([[[-1.0372e+00, 7.5673e-01, 1.9448e+00, 3.6433e+00, -3.9134e-01],
                                    [-1.7011e+00, 3.0675e+00, 1.8387e+00, -2.3037e-01, 5.0916e-01],
                                    [ 2.1869e+00, 1.5561e+00, 1.2905e+00, -3.5149e-03, -2.0392e+00],
```
2 changes: 1 addition & 1 deletion tests/DiffSharp.Tests/TestOp.Outer.fs
```diff
@@ -13,7 +13,7 @@ open DiffSharp
 type TestTensorOuter () =
     [<Test>]
     member _.TestTensorOuter () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            let a1 = combo.tensor([ 1.7865, 1.2723, 0.2065, -0.4601, 0.3218])
            let b1 = combo.tensor([ 2.1136, 1.0551, -0.4575])
```
20 changes: 10 additions & 10 deletions tests/DiffSharp.Tests/TestTensor.fs
```diff
@@ -2458,7 +2458,7 @@ type TestTensor () =

     [<Test>]
     member _.TestTensorMatMul11 () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            let t1 = combo.tensor([8.0766; 3.3030; 2.1732; 8.9448; 1.1028])
            let t2 = combo.tensor([5.1067; 7.4633; 3.6027; 9.0070; 7.3012])
            let t3 = t1.matmul(t2)
@@ -2468,7 +2468,7 @@

     [<Test>]
     member _.TestTensorMatMul12 () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            let t1 = combo.tensor([8.0766; 3.3030; 2.1732; 8.9448; 1.1028])
            let t2 = combo.tensor([[5.1067; 0.0681];
                                   [7.4633; 3.6027];
@@ -2482,7 +2482,7 @@

     [<Test>]
     member _.TestTensorMatMul13 () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            // 5 --> 1x5 --> 3x1x5 (batching expansion)
            let t1 = combo.tensor([8.0766; 3.3030; 2.1732; 8.9448; 1.1028])

@@ -2510,7 +2510,7 @@

     [<Test>]
     member _.TestTensorMatMul21 () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            let t1 = combo.tensor([[8.0766; 3.3030; 2.1732; 8.9448; 1.1028];
                                   [5.1067; 7.4633; 3.6027; 9.0070; 7.3012]])
            let t2 = combo.tensor([0.0681; 3.6027; 7.3012; 2.8728; 2.3695])
@@ -2521,7 +2521,7 @@

     [<Test>]
     member _.TestTensorMatMul31 () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            //2 x 2 x 5
            let t1 = combo.tensor([[[8.0766; 3.3030; 2.1732; 8.9448; 1.1028];
                                    [5.1067; 7.4633; 3.6027; 9.0070; 7.3012]];
@@ -2539,7 +2539,7 @@

     [<Test>]
     member _.TestTensorMatMul33 () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            let t1 = combo.tensor([[8.0766; 3.3030; 2.1732; 8.9448; 1.1028];
                                   [4.1215; 4.9130; 5.2462; 4.2981; 9.3622];
                                   [7.4682; 5.2166; 5.1184; 1.9626; 0.7562]])
@@ -2559,7 +2559,7 @@

     [<Test>]
     member _.TestTensorMatMul44 () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            let t1 = combo.tensor([[8.0766; 3.3030; 2.1732; 8.9448; 1.1028];
                                   [4.1215; 4.9130; 5.2462; 4.2981; 9.3622];
                                   [7.4682; 5.2166; 5.1184; 1.9626; 0.7562]])
@@ -2579,7 +2579,7 @@

     [<Test>]
     member _.TestTensorMatMulBroadcast1 () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            let t1 = combo.tensor([[8.0766; 3.3030; 2.1732; 8.9448; 1.1028];
                                   [4.1215; 4.9130; 5.2462; 4.2981; 9.3622];
                                   [7.4682; 5.2166; 5.1184; 1.9626; 0.7562]])
@@ -2599,7 +2599,7 @@

     [<Test>]
     member _.TestTensorMatMulBroadcast2 () =
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            let t1 = combo.tensor([[8.0766; 3.3030; 2.1732; 8.9448; 1.1028];
                                   [4.1215; 4.9130; 5.2462; 4.2981; 9.3622];
                                   [7.4682; 5.2166; 5.1184; 1.9626; 0.7562]])
@@ -3057,7 +3057,7 @@ type TestTensor () =
        np.cov(a,fweights=fweights)
        np.cov(a,aweights=aweights)
        *)
-        for combo in Combos.FloatingPoint do
+        for combo in Combos.FloatingPointExcept16s do
            let t = combo.tensor([[0.3787;0.7515;0.2252;0.3416];
                                  [0.6078;0.4742;0.7844;0.0967];
                                  [0.1416;0.1559;0.6452;0.1417]])
```
