Skip to content

Commit 4873db6

Browse files
committed
Add VNN Operations.
1 parent 341433a commit 4873db6

15 files changed

+2297
-3
lines changed

README.md

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -199,6 +199,30 @@ DeepScaleAndShiftOperation
199199

200200
FlattenOperation
201201

202+
### Vector Neural Network (VNN) Operations
203+
These operations typically act on instances of the Matrix class in which the left half of the columns holds vector magnitudes and the right half holds the corresponding angles in radians.
204+
Learn more about Vector Neural Networks [here](https://www.amazon.com/Vector-Neural-Networks-Geometric-Tensors-ebook/dp/B0CXBV3DY5/ref=sr_1_1).
205+
206+
ElementwiseSquareOperation
207+
208+
ElementwiseVectorAddOperation
209+
210+
ElementwiseVectorCartesianSummationOperation
211+
212+
ElementwiseVectorConstituentMultiplyOperation
213+
214+
ElementwiseVectorDecompositionOperation
215+
216+
ElementwiseVectorMiniDecompositionOperation
217+
218+
PairwiseSineSoftmaxOperation
219+
220+
VectorAttentionBinaryOperation
221+
222+
VectorAttentionOperation
223+
224+
VectorizeOperation
225+
202226
### Neural Network Parameters
203227

204228
Each neural network base class has a set of parameters that can be used to configure the neural network. They are as follows:

examples/gravnet/ParallelReverseAutoDiff.GravNetExample/VectorNetwork/RMAD/VectorAttentionOperation.cs

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,5 @@ public override BackwardResult Backward(Matrix dOutput)
9595
.AddInputGradient(dProbabilities)
9696
.Build();
9797
}
98-
9998
}
10099
}

src/ParallelReverseAutoDiff.nuspec

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
33
<metadata>
44
<id>ParallelReverseAutoDiff</id>
5-
<version>1.1.65</version>
5+
<version>1.2.0</version>
66
<authors>ameritusweb</authors>
77
<owners>ameritusweb</owners>
88
<license type="expression">LGPL-2.1-only</license>
@@ -11,7 +11,7 @@
1111
<requireLicenseAcceptance>false</requireLicenseAcceptance>
1212
<description>A library for parallelized reverse mode automatic differentiation in C# for custom neural network development.</description>
1313
<repository type="git" url="https://github.com/ameritusweb/ParallelReverseAutoDiff.git" commit="0a9bbd18f45c4f4434160a7c064539f29f3a3c67" />
14-
<releaseNotes>Fix GPU Matrix Multiply.</releaseNotes>
14+
<releaseNotes>Add VNN Operations.</releaseNotes>
1515
<copyright>ameritusweb, 2024</copyright>
1616
<tags>autodiff automatic-differentiation parallel reverse-mode differentiation C# neural network</tags>
1717
<dependencies>
Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,85 @@
1+
//------------------------------------------------------------------------------
2+
// <copyright file="ElementwiseSquareOperation.cs" author="ameritusweb" date="5/2/2023">
3+
// Copyright (c) 2023 ameritusweb All rights reserved.
4+
// </copyright>
5+
//------------------------------------------------------------------------------
6+
namespace ParallelReverseAutoDiff.RMAD
7+
{
8+
using System;
9+
10+
/// <summary>
11+
/// Performs the forward and backward operations for the element-wise square function.
12+
/// </summary>
13+
public class ElementwiseSquareOperation : Operation
14+
{
15+
private Matrix input;
16+
17+
/// <summary>
18+
/// A common factory method for instantiating this operation.
19+
/// </summary>
20+
/// <param name="net">The neural network.</param>
21+
/// <returns>The instantiated operation.</returns>
22+
public static IOperation Instantiate(NeuralNetwork net)
23+
{
24+
return new ElementwiseSquareOperation();
25+
}
26+
27+
/// <inheritdoc />
28+
public override void Store(Guid id)
29+
{
30+
this.IntermediateMatrices.AddOrUpdate(id, this.input, (x, y) => this.input);
31+
}
32+
33+
/// <inheritdoc />
34+
public override void Restore(Guid id)
35+
{
36+
this.input = this.IntermediateMatrices[id];
37+
}
38+
39+
/// <summary>
40+
/// Performs the forward operation for the element-wise square function.
41+
/// </summary>
42+
/// <param name="input">The input to the element-wise square operation.</param>
43+
/// <returns>The output of the element-wise square operation.</returns>
44+
public Matrix Forward(Matrix input)
45+
{
46+
this.input = input;
47+
int rows = input.Length;
48+
int cols = input[0].Length;
49+
this.Output = new Matrix(rows, cols);
50+
51+
for (int i = 0; i < rows; i++)
52+
{
53+
for (int j = 0; j < cols; j++)
54+
{
55+
double x = input[i][j];
56+
this.Output[i][j] = x * x;
57+
}
58+
}
59+
60+
return this.Output;
61+
}
62+
63+
/// <inheritdoc />
64+
public override BackwardResult Backward(Matrix dLdOutput)
65+
{
66+
int rows = dLdOutput.Length;
67+
int cols = dLdOutput[0].Length;
68+
Matrix dLdInput = new Matrix(rows, cols);
69+
70+
for (int i = 0; i < rows; i++)
71+
{
72+
for (int j = 0; j < cols; j++)
73+
{
74+
double x = this.input[i][j];
75+
double gradient = 2 * x;
76+
dLdInput[i][j] = dLdOutput[i][j] * gradient;
77+
}
78+
}
79+
80+
return new BackwardResultBuilder()
81+
.AddInputGradient(dLdInput)
82+
.Build();
83+
}
84+
}
85+
}
Lines changed: 122 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,122 @@
//------------------------------------------------------------------------------
// <copyright file="ElementwiseVectorAddOperation.cs" author="ameritusweb" date="5/2/2023">
// Copyright (c) 2023 ameritusweb All rights reserved.
// </copyright>
//------------------------------------------------------------------------------
namespace ParallelReverseAutoDiff.RMAD
{
    using System;
    using System.Threading.Tasks;

    /// <summary>
    /// Element-wise add operation. Inputs are matrices whose left half of columns
    /// holds vector magnitudes and whose right half holds angles in radians.
    /// </summary>
    public class ElementwiseVectorAddOperation : Operation
    {
        // Inputs saved during the forward pass; required by Backward.
        private Matrix input1;
        private Matrix input2;

        /// <summary>
        /// A common method for instantiating an operation.
        /// </summary>
        /// <param name="net">The neural network.</param>
        /// <returns>The instantiated operation.</returns>
        public static IOperation Instantiate(NeuralNetwork net)
        {
            return new ElementwiseVectorAddOperation();
        }

        /// <summary>
        /// Performs the forward operation for the element-wise vector summation function.
        /// Each (magnitude, angle) pair is converted to Cartesian coordinates, summed,
        /// and converted back to polar form.
        /// </summary>
        /// <param name="input1">The first input to the element-wise vector summation operation.</param>
        /// <param name="input2">The second input to the element-wise vector summation operation.</param>
        /// <returns>The output of the element-wise vector summation operation.</returns>
        public Matrix Forward(Matrix input1, Matrix input2)
        {
            this.input1 = input1;
            this.input2 = input2;

            this.Output = new Matrix(this.input1.Rows, this.input1.Cols);
            Parallel.For(0, input1.Rows, i =>
            {
                for (int j = 0; j < input1.Cols / 2; j++)
                {
                    // Accessing the magnitudes and angles from the concatenated matrices
                    double magnitude = input1[i, j];
                    double angle = input1[i, j + (input1.Cols / 2)];

                    double wMagnitude = input2[i, j];
                    double wAngle = input2[i, j + (input2.Cols / 2)];

                    // Compute vector components
                    double x1 = magnitude * Math.Cos(angle);
                    double y1 = magnitude * Math.Sin(angle);
                    double x2 = wMagnitude * Math.Cos(wAngle);
                    double y2 = wMagnitude * Math.Sin(wAngle);

                    double sumx = x1 + x2;
                    double sumy = y1 + y2;

                    // Compute resultant vector magnitude and angle
                    double resultMagnitude = Math.Sqrt((sumx * sumx) + (sumy * sumy));
                    double resultAngle = Math.Atan2(sumy, sumx);

                    this.Output[i, j] = resultMagnitude;
                    this.Output[i, j + (this.input1.Cols / 2)] = resultAngle;
                }
            });

            return this.Output;
        }

        /// <inheritdoc />
        public override BackwardResult Backward(Matrix dOutput)
        {
            Matrix dInput1 = new Matrix(this.input1.Rows, this.input1.Cols);
            Matrix dInput2 = new Matrix(this.input2.Rows, this.input2.Cols);

            int half = this.input1.Cols / 2;
            Parallel.For(0, this.input1.Rows, i =>
            {
                for (int j = 0; j < half; j++)
                {
                    double magnitude = this.input1[i, j];
                    double angle = this.input1[i, j + half];
                    double wMagnitude = this.input2[i, j];
                    double wAngle = this.input2[i, j + half];

                    double x1 = magnitude * Math.Cos(angle);
                    double y1 = magnitude * Math.Sin(angle);
                    double x2 = wMagnitude * Math.Cos(wAngle);
                    double y2 = wMagnitude * Math.Sin(wAngle);

                    double sumx = x1 + x2;
                    double sumy = y1 + y2;
                    double rSquared = (sumx * sumx) + (sumy * sumy);
                    double r = Math.Sqrt(rSquared);

                    // Partials of the resultant magnitude r = sqrt(x^2 + y^2) and
                    // angle theta = atan2(y, x) with respect to the Cartesian sum.
                    // Neither is differentiable at the zero vector, so use a zero
                    // subgradient there instead of producing NaN.
                    double dRM_dX = r > 0 ? sumx / r : 0.0;
                    double dRM_dY = r > 0 ? sumy / r : 0.0;
                    double dRA_dX = rSquared > 0 ? -sumy / rSquared : 0.0;
                    double dRA_dY = rSquared > 0 ? sumx / rSquared : 0.0;

                    // Fold the upstream gradients (magnitude slot and angle slot)
                    // through the shared Cartesian components once.
                    double dX = (dOutput[i, j] * dRM_dX) + (dOutput[i, j + half] * dRA_dX);
                    double dY = (dOutput[i, j] * dRM_dY) + (dOutput[i, j + half] * dRA_dY);

                    // Chain rule into each polar input:
                    //   dx/dmag = cos(angle),          dy/dmag = sin(angle)
                    //   dx/dangle = -mag*sin(angle),   dy/dangle = mag*cos(angle)
                    dInput1[i, j] = (dX * Math.Cos(angle)) + (dY * Math.Sin(angle));
                    dInput1[i, j + half] = (dX * -magnitude * Math.Sin(angle)) + (dY * magnitude * Math.Cos(angle));

                    dInput2[i, j] = (dX * Math.Cos(wAngle)) + (dY * Math.Sin(wAngle));
                    dInput2[i, j + half] = (dX * -wMagnitude * Math.Sin(wAngle)) + (dY * wMagnitude * Math.Cos(wAngle));
                }
            });

            return new BackwardResultBuilder()
                .AddInputGradient(dInput1)
                .AddInputGradient(dInput2)
                .Build();
        }
    }
}

0 commit comments

Comments
 (0)