Skip to content

Commit edf43c0

Browse files
Fixed a couple of failing modules after the merge.
Renamed 'kernelSize' to 'kernel_size'.
1 parent e5b1e82 commit edf43c0

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

41 files changed

+450
-260
lines changed

docfx/articles/modules.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ To illustrate, this is the code for MobileNet from the TorchSharp examples:
8484
var modules = new List<(string, Module)>();
8585

8686
modules.Add(("conv2d-first",
87-
Conv2d(3, 32, kernelSize: 3, stride: 1, padding: 1, bias: false)));
87+
Conv2d(3, 32, kernel_size: 3, stride: 1, padding: 1, bias: false)));
8888
modules.Add(("bnrm2d-first",
8989
BatchNorm2d(32)));
9090
modules.Add(("relu-first",
@@ -110,13 +110,13 @@ To illustrate, this is the code for MobileNet from the TorchSharp examples:
110110
var stride = strides[i];
111111

112112
modules.Add(($"conv2d-{i}a",
113-
Conv2d(in_planes, in_planes, kernelSize: 3, stride: stride, padding: 1, groups: in_planes, bias: false)));
113+
Conv2d(in_planes, in_planes, kernel_size: 3, stride: stride, padding: 1, groups: in_planes, bias: false)));
114114
modules.Add(($"bnrm2d-{i}a",
115115
BatchNorm2d(in_planes)));
116116
modules.Add(($"relu-{i}a",
117117
ReLU()));
118118
modules.Add(($"conv2d-{i}b",
119-
Conv2d(in_planes, out_planes, kernelSize: 1L, stride: 1L, padding: 0L, bias: false)));
119+
Conv2d(in_planes, out_planes, kernel_size: 1L, stride: 1L, padding: 0L, bias: false)));
120120
modules.Add(($"bnrm2d-{i}b",
121121
BatchNorm2d(out_planes)));
122122
modules.Add(($"relu-{i}b",

src/Examples/AlexNet.cs

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,17 @@ class AlexNet : Module<Tensor, Tensor>
1717
public AlexNet(string name, int numClasses, torch.Device device = null) : base(name)
1818
{
1919
features = Sequential(
20-
("c1", Conv2d(3, 64, kernelSize: 3, stride: 2, padding: 1)),
20+
("c1", Conv2d(3, 64, kernel_size: 3, stride: 2, padding: 1)),
2121
("r1", ReLU(inplace: true)),
2222
("mp1", MaxPool2d(kernel_size: new long[] { 2, 2 })),
23-
("c2", Conv2d(64, 192, kernelSize: 3, padding: 1)),
23+
("c2", Conv2d(64, 192, kernel_size: 3, padding: 1)),
2424
("r2", ReLU(inplace: true)),
2525
("mp2", MaxPool2d(kernel_size: new long[] { 2, 2 })),
26-
("c3", Conv2d(192, 384, kernelSize: 3, padding: 1)),
26+
("c3", Conv2d(192, 384, kernel_size: 3, padding: 1)),
2727
("r3", ReLU(inplace: true)),
28-
("c4", Conv2d(384, 256, kernelSize: 3, padding: 1)),
28+
("c4", Conv2d(384, 256, kernel_size: 3, padding: 1)),
2929
("r4", ReLU(inplace: true)),
30-
("c5", Conv2d(256, 256, kernelSize: 3, padding: 1)),
30+
("c5", Conv2d(256, 256, kernel_size: 3, padding: 1)),
3131
("r5", ReLU(inplace: true)),
3232
("mp3", MaxPool2d(kernel_size: new long[] { 2, 2 })));
3333

src/Examples/MobileNet.cs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ public MobileNet(string name, int numClasses, Device device = null) : base(name)
3030

3131
var modules = new List<(string, Module<Tensor, Tensor>)>();
3232

33-
modules.Add(($"conv2d-first", Conv2d(3, 32, kernelSize: 3, stride: 1, padding: 1, bias: false)));
33+
modules.Add(($"conv2d-first", Conv2d(3, 32, kernel_size: 3, stride: 1, padding: 1, bias: false)));
3434
modules.Add(($"bnrm2d-first", BatchNorm2d(32)));
3535
modules.Add(($"relu-first", ReLU()));
3636
MakeLayers(modules, 32);
@@ -53,10 +53,10 @@ private void MakeLayers(List<(string, Module<Tensor, Tensor>)> modules, long in_
5353
var out_planes = planes[i];
5454
var stride = strides[i];
5555

56-
modules.Add(($"conv2d-{i}a", Conv2d(in_planes, in_planes, kernelSize: 3, stride: stride, padding: 1, groups: in_planes, bias: false)));
56+
modules.Add(($"conv2d-{i}a", Conv2d(in_planes, in_planes, kernel_size: 3, stride: stride, padding: 1, groups: in_planes, bias: false)));
5757
modules.Add(($"bnrm2d-{i}a", BatchNorm2d(in_planes)));
5858
modules.Add(($"relu-{i}a", ReLU()));
59-
modules.Add(($"conv2d-{i}b", Conv2d(in_planes, out_planes, kernelSize: 1L, stride: 1L, padding: 0L, bias: false)));
59+
modules.Add(($"conv2d-{i}b", Conv2d(in_planes, out_planes, kernel_size: 1L, stride: 1L, padding: 0L, bias: false)));
6060
modules.Add(($"bnrm2d-{i}b", BatchNorm2d(out_planes)));
6161
modules.Add(($"relu-{i}b", ReLU()));
6262

src/Examples/ResNet.cs

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ public ResNet(string name, Func<string, int,int,int,Module<Tensor, Tensor>> bloc
7272
{
7373
var modules = new List<(string, Module<Tensor, Tensor>)>();
7474

75-
modules.Add(($"conv2d-first", Conv2d(3, 64, kernelSize: 3, stride: 1, padding: 1, bias: false)));
75+
modules.Add(($"conv2d-first", Conv2d(3, 64, kernel_size: 3, stride: 1, padding: 1, bias: false)));
7676
modules.Add(($"bnrm2d-first", BatchNorm2d(64)));
7777
modules.Add(($"relu-first", ReLU(inplace:true)));
7878
MakeLayer(modules, block, expansion, 64, num_blocks[0], 1);
@@ -124,17 +124,17 @@ public BasicBlock (string name, int in_planes, int planes, int stride) : base(na
124124
{
125125
var modules = new List<(string, Module<Tensor, Tensor>)>();
126126

127-
modules.Add(($"{name}-conv2d-1", Conv2d(in_planes, planes, kernelSize: 3, stride: stride, padding: 1, bias: false)));
127+
modules.Add(($"{name}-conv2d-1", Conv2d(in_planes, planes, kernel_size: 3, stride: stride, padding: 1, bias: false)));
128128
modules.Add(($"{name}-bnrm2d-1", BatchNorm2d(planes)));
129129
modules.Add(($"{name}-relu-1", ReLU(inplace: true)));
130-
modules.Add(($"{name}-conv2d-2", Conv2d(planes, planes, kernelSize: 3, stride: 1, padding: 1, bias: false)));
130+
modules.Add(($"{name}-conv2d-2", Conv2d(planes, planes, kernel_size: 3, stride: 1, padding: 1, bias: false)));
131131
modules.Add(($"{name}-bnrm2d-2", BatchNorm2d(planes)));
132132

133133
layers = Sequential(modules);
134134

135135
if (stride != 1 || in_planes != expansion*planes) {
136136
shortcut = Sequential(
137-
($"{name}-conv2d-3", Conv2d(in_planes, expansion * planes, kernelSize: 1, stride: stride, bias: false)),
137+
($"{name}-conv2d-3", Conv2d(in_planes, expansion * planes, kernel_size: 1, stride: stride, bias: false)),
138138
($"{name}-bnrm2d-3", BatchNorm2d(expansion * planes)));
139139
}
140140
else {
@@ -175,20 +175,20 @@ public Bottleneck(string name, int in_planes, int planes, int stride) : base(nam
175175
{
176176
var modules = new List<(string, Module<Tensor, Tensor>)>();
177177

178-
modules.Add(($"{name}-conv2d-1", Conv2d(in_planes, planes, kernelSize: 1, bias: false)));
178+
modules.Add(($"{name}-conv2d-1", Conv2d(in_planes, planes, kernel_size: 1, bias: false)));
179179
modules.Add(($"{name}-bnrm2d-1", BatchNorm2d(planes)));
180180
modules.Add(($"{name}relu-1", ReLU(inplace:true)));
181-
modules.Add(($"{name}-conv2d-2", Conv2d(planes, planes, kernelSize: 3, stride: stride, padding: 1, bias: false)));
181+
modules.Add(($"{name}-conv2d-2", Conv2d(planes, planes, kernel_size: 3, stride: stride, padding: 1, bias: false)));
182182
modules.Add(($"{name}-bnrm2d-2", BatchNorm2d(planes)));
183183
modules.Add(($"{name}relu-2", ReLU(inplace: true)));
184-
modules.Add(($"{name}-conv2d-3", Conv2d(planes, expansion * planes, kernelSize: 1, bias: false)));
184+
modules.Add(($"{name}-conv2d-3", Conv2d(planes, expansion * planes, kernel_size: 1, bias: false)));
185185
modules.Add(($"{name}-bnrm2d-3", BatchNorm2d(expansion * planes)));
186186

187187
layers = Sequential(modules);
188188

189189
if (stride != 1 || in_planes != expansion * planes) {
190190
shortcut = Sequential(
191-
($"{name}-conv2d-4", Conv2d(in_planes, expansion * planes, kernelSize: 1, stride: stride, bias: false)),
191+
($"{name}-conv2d-4", Conv2d(in_planes, expansion * planes, kernel_size: 1, stride: stride, bias: false)),
192192
($"{name}-bnrm2d-4", BatchNorm2d(expansion * planes)));
193193
} else {
194194
shortcut = Sequential();

src/Examples/SpeechCommands.cs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -235,16 +235,16 @@ internal class M5 : Module<Tensor, Tensor>
235235

236236
public M5(string name, int n_input = 1, int n_output = 35, int stride = 16, int n_channel = 32) : base(name)
237237
{
238-
conv1 = nn.Conv1d(n_input, n_channel, kernelSize: 80, stride: stride);
238+
conv1 = nn.Conv1d(n_input, n_channel, kernel_size: 80, stride: stride);
239239
bn1 = nn.BatchNorm1d(n_channel);
240240
pool1 = nn.MaxPool1d(4);
241-
conv2 = nn.Conv1d(n_channel, n_channel, kernelSize: 3);
241+
conv2 = nn.Conv1d(n_channel, n_channel, kernel_size: 3);
242242
bn2 = nn.BatchNorm1d(n_channel);
243243
pool2 = nn.MaxPool1d(4);
244-
conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernelSize: 3);
244+
conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size: 3);
245245
bn3 = nn.BatchNorm1d(2 * n_channel);
246246
pool3 = nn.MaxPool1d(4);
247-
conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernelSize: 3);
247+
conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size: 3);
248248
bn4 = nn.BatchNorm1d(2 * n_channel);
249249
pool4 = nn.MaxPool1d(4);
250250
fc1 = nn.Linear(2 * n_channel, n_output);

src/Examples/VGG.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ public VGG(string name, int numClasses, Device device = null) : base(name)
4040
if (channels[i] == 0) {
4141
modules.Add(($"MaxPool2d-{i}a", MaxPool2d(kernel_size: 2, stride: 2)));
4242
} else {
43-
modules.Add(($"conv2d-{i}a", Conv2d(in_channels, channels[i], kernelSize: 3, padding: 1)));
43+
modules.Add(($"conv2d-{i}a", Conv2d(in_channels, channels[i], kernel_size: 3, padding: 1)));
4444
modules.Add(($"bnrm2d-{i}a", BatchNorm2d(channels[i])));
4545
modules.Add(($"relu-{i}b", ReLU(inplace: true)));
4646
in_channels = channels[i];

src/FSharp.Examples/AlexNet.fs

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -47,17 +47,17 @@ let getDataFiles sourceDir targetDir =
4747
type Model(name,device:torch.Device) as this =
4848
inherit Module<torch.Tensor,torch.Tensor>(name)
4949

50-
let features = Sequential(("c1", Conv2d(3L, 64L, kernelSize=3L, stride=2L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
50+
let features = Sequential(("c1", Conv2d(3L, 64L, kernel_size=3L, stride=2L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
5151
("r1", ReLU(inplace=true) :> Module<torch.Tensor,torch.Tensor>),
5252
("mp1", MaxPool2d(kernel_size=[|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>),
53-
("c2", Conv2d(64L, 192L, kernelSize=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
53+
("c2", Conv2d(64L, 192L, kernel_size=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
5454
("r2", ReLU(inplace=true) :> Module<torch.Tensor,torch.Tensor>),
5555
("mp2", MaxPool2d(kernel_size=[|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>),
56-
("c3", Conv2d(192L, 384L, kernelSize=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
56+
("c3", Conv2d(192L, 384L, kernel_size=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
5757
("r3", ReLU(inplace=true) :> Module<torch.Tensor,torch.Tensor>),
58-
("c4", Conv2d(384L, 256L, kernelSize=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
58+
("c4", Conv2d(384L, 256L, kernel_size=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
5959
("r4", ReLU(inplace=true) :> Module<torch.Tensor,torch.Tensor>),
60-
("c5", Conv2d(256L, 256L, kernelSize=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
60+
("c5", Conv2d(256L, 256L, kernel_size=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
6161
("r5", ReLU(inplace=true) :> Module<torch.Tensor,torch.Tensor>),
6262
("mp3", MaxPool2d(kernel_size=[|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>),
6363
("avg", AdaptiveAvgPool2d([|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>))

src/Native/LibTorchSharp/THSNormalization.cpp

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,10 @@
55

66
Tensor THSNN_batch_norm(const Tensor input, Tensor running_mean, const Tensor running_var, const Tensor weight, const Tensor bias, const bool training, const double momentum, const double eps)
77
{
8-
c10::optional<at::Tensor> w, b, rm, rv;
9-
if (weight != nullptr) w.emplace(*weight);
10-
if (bias != nullptr) b.emplace(*bias);
11-
if (running_mean != nullptr) rm.emplace(*running_mean);
12-
if (running_var != nullptr) rv.emplace(*running_var);
8+
c10::optional<at::Tensor> w = weight != nullptr ? *weight : c10::optional<at::Tensor>();
9+
c10::optional<at::Tensor> b = bias != nullptr ? *bias : c10::optional<at::Tensor>();
10+
c10::optional<at::Tensor> rm = running_mean != nullptr ? *running_mean : c10::optional<at::Tensor>();
11+
c10::optional<at::Tensor> rv = running_var != nullptr ? *running_var : c10::optional<at::Tensor>();
1312

1413
CATCH_TENSOR(torch::batch_norm(*input, w, b, rm, rv, training, momentum, eps, false));
1514
}

src/Native/LibTorchSharp/THSTensor.h

Lines changed: 33 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -815,29 +815,56 @@ EXPORT_API(void) THSTensor_max_along_dimension(const Tensor tensor, Tensor* (*al
815815

816816
EXPORT_API(Tensor) THSTensor_max_elementwise(const Tensor tensor, const Tensor other);
817817

818-
EXPORT_API(Tensor) THSTensor_max_pool1d_with_indices(
818+
EXPORT_API(Tensor) THSTensor_max_pool1d(
819819
const Tensor tensor,
820820
const int64_t* kernelSize, const int kernelSizeLength,
821821
const int64_t* stride, const int strideLength,
822822
const int64_t* padding, const int paddingLength,
823823
const int64_t* dilation, const int dilationLength,
824-
bool ceil_mode, Tensor *indices);
824+
bool ceil_mode);
825825

826-
EXPORT_API(Tensor) THSTensor_max_pool2d_with_indices(
826+
EXPORT_API(Tensor) THSTensor_max_pool2d(
827827
const Tensor tensor,
828828
const int64_t* kernelSize, const int kernelSizeLength,
829829
const int64_t* stride, const int strideLength,
830830
const int64_t* padding, const int paddingLength,
831831
const int64_t* dilation, const int dilationLength,
832-
bool ceil_mode, Tensor* indices);
832+
bool ceil_mode);
833833

834-
EXPORT_API(Tensor) THSTensor_max_pool3d_with_indices(
834+
EXPORT_API(Tensor) THSTensor_max_pool3d(
835835
const Tensor tensor,
836836
const int64_t* kernelSize, const int kernelSizeLength,
837837
const int64_t* stride, const int strideLength,
838838
const int64_t* padding, const int paddingLength,
839839
const int64_t* dilation, const int dilationLength,
840-
bool ceil_mode, Tensor* indices);
840+
bool ceil_mode);
841+
842+
EXPORT_API(void) THSTensor_max_pool1d_with_indices(
843+
const Tensor tensor,
844+
Tensor* (*allocator)(size_t length),
845+
const int64_t* kernelSize, const int kernelSizeLength,
846+
const int64_t* stride, const int strideLength,
847+
const int64_t* padding, const int paddingLength,
848+
const int64_t* dilation, const int dilationLength,
849+
bool ceil_mode);
850+
851+
EXPORT_API(void) THSTensor_max_pool2d_with_indices(
852+
const Tensor tensor,
853+
Tensor* (*allocator)(size_t length),
854+
const int64_t* kernelSize, const int kernelSizeLength,
855+
const int64_t* stride, const int strideLength,
856+
const int64_t* padding, const int paddingLength,
857+
const int64_t* dilation, const int dilationLength,
858+
bool ceil_mode);
859+
860+
EXPORT_API(void) THSTensor_max_pool3d_with_indices(
861+
const Tensor tensor,
862+
Tensor* (*allocator)(size_t length),
863+
const int64_t* kernelSize, const int kernelSizeLength,
864+
const int64_t* stride, const int strideLength,
865+
const int64_t* padding, const int paddingLength,
866+
const int64_t* dilation, const int dilationLength,
867+
bool ceil_mode);
841868

842869
EXPORT_API(Tensor) THSTensor_max_unpool1d(
843870
const Tensor tensor,

0 commit comments

Comments
 (0)