Commit 738259e

Merge branch 'unit' of https://github.com/NiklasGustafsson/TorchSharp into unit
2 parents: 6bdf60b + 3d02a25

118 files changed: +3262 −5407 lines

src/Examples/AlexNet.cs

Lines changed: 3 additions & 3 deletions

@@ -19,17 +19,17 @@ public AlexNet(string name, int numClasses, torch.Device device = null) : base(n
     features = Sequential(
         ("c1", Conv2d(3, 64, kernelSize: 3, stride: 2, padding: 1)),
         ("r1", ReLU(inplace: true)),
-        ("mp1", MaxPool2d(kernelSize: new long[] { 2, 2 })),
+        ("mp1", MaxPool2d(kernel_size: new long[] { 2, 2 })),
         ("c2", Conv2d(64, 192, kernelSize: 3, padding: 1)),
         ("r2", ReLU(inplace: true)),
-        ("mp2", MaxPool2d(kernelSize: new long[] { 2, 2 })),
+        ("mp2", MaxPool2d(kernel_size: new long[] { 2, 2 })),
         ("c3", Conv2d(192, 384, kernelSize: 3, padding: 1)),
         ("r3", ReLU(inplace: true)),
         ("c4", Conv2d(384, 256, kernelSize: 3, padding: 1)),
         ("r4", ReLU(inplace: true)),
         ("c5", Conv2d(256, 256, kernelSize: 3, padding: 1)),
         ("r5", ReLU(inplace: true)),
-        ("mp3", MaxPool2d(kernelSize: new long[] { 2, 2 })));
+        ("mp3", MaxPool2d(kernel_size: new long[] { 2, 2 })));

     avgPool = AdaptiveAvgPool2d(new long[] { 2, 2 });

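The only change in this file, and in the other example files below, is renaming MaxPool2d's factory parameter from kernelSize to kernel_size, matching PyTorch's snake_case spelling (note that Conv2d's kernelSize is left untouched in these hunks). A minimal sketch of the renamed parameter in use, assuming the usual TorchSharp imports and an illustrative input shape:

    using TorchSharp;
    using static TorchSharp.torch;
    using static TorchSharp.torch.nn;

    // Both overloads that appear in this commit, spelled with the new
    // snake_case parameter name. Shapes are illustrative only.
    var poolArray  = MaxPool2d(kernel_size: new long[] { 2, 2 });  // per-dimension window
    var poolScalar = MaxPool2d(kernel_size: 2, stride: 2);         // square window, explicit stride

    var x = randn(1, 3, 32, 32);     // NCHW input (hypothetical shape)
    var y = poolArray.forward(x);    // stride defaults to the window size: [1, 3, 16, 16]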
src/Examples/MNIST.cs

Lines changed: 1 addition & 1 deletion

@@ -105,7 +105,7 @@ internal class Model : Module<Tensor, Tensor>

     // These don't have any parameters, so the only reason to instantiate
     // them is performance, since they will be used over and over.
-    private Module<Tensor, Tensor> pool1 = MaxPool2d(kernelSize: new long[] { 2, 2 });
+    private Module<Tensor, Tensor> pool1 = MaxPool2d(kernel_size: new long[] { 2, 2 });

     private Module<Tensor, Tensor> relu1 = ReLU();
     private Module<Tensor, Tensor> relu2 = ReLU();

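The comment in this hunk carries the design point: pool1, relu1, and relu2 hold no learnable parameters, so the only reason to keep them as fields is to avoid re-allocating them on every forward pass. A minimal sketch of that reuse pattern, as a hypothetical module (TinyNet is made up for illustration, not the repo's Model class):

    using TorchSharp;
    using static TorchSharp.torch;
    using static TorchSharp.torch.nn;

    // Parameterless modules are allocated once as fields and then
    // reused on every call to forward.
    internal class TinyNet : Module<Tensor, Tensor>
    {
        private readonly Module<Tensor, Tensor> conv = Conv2d(1, 8, kernelSize: 3, padding: 1);
        private readonly Module<Tensor, Tensor> relu = ReLU();
        private readonly Module<Tensor, Tensor> pool = MaxPool2d(kernel_size: new long[] { 2, 2 });

        public TinyNet() : base(nameof(TinyNet)) { RegisterComponents(); }

        public override Tensor forward(Tensor input)
            => pool.forward(relu.forward(conv.forward(input)));  // same instances every call
    }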
src/Examples/VGG.cs

Lines changed: 1 addition & 1 deletion

@@ -38,7 +38,7 @@ public VGG(string name, int numClasses, Device device = null) : base(name)
     for (var i = 0; i < channels.Length; i++) {

         if (channels[i] == 0) {
-            modules.Add(($"MaxPool2d-{i}a", MaxPool2d(kernelSize: 2, stride: 2)));
+            modules.Add(($"MaxPool2d-{i}a", MaxPool2d(kernel_size: 2, stride: 2)));
         } else {
             modules.Add(($"conv2d-{i}a", Conv2d(in_channels, channels[i], kernelSize: 3, padding: 1)));
             modules.Add(($"bnrm2d-{i}a", BatchNorm2d(channels[i])));

src/FSharp.Examples/AlexNet.fs

Lines changed: 3 additions & 3 deletions

@@ -49,17 +49,17 @@ type Model(name,device:torch.Device) as this =

     let features = Sequential(("c1", Conv2d(3L, 64L, kernelSize=3L, stride=2L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
                               ("r1", ReLU(inplace=true) :> Module<torch.Tensor,torch.Tensor>),
-                              ("mp1", MaxPool2d(kernelSize=[|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>),
+                              ("mp1", MaxPool2d(kernel_size=[|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>),
                               ("c2", Conv2d(64L, 192L, kernelSize=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
                               ("r2", ReLU(inplace=true) :> Module<torch.Tensor,torch.Tensor>),
-                              ("mp2", MaxPool2d(kernelSize=[|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>),
+                              ("mp2", MaxPool2d(kernel_size=[|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>),
                               ("c3", Conv2d(192L, 384L, kernelSize=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
                               ("r3", ReLU(inplace=true) :> Module<torch.Tensor,torch.Tensor>),
                               ("c4", Conv2d(384L, 256L, kernelSize=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
                               ("r4", ReLU(inplace=true) :> Module<torch.Tensor,torch.Tensor>),
                               ("c5", Conv2d(256L, 256L, kernelSize=3L, padding=1L) :> Module<torch.Tensor,torch.Tensor>),
                               ("r5", ReLU(inplace=true) :> Module<torch.Tensor,torch.Tensor>),
-                              ("mp3", MaxPool2d(kernelSize=[|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>),
+                              ("mp3", MaxPool2d(kernel_size=[|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>),
                               ("avg", AdaptiveAvgPool2d([|2L; 2L|]) :> Module<torch.Tensor,torch.Tensor>))

     let classifier = Sequential(("d1", Dropout() :> Module<torch.Tensor,torch.Tensor>),

src/FSharp.Examples/MNIST.fs

Lines changed: 1 addition & 1 deletion

@@ -51,7 +51,7 @@ type Model(name,device:torch.Device) as this =
     let fc1 = Linear(9216L, 128L)
     let fc2 = Linear(128L, 10L)

-    let pool1 = MaxPool2d(kernelSize=[|2L; 2L|])
+    let pool1 = MaxPool2d(kernel_size=[|2L; 2L|])

     let relu = ReLU()

src/Native/LibTorchSharp/THSActivation.cpp

Lines changed: 0 additions & 327 deletions

@@ -2,330 +2,3 @@
 #include "THSNN.h"

 #include <torch/nn/init.h>
-
-NNModule THSNN_CELU_ctor(const double alpha, const bool inplace, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::CELUOptions().alpha(alpha).inplace(inplace);
-        res = create_module<torch::nn::CELUImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_CELU_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::CELU>()->forward(*tensor));
-}
-
-NNModule THSNN_ELU_ctor(const double alpha, const bool inplace, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::ELUOptions().alpha(alpha).inplace(inplace);
-        res = create_module<torch::nn::ELUImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_ELU_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::ELU>()->forward(*tensor));
-}
-
-NNModule THSNN_GELU_ctor(NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        res = create_module<torch::nn::GELUImpl>(outAsAnyModule);
-    );
-}
-
-Tensor THSNN_GELU_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::GELU>()->forward(*tensor));
-}
-
-NNModule THSNN_GLU_ctor(const int64_t dim, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::GLUOptions().dim(dim);
-        res = create_module<torch::nn::GLUImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_GLU_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::GLU>()->forward(*tensor));
-}
-
-NNModule THSNN_Hardshrink_ctor(const double lambda, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::HardshrinkOptions(lambda);
-        res = create_module<torch::nn::HardshrinkImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Hardshrink_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::Hardshrink>()->forward(*tensor));
-}
-
-NNModule THSNN_Hardtanh_ctor(const double min_val, const double max_val, const bool inplace, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::HardtanhOptions()
-            .min_val(min_val)
-            .max_val(max_val)
-            .inplace(inplace);
-        res = create_module<torch::nn::HardtanhImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Hardtanh_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::Hardtanh>()->forward(*tensor));
-}
-
-
-NNModule THSNN_LeakyReLU_ctor(const double negative_sloope, const bool inplace, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::LeakyReLUOptions().negative_slope(negative_sloope).inplace(inplace);
-        res = create_module<torch::nn::LeakyReLUImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_LeakyReLU_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::LeakyReLU>()->forward(*tensor));
-}
-
-NNModule THSNN_LogSoftmax_ctor(int64_t dim, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::LogSoftmaxOptions(dim);
-        res = create_module<torch::nn::LogSoftmaxImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_LogSoftmax_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::LogSoftmax>()->forward(*tensor));
-}
-
-NNModule THSNN_Mish_ctor(NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        res = create_module<torch::nn::MishImpl>(outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Mish_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::Mish>()->forward(*tensor));
-}
-
-NNModule THSNN_PReLU_ctor(const int64_t nparams, const double init, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::PReLUOptions().num_parameters(nparams).init(init);
-        res = create_module<torch::nn::PReLUImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_PReLU_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::PReLU>()->forward(*tensor));
-}
-
-Tensor THSNN_PReLU_weight(const NNModule module)
-{
-    return get_weight<torch::nn::PReLU>(module);
-}
-
-void THSNN_PReLU_set_weight(const NNModule module, const Tensor weight)
-{
-    set_weight<torch::nn::PReLU>(module, weight);
-}
-
-NNModule THSNN_ReLU_ctor(bool inplace, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::ReLUOptions(inplace);
-        res = create_module<torch::nn::ReLUImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_ReLU_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::ReLU>()->forward(*tensor));
-}
-
-NNModule THSNN_RReLU_ctor(const double lower, const double upper, const bool inplace, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::RReLUOptions().lower(lower).upper(upper).inplace(inplace);
-        res = create_module<torch::nn::RReLUImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_RReLU_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::RReLU>()->forward(*tensor));
-}
-
-NNModule THSNN_ReLU6_ctor(bool inplace, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::ReLU6Options(inplace);
-        res = create_module<torch::nn::ReLU6Impl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_ReLU6_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::ReLU6>()->forward(*tensor));
-}
-
-NNModule THSNN_SELU_ctor(bool inplace, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::SELUOptions(inplace);
-        res = create_module<torch::nn::SELUImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_SELU_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::SELU>()->forward(*tensor));
-}
-
-NNModule THSNN_Sigmoid_ctor(NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        res = create_module<torch::nn::SigmoidImpl>(outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Sigmoid_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::Sigmoid>()->forward(*tensor));
-}
-
-NNModule THSNN_SiLU_ctor(NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        res = create_module<torch::nn::SiLUImpl>(outAsAnyModule);
-    );
-}
-
-Tensor THSNN_SiLU_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::SiLU>()->forward(*tensor));
-}
-
-NNModule THSNN_Softmax2d_ctor(NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        res = create_module<torch::nn::Softmax2dImpl>(outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Softmax2d_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::Softmax2d>()->forward(*tensor));
-}
-
-NNModule THSNN_Softmax_ctor(const int64_t dim, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::SoftmaxOptions(dim);
-        res = create_module<torch::nn::SoftmaxImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Softmax_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::Softmax>()->forward(*tensor));
-}
-
-NNModule THSNN_Softmin_ctor(const int64_t dim, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::SoftminOptions(dim);
-        res = create_module<torch::nn::SoftminImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Softmin_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::Softmin>()->forward(*tensor));
-}
-
-NNModule THSNN_Softplus_ctor(const double beta, const double threshold, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::SoftplusOptions().beta(beta).threshold(threshold);
-        res = create_module<torch::nn::SoftplusImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Softplus_forward(const NNModule module, const Tensor tensor) {
-    CATCH_TENSOR((*module)->as<torch::nn::Softplus>()->forward(*tensor));
-}
-
-NNModule THSNN_Softshrink_ctor(const double lambda, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::SoftshrinkOptions().lambda(lambda);
-        res = create_module<torch::nn::SoftshrinkImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Softshrink_forward(const NNModule module, const Tensor tensor) {
-    CATCH_TENSOR((*module)->as<torch::nn::Softshrink>()->forward(*tensor));
-}
-
-NNModule THSNN_Softsign_ctor(NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        res = create_module<torch::nn::SoftsignImpl>(outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Softsign_forward(const NNModule module, const Tensor tensor) {
-    CATCH_TENSOR((*module)->as<torch::nn::Softsign>()->forward(*tensor));
-}
-
-NNModule THSNN_Tanh_ctor(NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        res = create_module<torch::nn::TanhImpl>(outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Tanh_forward(const NNModule module, const Tensor tensor)
-{
-    CATCH_TENSOR((*module)->as<torch::nn::Tanh>()->forward(*tensor));
-}
-
-NNModule THSNN_Tanhshrink_ctor(NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        res = create_module<torch::nn::TanhshrinkImpl>(outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Tanhshrink_forward(const NNModule module, const Tensor tensor) {
-    CATCH_TENSOR((*module)->as<torch::nn::Tanhshrink>()->forward(*tensor));
-}
-
-NNModule THSNN_Threshold_ctor(const double threshold, const double value, const bool inplace, NNAnyModule* outAsAnyModule)
-{
-    CATCH_RETURN_NNModule(
-        auto opts = torch::nn::ThresholdOptions(threshold, value).inplace(inplace);
-        res = create_module<torch::nn::ThresholdImpl>(opts, outAsAnyModule);
-    );
-}
-
-Tensor THSNN_Threshold_forward(const NNModule module, const Tensor tensor) {
-    CATCH_TENSOR((*module)->as<torch::nn::Threshold>()->forward(*tensor));
-}
-
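This hunk deletes every per-activation native entry point (the THSNN_*_ctor and THSNN_*_forward pairs) from THSActivation.cpp. The replacement is not shown in this commit view, but a plausible reading is that such parameterless activation modules can live entirely on the managed side, with forward implemented as a tensor-level operation instead of a call through a native module wrapper. A hedged sketch of that pattern (MyReLU is a made-up name for illustration, not the commit's actual replacement code):

    using TorchSharp;
    using static TorchSharp.torch;

    // A managed activation module: forward is just a tensor op, so no
    // THSNN_ReLU_ctor/_forward native calls are required.
    internal sealed class MyReLU : nn.Module<Tensor, Tensor>
    {
        private readonly bool inplace;

        public MyReLU(bool inplace = false) : base(nameof(MyReLU))
        {
            this.inplace = inplace;
        }

        public override Tensor forward(Tensor input)
            => inplace ? input.relu_() : input.relu();
    }

Used the same way as a native-backed module: var y = new MyReLU().forward(randn(4));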
0 commit comments