rTorch

The goal of rTorch is to provide an R wrapper to [PyTorch](https://pytorch.org/). We have borrowed ideas and code used in R tensorflow to implement rTorch.

Besides the module torch, which provides the PyTorch methods, classes and functions, the package also provides numpy as a module called np, and torchvision as well. The dollar sign $ after the module name gives you access to the objects inside that module.
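
For example (a minimal sketch, assuming torchvision is exposed in the same way as torch and np):

library(rTorch)

torch$manual_seed(42L)        # call a PyTorch function
np$pi                         # read a numpy constant
torchvision$`__version__`     # query the torchvision version string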

Installation

rTorch is only available on GitHub at the moment.

Install rTorch with:

devtools::install_github("f0nzie/rTorch")

Before running rTorch, install a Python Anaconda environment first.

  1. Create a conda environment with conda create -n myenv python=3.7

  2. Activate the new environment with conda activate myenv

  3. Install PyTorch related packages with:

conda install python=3.6.6 pytorch-cpu torchvision-cpu matplotlib pandas -c pytorch

Now, you can load rTorch in R or RStudio.

The automatic installation, like in rtensorflow, may be available later.

Note. matplotlib and pandas are not strictly necessary, but I was asked whether matplotlib and pandas would work with PyTorch, so I decided to include them for testing and experimentation. They both work.
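
If the conda environment is not picked up automatically, you can select it explicitly before loading the package. A minimal sketch, assuming rTorch locates Python through reticulate (as the R tensorflow package does):

library(reticulate)
use_condaenv("myenv", required = TRUE)   # the environment created above

library(rTorch)
torch$tensor(list(1, 2, 3))              # quick check that PyTorch is reachable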

Matrices and Linear Algebra

There are five major types of Tensors in PyTorch:

library(rTorch)

bt <- torch$ByteTensor(3L, 3L)
ft <- torch$FloatTensor(3L, 3L)
dt <- torch$DoubleTensor(3L, 3L)
lt <- torch$LongTensor(3L, 3L)
Bt <- torch$BoolTensor(5L, 5L)

ft
#> tensor([[4.7644e-44, 0.0000e+00, 0.0000e+00],
#>         [0.0000e+00, 1.5414e-44, 3.2230e-44],
#>         [1.4013e-45, 5.3249e-44, 2.8026e-45]])
dt
#> tensor([[2.1220e-314, 2.1220e-314, 2.1220e-314],
#>         [2.1220e-314, 2.1220e-314, 2.1220e-314],
#>         [2.1220e-314, 2.1220e-314, 2.1220e-314]], dtype=torch.float64)
Bt
#> tensor([[ True, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False]], dtype=torch.bool)

A 4D tensor, like in the MNIST hand-written digits recognition dataset:

mnist_4d <- torch$FloatTensor(60000L, 3L, 28L, 28L)

# size
mnist_4d$size()
#> torch.Size([60000, 3, 28, 28])

# length
length(mnist_4d)
#> [1] 141120000

# shape, like in numpy
mnist_4d$shape
#> torch.Size([60000, 3, 28, 28])

# number of elements
mnist_4d$numel()
#> [1] 141120000

A 3D tensor:

ft3d <- torch$FloatTensor(4L, 3L, 2L)
ft3d
#> tensor([[[6.6692e-14, 4.5586e-41],
#>          [1.6222e+09, 3.0722e-41],
#>          [5.3976e+07, 3.0722e-41]],
#> 
#>         [[1.0438e-13, 4.5586e-41],
#>          [5.0447e-44, 0.0000e+00],
#>          [0.0000e+00, 0.0000e+00]],
#> 
#>         [[0.0000e+00, 0.0000e+00],
#>          [0.0000e+00, 0.0000e+00],
#>          [0.0000e+00, 0.0000e+00]],
#> 
#>         [[0.0000e+00, 0.0000e+00],
#>          [0.0000e+00, 0.0000e+00],
#>          [0.0000e+00, 0.0000e+00]]])
# get first element in a tensor
ft3d[1, 1, 1]
#> tensor(6.6692e-14)
bt
#> tensor([[ 80,  45, 150],
#>         [ 41,  19, 127],
#>         [  0,   0,  16]], dtype=torch.uint8)
# [torch.ByteTensor of size 3x3]
ft
#> tensor([[4.7644e-44, 0.0000e+00, 0.0000e+00],
#>         [0.0000e+00, 1.5414e-44, 3.2230e-44],
#>         [1.4013e-45, 5.3249e-44, 2.8026e-45]])
# [torch.FloatTensor of size 3x3]
# create a tensor with a value
torch$full(list(2L, 3L), 3.141592)
#> tensor([[3.1416, 3.1416, 3.1416],
#>         [3.1416, 3.1416, 3.1416]])

Basic Tensor Operations

Add tensors

# add a scalar to a tensor
# 3x5 matrix uniformly distributed between 0 and 1
mat0 <- torch$FloatTensor(3L, 5L)$uniform_(0L, 1L)
mat0 + 0.1
#> tensor([[0.2163, 0.4416, 0.7405, 0.6640, 0.7111],
#>         [0.4478, 0.6105, 0.3057, 1.0389, 0.5661],
#>         [0.8474, 0.8210, 0.6615, 0.3182, 0.9772]])

The expression tensor.index(m) is equivalent to tensor[m].
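
A small illustration of equivalent indexing, using only functions shown elsewhere in this README: selecting rows with an index tensor via torch$index_select yields the same values as plain R bracket indexing.

x <- torch$arange(12L)$view(3L, 4L)
idx <- torch$tensor(list(0L))
torch$index_select(x, 0L, idx)   # first row, selected with an index tensor
x[1, ]                           # same values via R-style indexing (index_select keeps dim 0)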

# add an element of a tensor to a tensor
# fill a 3x5 matrix with 0.1
mat1 <- torch$FloatTensor(3L, 5L)$uniform_(0.1, 0.1)
# a vector with all ones
mat2 <- torch$FloatTensor(5L)$uniform_(1, 1)
mat1[1, 1] + mat2
#> tensor([1.1000, 1.1000, 1.1000, 1.1000, 1.1000])
# add two tensors
mat1 + mat0
#> tensor([[0.2163, 0.4416, 0.7405, 0.6640, 0.7111],
#>         [0.4478, 0.6105, 0.3057, 1.0389, 0.5661],
#>         [0.8474, 0.8210, 0.6615, 0.3182, 0.9772]])
# PyTorch add two tensors
x = torch$rand(5L, 4L)
y = torch$rand(5L, 4L)

print(x$add(y))
#> tensor([[0.8144, 1.4515, 1.2712, 0.9070],
#>         [0.5151, 0.6795, 0.8082, 0.9323],
#>         [0.9739, 0.9622, 1.1908, 0.9362],
#>         [0.9701, 1.2294, 1.4636, 0.7612],
#>         [0.9880, 0.6853, 0.4393, 0.7315]])
print(x + y)
#> tensor([[0.8144, 1.4515, 1.2712, 0.9070],
#>         [0.5151, 0.6795, 0.8082, 0.9323],
#>         [0.9739, 0.9622, 1.1908, 0.9362],
#>         [0.9701, 1.2294, 1.4636, 0.7612],
#>         [0.9880, 0.6853, 0.4393, 0.7315]])

Multiply tensor by scalar

# Multiply tensor by scalar
tensor = torch$ones(4L, dtype=torch$float64)
scalar = np$float64(4.321)
print(scalar)
#> [1] 4.321
print(torch$scalar_tensor(scalar))
#> tensor(4.3210)
(prod = torch$mul(tensor, torch$scalar_tensor(scalar)))
#> tensor([4.3210, 4.3210, 4.3210, 4.3210], dtype=torch.float64)
# short version using generics
(prod = tensor * scalar)
#> tensor([4.3210, 4.3210, 4.3210, 4.3210], dtype=torch.float64)
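
Other arithmetic generics work the same way; a short sketch, assuming subtraction and division are covered by the same S3 generics as + and *:

(diff = tensor - scalar)    # element-wise subtraction via the `-` generic
(quot = tensor / scalar)    # element-wise division via the `/` generic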

NumPy and PyTorch

numpy has been made available as a module in rTorch. We can call functions from numpy by referring to it as np, for example np$a_function. Examples:

# a 2D numpy array  
syn0 <- np$random$rand(3L, 5L)
syn0
#>           [,1]      [,2]      [,3]       [,4]      [,5]
#> [1,] 0.9038473 0.6693252 0.2982889 0.52537259 0.2291053
#> [2,] 0.6670118 0.2364839 0.2983554 0.23475955 0.2886768
#> [3,] 0.9017605 0.5691967 0.7190021 0.07707008 0.5145782
# numpy arrays of zeros
syn1 <- np$zeros(c(5L, 10L))
syn1
#>      [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
#> [1,]    0    0    0    0    0    0    0    0    0     0
#> [2,]    0    0    0    0    0    0    0    0    0     0
#> [3,]    0    0    0    0    0    0    0    0    0     0
#> [4,]    0    0    0    0    0    0    0    0    0     0
#> [5,]    0    0    0    0    0    0    0    0    0     0
# add a scalar to a numpy array
syn1 = syn1 + 0.1
syn1
#>      [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
#> [1,]  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1   0.1
#> [2,]  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1   0.1
#> [3,]  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1   0.1
#> [4,]  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1   0.1
#> [5,]  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1   0.1
# in numpy a multidimensional array needs to be defined with a tuple
# in R we do it with a vector
l1 <- np$ones(c(5L, 5L))
l1
#>      [,1] [,2] [,3] [,4] [,5]
#> [1,]    1    1    1    1    1
#> [2,]    1    1    1    1    1
#> [3,]    1    1    1    1    1
#> [4,]    1    1    1    1    1
#> [5,]    1    1    1    1    1
# matrix multiplication: (3x5) dot (5x10)
np$dot(syn0, syn1)
#>           [,1]      [,2]      [,3]      [,4]      [,5]      [,6]      [,7]
#> [1,] 0.2625939 0.2625939 0.2625939 0.2625939 0.2625939 0.2625939 0.2625939
#> [2,] 0.1725287 0.1725287 0.1725287 0.1725287 0.1725287 0.1725287 0.1725287
#> [3,] 0.2781608 0.2781608 0.2781608 0.2781608 0.2781608 0.2781608 0.2781608
#>           [,8]      [,9]     [,10]
#> [1,] 0.2625939 0.2625939 0.2625939
#> [2,] 0.1725287 0.1725287 0.1725287
#> [3,] 0.2781608 0.2781608 0.2781608
# build a numpy array from three R vectors
X <- np$array(rbind(c(1,2,3), c(4,5,6), c(7,8,9)))
X
#>      [,1] [,2] [,3]
#> [1,]    1    2    3
#> [2,]    4    5    6
#> [3,]    7    8    9
# transpose the array
np$transpose(X)
#>      [,1] [,2] [,3]
#> [1,]    1    4    7
#> [2,]    2    5    8
#> [3,]    3    6    9
# as_tensor. Modifying tensor modifies numpy object as well
a = np$array(list(1, 2, 3))
t = torch$as_tensor(a)
print(t)
#> tensor([1., 2., 3.], dtype=torch.float64)

torch$tensor(list( 1,  2,  3))
#> tensor([1., 2., 3.])
t[1L]$fill_(-1)
#> tensor(-1., dtype=torch.float64)
print(a)
#> [1] -1  2  3

Create tensors

# a random 1D tensor
ft1 <- torch$FloatTensor(np$random$rand(5L))
ft1
#> tensor([0.4738, 0.2879, 0.3424, 0.0044, 0.3120])
# tensor as a 64-bit float
ft2 <- torch$as_tensor(np$random$rand(5L), dtype= torch$float64)
ft2
#> tensor([0.1295, 0.7139, 0.5817, 0.8095, 0.6424], dtype=torch.float64)
# convert tensor to a 16-bit float
ft2_dbl <- torch$as_tensor(ft2, dtype = torch$float16)
ft2_dbl
#> tensor([0.1295, 0.7139, 0.5815, 0.8096, 0.6426], dtype=torch.float16)

Create a tensor of size (5 x 7) with uninitialized memory:

a <- torch$FloatTensor(5L, 7L)
print(a)
#> tensor([[0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
#>          0.0000e+00],
#>         [0.0000e+00, 5.0209e-20, 4.5586e-41, 0.0000e+00, 0.0000e+00, 0.0000e+00,
#>          0.0000e+00],
#>         [3.8194e+11, 3.0722e-41, 1.1057e-19, 0.0000e+00, 0.0000e+00, 0.0000e+00,
#>          9.4167e-43],
#>         [6.7262e-43, 9.1227e+06, 3.0722e-41, 9.1227e+06, 3.0722e-41, 9.4167e-43,
#>          0.0000e+00],
#>         [0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
#>          0.0000e+00]])
# using arange to create a tensor; starts from 0
v = torch$arange(9L)
(v = v$view(3L, 3L))
#> tensor([[0, 1, 2],
#>         [3, 4, 5],
#>         [6, 7, 8]])

Distributions

Initialize a tensor with random values from a normal distribution with mean=0, var=1:

a  <- torch$randn(5L, 7L)
print(a)
#> tensor([[ 2.3758, -0.4211, -1.5005,  1.1152,  0.8154, -0.6480, -2.1379],
#>         [-0.2613, -0.4025,  0.5637, -0.9903,  0.9483,  1.3798,  0.9638],
#>         [ 0.5819, -0.3537,  0.3733, -0.8580,  0.5854,  1.0156, -1.0063],
#>         [-2.2317, -0.4844, -0.1601, -0.1593,  0.8876, -0.9792,  1.9140],
#>         [-0.6835, -0.2189, -0.6588,  0.9796, -1.2463,  0.8335,  0.0454]])
print(a$size())
#> torch.Size([5, 7])

Uniform matrix

library(rTorch)

# 3x5 matrix uniformly distributed between 0 and 1
mat0 <- torch$FloatTensor(3L, 5L)$uniform_(0L, 1L)

# fill a 3x5 matrix with 0.1
mat1 <- torch$FloatTensor(3L, 5L)$uniform_(0.1, 0.1)

# a vector with all ones
mat2 <- torch$FloatTensor(5L)$uniform_(1, 1)

mat0
#> tensor([[0.7818, 0.4073, 0.6300, 0.7939, 0.6365],
#>         [0.8130, 0.7822, 0.0055, 0.0432, 0.1714],
#>         [0.1478, 0.2362, 0.7475, 0.0200, 0.3828]])
mat1
#> tensor([[0.1000, 0.1000, 0.1000, 0.1000, 0.1000],
#>         [0.1000, 0.1000, 0.1000, 0.1000, 0.1000],
#>         [0.1000, 0.1000, 0.1000, 0.1000, 0.1000]])

Binomial distribution

Binomial <- torch$distributions$binomial$Binomial

m = Binomial(100, torch$tensor(list(0 , .2, .8, 1)))
(x = m$sample())
#> tensor([  0.,  22.,  81., 100.])
m = Binomial(torch$tensor(list(list(5.), list(10.))), 
             torch$tensor(list(0.5, 0.8)))
(x = m$sample())
#> tensor([[3., 3.],
#>         [7., 8.]])

Exponential distribution

Exponential <- torch$distributions$exponential$Exponential

m = Exponential(torch$tensor(list(1.0)))
m$sample()  # Exponential distributed with rate=1
#> tensor([0.8019])

Weibull distribution

Weibull <- torch$distributions$weibull$Weibull

m = Weibull(torch$tensor(list(1.0)), torch$tensor(list(1.0)))
m$sample()  # sample from a Weibull distribution with scale=1, concentration=1
#> tensor([0.4590])

Tensor data types

# Default data type
torch$tensor(list(1.2, 3))$dtype  # default for floating point is torch.float32
#> torch.float32
# change default data type to float64
torch$set_default_dtype(torch$float64)
torch$tensor(list(1.2, 3))$dtype         # a new floating point tensor
#> torch.float64
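
Note that set_default_dtype changes a global setting. A brief follow-up sketch (assuming torch$float32 is exposed like the other dtypes) to restore the default so later examples keep producing float32 tensors:

torch$set_default_dtype(torch$float32)
torch$tensor(list(1.2, 3))$dtype         # back to torch.float32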

This is a very common operation in machine learning:

# convert tensor to a numpy array
a = torch$rand(5L, 4L)
b = a$numpy()
print(b)
#>           [,1]       [,2]        [,3]      [,4]
#> [1,] 0.2332372 0.70280876 0.342186279 0.9221623
#> [2,] 0.8590770 0.41800424 0.819966310 0.7328199
#> [3,] 0.3597716 0.76391045 0.606901012 0.6638108
#> [4,] 0.9411453 0.77198088 0.495232307 0.3418834
#> [5,] 0.7322847 0.07950783 0.006881642 0.3518654
# convert a numpy array to a tensor
np_a = np$array(c(c(3, 4), c(3, 6)))
t_a = torch$from_numpy(np_a)
print(t_a)
#> tensor([3., 4., 3., 6.])

Tensor resizing

x = torch$randn(2L, 3L)            # Size 2x3
y = x$view(6L)                    # Resize x to size 6
z = x$view(-1L, 2L)                # Size 3x2
print(y)
#> tensor([-1.0349,  0.5687,  0.2354, -0.2582, -1.3115, -0.3551])
print(z)
#> tensor([[-1.0349,  0.5687],
#>         [ 0.2354, -0.2582],
#>         [-1.3115, -0.3551]])

Concatenate tensors

# concatenate tensors
x = torch$randn(2L, 3L)
print(x)
#> tensor([[-1.1337, -0.8079, -1.3109],
#>         [ 1.0822,  0.1185,  0.1843]])

# concatenate tensors by dim=0
torch$cat(list(x, x, x), 0L)
#> tensor([[-1.1337, -0.8079, -1.3109],
#>         [ 1.0822,  0.1185,  0.1843],
#>         [-1.1337, -0.8079, -1.3109],
#>         [ 1.0822,  0.1185,  0.1843],
#>         [-1.1337, -0.8079, -1.3109],
#>         [ 1.0822,  0.1185,  0.1843]])

# concatenate tensors by dim=1
torch$cat(list(x, x, x), 1L)
#> tensor([[-1.1337, -0.8079, -1.3109, -1.1337, -0.8079, -1.3109, -1.1337, -0.8079,
#>          -1.3109],
#>         [ 1.0822,  0.1185,  0.1843,  1.0822,  0.1185,  0.1843,  1.0822,  0.1185,
#>           0.1843]])
# 0 1 2
# 3 4 5
# 6 7 8
v = torch$arange(9L)
(v = v$view(3L, 3L))
#> tensor([[0, 1, 2],
#>         [3, 4, 5],
#>         [6, 7, 8]])

Reshape tensors

# ----- Reshape tensors -----
img <- torch$ones(3L, 28L, 28L)
print(img$size())
#> torch.Size([3, 28, 28])

img_chunks <- torch$chunk(img, chunks = 3L, dim = 0L)
print(length(img_chunks))
#> [1] 3

# 1st chunk member
img_chunk_1 <- img_chunks[[1]]
print(img_chunk_1$size())
#> torch.Size([1, 28, 28])
print(img_chunk_1$sum())
#> tensor(784.)

# 2nd chunk member
img_chunk_1 <- img_chunks[[2]]
print(img_chunk_1$size())
#> torch.Size([1, 28, 28])
print(img_chunk_1$sum())
#> tensor(784.)


# index_select. get layer 1
indices = torch$tensor(c(0L))
img2 <- torch$index_select(img, dim = 0L, index = indices)
print(img2$size())
#> torch.Size([1, 28, 28])
print(img2$sum())
#> tensor(784.)

# index_select. get layer 2
indices = torch$tensor(c(1L))
img2 <- torch$index_select(img, dim = 0L, index = indices)
print(img2$size())
#> torch.Size([1, 28, 28])
print(img2$sum())
#> tensor(784.)

# index_select. get layer 3
indices = torch$tensor(c(2L))
img2 <- torch$index_select(img, dim = 0L, index = indices)
print(img2$size())
#> torch.Size([1, 28, 28])
print(img2$sum())
#> tensor(784.)

Special tensors

Identity matrix

# identity matrix
eye = torch$eye(3L)              # Create an identity 3x3 tensor
print(eye)
#> tensor([[1., 0., 0.],
#>         [0., 1., 0.],
#>         [0., 0., 1.]])

Ones

(v = torch$ones(10L))              # A tensor of size 10 containing all ones
#> tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
(v = torch$ones(2L, 1L, 2L, 1L))      # Size 2x1x2x1
#> tensor([[[[1.],
#>           [1.]]],
#> 
#> 
#>         [[[1.],
#>           [1.]]]])
v = torch$ones_like(eye)     # A tensor with same shape as eye. Fill it with 1.
v
#> tensor([[1., 1., 1.],
#>         [1., 1., 1.],
#>         [1., 1., 1.]])

Zeros

(z = torch$zeros(10L))             # A tensor of size 10 containing all zeros
#> tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])

Tensor fill

(v = torch$ones(3L, 3L))
#> tensor([[1., 1., 1.],
#>         [1., 1., 1.],
#>         [1., 1., 1.]])
v[1L, ]$fill_(2L)         # fill row 1 with 2s
#> tensor([2., 2., 2.])
v[2L, ]$fill_(3L)         # fill row 2 with 3s
#> tensor([3., 3., 3.])
print(v)
#> tensor([[2., 2., 2.],
#>         [3., 3., 3.],
#>         [1., 1., 1.]])
# Initialize a Tensor with a range of values
v = torch$arange(10L)             # similar to Python's range(10) but creating a Tensor
(v = torch$arange(0L, 10L, step = 1L))  # Size 10. Similar to range(0, 10, 1)
#> tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

Initialize a linear or log scale Tensor

# Initialize a linear or log scale Tensor

# Create a Tensor with 10 linearly spaced points between 1 and 10, inclusive
(v = torch$linspace(1L, 10L, steps = 10L)) 
#> tensor([ 1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9., 10.])

# Size 5: 1.0e-10, 1.0e-05, 1.0e+00, 1.0e+05, 1.0e+10
(v = torch$logspace(start=-10L, end = 10L, steps = 5L)) 
#> tensor([1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])

Inplace / Out-of-place

a$fill_(3.5)
#> tensor([[3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000]])
# a has now been filled with the value 3.5

# add a scalar to a tensor
b <- a$add(4.0)

# a is still filled with 3.5
# new tensor b is returned with values 3.5 + 4.0 = 7.5

print(a)
#> tensor([[3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000]])
print(b)
#> tensor([[7.5000, 7.5000, 7.5000, 7.5000],
#>         [7.5000, 7.5000, 7.5000, 7.5000],
#>         [7.5000, 7.5000, 7.5000, 7.5000],
#>         [7.5000, 7.5000, 7.5000, 7.5000],
#>         [7.5000, 7.5000, 7.5000, 7.5000]])
# this will throw an error because we don't yet have a function for assignment
a[1, 1] <- 7.7
print(a)
# Error in a[1, 1] <- 7.7 : object of type 'environment' is not subsettable

Some operations, like narrow, do not have in-place versions, and hence .narrow_ does not exist. Similarly, some operations, like fill_, do not have an out-of-place version, so .fill does not exist.
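
A brief sketch of this naming convention (assuming narrow and fill_ behave in rTorch as they do in PyTorch): narrow returns a view out-of-place, and fill_ then modifies that view, and therefore the original tensor, in place.

x <- torch$ones(3L, 3L)
y <- x$narrow(0L, 0L, 2L)   # out-of-place: a view of the first two rows
y$fill_(9)                  # in-place: fills the view, and therefore x
print(x)                    # rows 1-2 are now 9, row 3 is still 1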

# a[[0L, 3L]]
a[1, 4]
#> tensor(3.5000)

Access to tensor elements

# replace an element at position 0, 0
(new_tensor = torch$Tensor(list(list(1, 2), list(3, 4))))
#> tensor([[1., 2.],
#>         [3., 4.]])

print(new_tensor[1L, 1L])
#> tensor(1.)
new_tensor[1L, 1L]$fill_(5)
#> tensor(5.)
print(new_tensor)   # tensor([[ 5.,  2.],[ 3.,  4.]])
#> tensor([[5., 2.],
#>         [3., 4.]])
# access an element at position 1, 0
print(new_tensor[2L, 1L])           # tensor([ 3.])
#> tensor(3.)
print(new_tensor[2L, 1L]$item())    # 3.
#> [1] 3
# Select indices
x = torch$randn(3L, 4L)
print(x)
#> tensor([[-0.0333, -0.1179,  0.0561,  1.4256],
#>         [ 1.0540, -0.2959, -0.8004, -1.0370],
#>         [-0.7412, -0.1924, -0.2997, -2.1003]])

# Select indices, dim=0
indices = torch$tensor(list(0L, 2L))
torch$index_select(x, 0L, indices)
#> tensor([[-0.0333, -0.1179,  0.0561,  1.4256],
#>         [-0.7412, -0.1924, -0.2997, -2.1003]])

# "Select indices, dim=1
torch$index_select(x, 1L, indices)
#> tensor([[-0.0333,  0.0561],
#>         [ 1.0540, -0.8004],
#>         [-0.7412, -0.2997]])
# Take by indices
src = torch$tensor(list(list(4, 3, 5),
                        list(6, 7, 8)) )
print(src)
#> tensor([[4., 3., 5.],
#>         [6., 7., 8.]])
print( torch$take(src, torch$tensor(list(0L, 2L, 5L))) )
#> tensor([4., 5., 8.])

Tensor operations

Cross product

m1 = torch$ones(3L, 5L)
m2 = torch$ones(3L, 5L)
v1 = torch$ones(3L)
# Cross product
# Size 3x5
(r = torch$cross(m1, m2))
#> tensor([[0., 0., 0., 0., 0.],
#>         [0., 0., 0., 0., 0.],
#>         [0., 0., 0., 0., 0.]])

Dot product

# Dot product of 2 tensors

p <- torch$Tensor(list(4L, 2L))
q <- torch$Tensor(list(3L, 1L))                   

(r = torch$dot(p, q)) # 14
#> tensor(14.)
(r <- p %.*% q)
#> tensor(14.)

Logical operations

m0 = torch$zeros(3L, 5L)
m1 = torch$ones(3L, 5L)
m2 = torch$eye(3L, 5L)

print(m1 == m0)
#> tensor([[False, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False]], dtype=torch.bool)
print(m1 != m1)
#> tensor([[False, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False]], dtype=torch.bool)
print(m2 == m2)
#> tensor([[True, True, True, True, True],
#>         [True, True, True, True, True],
#>         [True, True, True, True, True]], dtype=torch.bool)
# AND
m1 & m1
#> tensor([[True, True, True, True, True],
#>         [True, True, True, True, True],
#>         [True, True, True, True, True]], dtype=torch.bool)
# OR
m0 | m2
#> tensor([[ True, False, False, False, False],
#>         [False,  True, False, False, False],
#>         [False, False,  True, False, False]], dtype=torch.bool)
# OR
m1 | m2
#> tensor([[True, True, True, True, True],
#>         [True, True, True, True, True],
#>         [True, True, True, True, True]], dtype=torch.bool)
# all_boolean <- function(x) {
#   # convert tensor of 1s and 0s to a unique boolean
#   as.logical(torch$all(x)$numpy())
# }

# tensor is less than
A <- torch$ones(60000L, 1L, 28L, 28L)
C <- A * 0.5

# is C < A
all(torch$lt(C, A))
#> tensor(1, dtype=torch.uint8)
all(C < A)
#> tensor(1, dtype=torch.uint8)
# is A < C
all(A < C)
#> tensor(0, dtype=torch.uint8)
# tensor is greater than
A <- torch$ones(60000L, 1L, 28L, 28L)
D <- A * 2.0
all(torch$gt(D, A))
#> tensor(1, dtype=torch.uint8)
all(torch$gt(A, D))
#> tensor(0, dtype=torch.uint8)
# tensor is less than or equal
A1 <- torch$ones(60000L, 1L, 28L, 28L)
all(torch$le(A1, A1))
#> tensor(1, dtype=torch.uint8)
all(A1 <= A1)
#> tensor(1, dtype=torch.uint8)

# tensor is greater than or equal
A0 <- torch$zeros(60000L, 1L, 28L, 28L)
all(torch$ge(A0, A0))
#> tensor(1, dtype=torch.uint8)
all(A0 >= A0)
#> tensor(1, dtype=torch.uint8)

all(A1 >= A0)
#> tensor(1, dtype=torch.uint8)
all(A1 <= A0)
#> tensor(0, dtype=torch.uint8)
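
To collapse these uint8 results into a single R logical, here is a small convenience sketch based on the commented-out all_boolean helper above:

all_boolean <- function(x) as.logical(torch$all(x)$numpy())
all_boolean(torch$lt(C, A))   # expected TRUE: every element of C is below A
all_boolean(A1 <= A0)         # expected FALSE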

Logical NOT

all_true <- torch$BoolTensor(list(TRUE, TRUE, TRUE, TRUE))
all_true
#> tensor([True, True, True, True], dtype=torch.bool)

# logical NOT
not_all_true <- !all_true
not_all_true
#> tensor([False, False, False, False], dtype=torch.bool)
diag <- torch$eye(5L)
diag
#> tensor([[1., 0., 0., 0., 0.],
#>         [0., 1., 0., 0., 0.],
#>         [0., 0., 1., 0., 0.],
#>         [0., 0., 0., 1., 0.],
#>         [0., 0., 0., 0., 1.]])

# logical NOT
not_diag <- !diag

# convert to integer
not_diag$to(dtype=torch$uint8)
#> tensor([[0, 1, 1, 1, 1],
#>         [1, 0, 1, 1, 1],
#>         [1, 1, 0, 1, 1],
#>         [1, 1, 1, 0, 1],
#>         [1, 1, 1, 1, 0]], dtype=torch.uint8)
