Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
ba71874
Add cupy ravel
Pinkyboi Oct 26, 2022
8207c2d
Add docstring for ravel
Pinkyboi Oct 26, 2022
d949086
fix cuda copy array printing
Pinkyboi Oct 26, 2022
4d56a59
Fix errors in Kernel Semantic checks
Pinkyboi Oct 26, 2022
8719440
Add tests for cupy_ravel
Pinkyboi Oct 26, 2022
319d71d
Remove unused imports
Pinkyboi Oct 27, 2022
a136516
revert back to using more specific type for data pointer
Pinkyboi Oct 27, 2022
c9b80dd
Merge branch 'cuda_main_temp' into cuda_cupy_ravel
Pinkyboi Oct 27, 2022
918a63d
Add memory location to cupy ravel
Pinkyboi Oct 27, 2022
1679229
Add cupy printer
Pinkyboi Oct 27, 2022
dbb62a9
Avoid calculating rank in ravel
Pinkyboi Oct 28, 2022
dcdc81f
Separate tests and remove Ravel infere type
Pinkyboi Nov 4, 2022
7c1ce50
Implement cupy ravel with AliasAssign
Pinkyboi Nov 5, 2022
adcf864
Fix address printing in cupy_ravel
Pinkyboi Nov 5, 2022
d938561
Fix typo in cuda_ravel implementation
Pinkyboi Nov 5, 2022
a16bbee
Fix case of passed variable
Pinkyboi Nov 5, 2022
5878446
avoid call to _print
Pinkyboi Nov 5, 2022
a895e71
Fix typo in dest nd
Pinkyboi Nov 6, 2022
0c9c070
Remove memory location option for cupy functions
Pinkyboi Nov 11, 2022
573e03b
Avoid if statement for memory location
Pinkyboi Nov 11, 2022
3d97f17
Add tests for cupy ravel
Pinkyboi Nov 11, 2022
6127038
Line endings
EmilyBourne Dec 30, 2022
2a94f14
Remove slot from CupyRavel
Pinkyboi Dec 30, 2022
a0d2c4b
Merge branch 'cuda_main_temp' into cuda_cupy_ravel
Pinkyboi Dec 31, 2022
27c4250
Fix typos in cupy_ravel
Pinkyboi Dec 31, 2022
d83f526
Fix codacy problems
Pinkyboi Jan 1, 2023
5ff5c22
Use managed_memory to allocate shape and strides
Pinkyboi Jan 2, 2023
d472a7c
Do a copy for host arrays
Pinkyboi Jan 6, 2023
be2fd09
Remove _attribute_nodes from CupyRavel
Pinkyboi Jan 7, 2023
3fb754f
Use tuple instead of list for variable in test
Pinkyboi Jan 7, 2023
def5e4e
Remove unnecessary arg property getter CupyRavel
Pinkyboi Jan 7, 2023
6d8489d
Add case of PyccelArraySize for cupy ravel
Pinkyboi Jan 10, 2023
c970bf7
Use cudaMallocManaged in cupy_ravel
Pinkyboi Jan 11, 2023
a8c5db4
Clean up cupy ravel tests
Pinkyboi Jan 11, 2023
cff898e
Merge branch 'cuda_main_temp' into cuda_cupy_ravel
Pinkyboi Jan 13, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyccel/ast/cudaext.py
Original file line number Diff line number Diff line change
Expand Up @@ -306,4 +306,4 @@ def __new__(cls, dim=0):
}
cuda_mod = Module('cuda',
variables = cuda_constants.values(),
funcs = cuda_funcs.values())
funcs = cuda_funcs.values())
40 changes: 39 additions & 1 deletion pyccel/ast/cupyext.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@
'CupyOnesLike',
'CupyZeros',
'CupyZerosLike',
'CupyRavel'
'Shape'
)

Expand Down Expand Up @@ -83,7 +84,7 @@ class CupyArray(CupyNewArray):
arg : list, tuple, PythonList

"""
__slots__ = ('_arg','_dtype','_precision','_shape','_rank','_order')
__slots__ = ('_arg','_dtype','_precision','_shape','_rank','_order', '_memory_location')
_attribute_nodes = ('_arg',)
name = 'array'

Expand Down Expand Up @@ -131,6 +132,7 @@ def __init__(self, arg, dtype=None, order='C'):
self._dtype = dtype
self._order = order
self._precision = prec
self._memory_location = 'device'
super().__init__()

def __str__(self):
Expand All @@ -140,6 +142,12 @@ def __str__(self):
def arg(self):
return self._arg

@property
def memory_location(self):
    """ Indicate if the array is in the host or device memory.

    Returns the location string stored at construction time
    (set to 'device' in ``CupyArray.__init__``).
    """
    return self._memory_location

#==============================================================================
class CupyArange(CupyNewArray):
"""
Expand Down Expand Up @@ -454,6 +462,35 @@ def __str__(self):

#==============================================================================

class CupyRavel(CupyArray):
    """
    Class representing a call to the cupy ravel function, which
    returns a flattened (1-D) version of the passed array.

    Parameters
    ==========
    arg : PyccelAstNode | list | tuple
        The array expression (rank > 0) to be flattened.
    """
    name = 'ravel'
    __slots__ = ()
    def __new__(cls, arg):
        if not isinstance(arg, (list, tuple, PyccelAstNode)):
            raise TypeError('Unknown type of %s.' % type(arg))
        # Ravel only makes sense on arrays: reject scalars with a clear message
        # (previously this branch re-used the misleading "Unknown type" text).
        # getattr guards plain list/tuple arguments, which have no `rank`
        # attribute but are admitted by the isinstance check above.
        if getattr(arg, 'rank', 1) == 0:
            raise TypeError('ravel expects an array argument, but %s is a scalar.' % type(arg))
        return super().__new__(cls)

    def __init__(self, arg):
        super().__init__(arg = arg)
        # Collapse the shape computed by CupyArray into a single flat dimension.
        if len(self.shape) == 1:
            # Already 1-D: keep the existing size node (LiteralInteger or
            # PyccelArraySize) untouched.
            flat_size = self.shape[0]
        else:
            # Extract the integer values first, then take their product.
            # (Reducing over the nodes with `x.python_value * y.python_value`
            # broke for rank > 2, because the intermediate result of the first
            # multiplication is a plain int with no `python_value` attribute.)
            flat_size = LiteralInteger(reduce(lambda x, y: x * y,
                                             [s.python_value for s in self.shape]))
        self._shape = [flat_size]
        self._rank = 1


#==============================================================================
cupy_funcs = {
# ... array creation routines
'full' : PyccelFunctionDef('full' , CupyFull),
Expand All @@ -466,6 +503,7 @@ def __str__(self):
'ones_like' : PyccelFunctionDef('ones_like' , CupyOnesLike),
'array' : PyccelFunctionDef('array' , CupyArray),
'arange' : PyccelFunctionDef('arange' , CupyArange),
'ravel' : PyccelFunctionDef('ravel' , CupyRavel),
# ...
'shape' : PyccelFunctionDef('shape' , Shape),
'size' : PyccelFunctionDef('size' , CupyArraySize),
Expand Down
44 changes: 37 additions & 7 deletions pyccel/codegen/printing/ccudacode.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
from pyccel.ast.numpyext import NumpyFull, NumpyArray, NumpyArange
from pyccel.ast.numpyext import NumpyReal, NumpyImag, NumpyFloat

from pyccel.ast.cupyext import CupyFull, CupyArray, CupyArange
from pyccel.ast.cupyext import CupyFull, CupyArray, CupyArange, CupyRavel

from pyccel.ast.cudaext import CudaCopy, cuda_Internal_Var, CudaArray

Expand Down Expand Up @@ -424,6 +424,37 @@ def _print_Assign(self, expr):
rhs = self._print(expr.rhs)
return prefix_code+'{} = {};\n'.format(lhs, rhs)

def _print_AliasAssign(self, expr):
    """ Print the C code for an alias (pointer) assignment.

    Dispatches on the kind of right-hand side: a CupyRavel call is
    lowered to the `cupy_ravel` stdlib helper, an ndarray variable to
    `alias_assign`/`transpose_alias_assign`, and anything else to a
    plain C pointer assignment.
    """
    lhs_var = expr.lhs
    rhs_var = expr.rhs

    # Addresses are needed for the helper calls, which take t_ndarray*.
    lhs_address = ObjectAddress(lhs_var)
    rhs_address = ObjectAddress(rhs_var)

    # The below condition handles the case of reassigning a pointer to an array view.
    # Setting the pointer's is_view attribute to false so it can be ignored by the free_pointer function.

    if not self.stored_in_c_pointer(lhs_var) and \
            isinstance(lhs_var, Variable) and lhs_var.is_ndarray:
        if isinstance(rhs_var, CupyRavel):
            lhs = self._print(lhs_address)
            # NOTE(review): rhs_var is interpolated via str() rather than
            # self._print — presumably relying on the node's __str__;
            # confirm this emits valid C for all CupyRavel arguments.
            return f'cupy_ravel({lhs}, {rhs_var});\n'
        rhs = self._print(rhs_var)
        if isinstance(rhs_var, Variable) and rhs_var.is_ndarray:
            lhs = self._print(lhs_address)
            # Same memory order: a direct alias suffices; otherwise the
            # strides must be rewritten for the transposed view.
            if lhs_var.order == rhs_var.order:
                return f'alias_assign({lhs}, {rhs});\n'
            else:
                return f'transpose_alias_assign({lhs}, {rhs});\n'
        else:
            # Non-ndarray rhs expression: plain assignment into the lhs value.
            lhs = self._print(lhs_var)
            return f'{lhs} = {rhs};\n'
    else:
        # lhs is itself stored as a C pointer (or not an ndarray):
        # copy the address directly.
        lhs = self._print(lhs_address)
        rhs = self._print(rhs_address)

        return f'{lhs} = {rhs};\n'

def arrayFill(self, expr):
""" print the assignment of a NdArray

Expand Down Expand Up @@ -521,19 +552,18 @@ def copy_CudaArray_Data(self, expr):
declare_dtype = self.find_in_dtype_registry(self._print(rhs.dtype), rhs.precision)
dtype = self.find_in_ndarray_type_registry(self._print(rhs.dtype), rhs.precision)
arg = rhs.arg if isinstance(rhs, (CudaArray, CupyArray)) else rhs
if rhs.rank > 1:
# flattening the args to use them in C initialization.
arg = self._flatten_list(arg)

self.add_import(c_imports['string'])
if isinstance(arg, Variable):
arg = self._print(arg)
cpy_data = "cudaMemcpy({0}.raw_data, {1}.{2}, {0}.buffer_size, cudaMemcpyHostToDevice);".format(lhs, arg, dtype)
cpy_data = f"cudaMemcpy({lhs}.{dtype}, {arg}.{dtype}, {lhs}.buffer_size, cudaMemcpyHostToDevice);"
return '%s\n' % (cpy_data)
else :
if arg.rank > 1:
arg = self._flatten_list(arg)
arg = ', '.join(self._print(i) for i in arg)
dummy_array = "%s %s[] = {%s};\n" % (declare_dtype, dummy_array_name, arg)
cpy_data = "cudaMemcpy({0}.raw_data, {1}, {0}.buffer_size, cudaMemcpyHostToDevice);".format(self._print(lhs), dummy_array_name, dtype)
target_array_name = self._print(lhs)
cpy_data = f"cudaMemcpy({target_array_name}.{dtype}, {dummy_array_name}, {target_array_name}.buffer_size, cudaMemcpyHostToDevice);"
return '%s%s\n' % (dummy_array, cpy_data)

def _print_CudaSynchronize(self, expr):
Expand Down
15 changes: 15 additions & 0 deletions pyccel/parser/semantic.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@
from pyccel.ast.core import Assert

from pyccel.ast.class_defs import NumpyArrayClass, TupleClass, get_cls_base, CudaArrayClass
from pyccel.ast.cupyext import CupyRavel

from pyccel.ast.datatypes import NativeRange, str_dtype
from pyccel.ast.datatypes import NativeSymbol
Expand Down Expand Up @@ -493,6 +494,20 @@ def _infere_type(self, expr, **settings):
d_var['cls_base' ] = NumpyArrayClass
return d_var

elif isinstance(expr, CupyRavel):
if isinstance(expr.arg, Variable) and expr.arg.memory_location == "device":
d_var['memory_handling'] = 'alias'
else:
d_var['memory_handling'] = 'heap'
d_var['memory_location'] = expr.memory_location
d_var['datatype' ] = expr.dtype
d_var['shape' ] = expr.shape
d_var['rank' ] = expr.rank
d_var['precision' ] = expr.precision
d_var['order' ] = None
d_var['cls_base' ] = CudaNewArray
return d_var

elif isinstance(expr, CupyNewArray):
d_var['datatype' ] = expr.dtype
d_var['memory_handling'] = 'heap' if expr.rank > 0 else 'stack'
Expand Down
11 changes: 11 additions & 0 deletions pyccel/stdlib/cuda_ndarrays/cuda_ndarrays.cu
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,17 @@ t_ndarray cuda_array_create(int32_t nd, int64_t *shape,
return (arr);
}

/*
 * Build a flattened (1-D) view `dest` of the array `src` without copying
 * the underlying data buffer.
 *
 * dest : out parameter, overwritten with a shallow copy of `src` whose
 *        shape/strides describe a single dimension of `src.length` elements.
 * src  : the source array; its data pointer is shared with `dest`.
 */
void cupy_ravel(t_ndarray *dest, t_ndarray src)
{
    *dest = src;                  /* shallow copy: dest shares src's data buffer */
    dest->nd = 1;
    /* Fresh single-element shape/strides in managed memory so both host and
     * device code can dereference them. */
    cudaMallocManaged(&(dest->shape), sizeof(int64_t));
    cudaMallocManaged(&(dest->strides), sizeof(int64_t));
    *(dest->shape) = src.length;  /* one dimension = total element count */
    *(dest->strides) = 1;         /* contiguous flat layout */
    dest->is_view = true;         /* data is owned by src, not by dest */
    /* NOTE(review): the cudaMallocManaged return codes are not checked, and
     * the managed shape/strides buffers look like they leak when the view is
     * released — confirm against cuda_free_pointer's handling of views. */
}

int32_t cuda_free_array(t_ndarray arr)
{
if (arr.shape == NULL)
Expand Down
4 changes: 3 additions & 1 deletion pyccel/stdlib/cuda_ndarrays/cuda_ndarrays.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,9 @@ void _cuda_array_fill_int64(int64_t c, t_ndarray arr);
__global__
void _cuda_array_fill_double(double c, t_ndarray arr);

t_ndarray cuda_array_create(int32_t nd, int64_t *shape, enum e_types type, bool is_view, enum e_memory_locations location);
void cupy_ravel(t_ndarray *dest, t_ndarray src);

t_ndarray cuda_array_create(int32_t nd, int64_t *shape, enum e_types type, bool is_view, enum e_memory_locations location);
int32_t cuda_free_array(t_ndarray dump);
int32_t cuda_free_pointer(t_ndarray dump);
#endif
11 changes: 11 additions & 0 deletions tests/internal/scripts/ccuda/cupy_ravel.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# pylint: disable=missing-function-docstring, disable=unused-variable, missing-module-docstring

# Pyccel translation test: cp.ravel applied to nested and flat literal
# list arguments.
import cupy as cp

if __name__ == '__main__':
    threads_per_block = 32
    n_blocks = 1
    # ravel of a 2-D literal -> 1-D array of 4 elements
    arr1 = cp.ravel([[1,2],[1,3]])
    # ravel of an already-flat literal
    arr2 = cp.ravel([1,2,3,4])
12 changes: 12 additions & 0 deletions tests/internal/scripts/ccuda/cupy_ravel_variable.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# pylint: disable=missing-function-docstring, disable=unused-variable, missing-module-docstring

# Pyccel translation test: cp.ravel applied to host-resident, device-resident
# and plain-literal inputs.
import cupy as cp
from pyccel import cuda

if __name__ == '__main__':
    # Same 2x2 data presented three ways.
    c = ((1, 2), (1, 3))
    host_arr = cuda.array(c, dtype=int)
    device_arr = cuda.array(c, dtype=int, memory_location='device')
    arr1 = cp.ravel(host_arr)    # ravel of a host array
    arr2 = cp.ravel(device_arr)  # ravel of a device array (alias path, per semantic stage)
    arr3 = cp.ravel(c)           # ravel of a plain tuple literal