@@ -116,8 +116,7 @@ def _print_CudaSynchronize(self, expr):
         return 'cudaDeviceSynchronize();\n'

     def _print_CudaEmpty(self, expr):
-        print(expr)
-        return 'cudaDeviceSynchronize();\n'
+        return 'cuda_array_create(1, (int64_t[]){INT64_C(10)}, nd_double, false, allocateMemoryOnHost);\n'
     def _print_ModuleHeader(self, expr):
         self.set_scope(expr.module.scope)
         self._in_header = True
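
Note on `_print_CudaEmpty`: the new body removes the leftover debug `print(expr)` and the copy-pasted `cudaDeviceSynchronize()` return, but the replacement is a hardcoded placeholder. It always allocates a rank-1 array of 10 doubles in host memory, ignores the shape and dtype carried by `expr`, and never binds the result to a target variable. A minimal sketch of the C this string expands to, assuming `cuda_array_create`, `nd_double` and `allocateMemoryOnHost` come from the `cuda_ndarrays` runtime header imported below:

    /* Emitted verbatim by _print_CudaEmpty: the created array is not
       assigned to any variable, so the allocation is unreachable. */
    cuda_array_create(1, (int64_t[]){INT64_C(10)}, nd_double, false, allocateMemoryOnHost);

Deriving the rank, shape and dtype from `expr` (as `_print_Allocate` does below) would make this usable beyond a smoke test.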
@@ -158,7 +157,7 @@ def _print_Allocate(self, expr):
         else:
             raise NotImplementedError(f"Don't know how to index {variable.class_type} type")
         shape_dtype = self.get_c_type(NumpyInt64Type())
-        shape_Assign = "(" + shape_dtype + "[]) {" + shape + "}"
+        shape_Assign = "int64_t shape_Assign[] = {" + shape + "};\n"
         is_view = 'false' if variable.on_heap else 'true'
         memory_location = expr.variable.memory_location
         if memory_location in ('device', 'host'):
@@ -167,8 +166,8 @@ def _print_Allocate(self, expr):
             memory_location = 'managedMemory'
         self.add_import(c_imports['cuda_ndarrays'])
         self.add_import(c_imports['ndarrays'])
-        alloc_code = f"{self._print(expr.variable)} = cuda_array_create({variable.rank}, {shape_Assign}, {dtype}, {is_view}, {memory_location});\n"
-        return f'{alloc_code}'
+        alloc_code = f"{self._print(expr.variable)} = cuda_array_create({variable.rank}, shape_Assign, {dtype}, {is_view}, {memory_location});\n"
+        return f'{shape_Assign}{alloc_code}'

     def _print_Deallocate(self, expr):
         var_code = self._print(expr.variable)
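
Review note on the `_print_Allocate` change: the shape used to be passed inline as a C compound literal built from `shape_dtype`; it is now emitted as a named `int64_t` array declared just before the allocation, with the identifier passed to `cuda_array_create`. A sketch of the generated C, assuming a hypothetical rank-2 double array `arr` whose `shape` prints as `INT64_C(3), INT64_C(4)` and whose memory location resolves to an `allocateMemoryOnDevice` flag (named by analogy with the `allocateMemoryOnHost` seen above):

    /* Before: shape passed as a compound literal */
    arr = cuda_array_create(2, (int64_t[]){INT64_C(3), INT64_C(4)}, nd_double, false, allocateMemoryOnDevice);

    /* After: shape declared first, then referenced by name */
    int64_t shape_Assign[] = {INT64_C(3), INT64_C(4)};
    arr = cuda_array_create(2, shape_Assign, nd_double, false, allocateMemoryOnDevice);

Two caveats: the declaration always reuses the fixed name `shape_Assign`, so printing two allocations in the same C scope would produce a redefinition error (deriving the name from the target variable would avoid the collision), and `shape_dtype` is no longer interpolated into the string, leaving that local assignment dead.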