@@ -446,16 +446,16 @@ cdef gpucontext *kernel_context(GpuKernel k) except NULL:
         raise GpuArrayException, "Invalid kernel or destroyed context"
     return res
 
-cdef int kernel_sched(GpuKernel k, size_t n, size_t *ls, size_t *gs) except -1:
+cdef int kernel_sched(GpuKernel k, size_t n, size_t *gs, size_t *ls) except -1:
     cdef int err
-    err = GpuKernel_sched(&k.k, n, ls, gs)
+    err = GpuKernel_sched(&k.k, n, gs, ls)
     if err != GA_NO_ERROR:
         raise get_exc(err), kernel_error(k, err)
 
-cdef int kernel_call(GpuKernel k, unsigned int n, const size_t *ls,
-                     const size_t *gs, size_t shared, void **args) except -1:
+cdef int kernel_call(GpuKernel k, unsigned int n, const size_t *gs,
+                     const size_t *ls, size_t shared, void **args) except -1:
     cdef int err
-    err = GpuKernel_call(&k.k, n, ls, gs, shared, args)
+    err = GpuKernel_call(&k.k, n, gs, ls, shared, args)
     if err != GA_NO_ERROR:
         raise get_exc(err), kernel_error(k, err)
 
@@ -2105,10 +2105,10 @@ cdef class GpuKernel:
     sure to test against the size of your data.
 
     If you want more control over thread allocation you can use the
-    `ls` and `gs` parameters like so::
+    `gs` and `ls` parameters like so::
 
         k = GpuKernel(...)
-        k(param1, param2, ls=ls, gs=gs)
+        k(param1, param2, gs=gs, ls=ls)
 
     If you choose to use this interface, make sure to stay within the
     limits of `k.maxlsize` and `ctx.maxgsize` or the call will fail.
@@ -2192,12 +2192,12 @@ cdef class GpuKernel:
         finally:
             free(_types)
 
-    def __call__(self, *args, n=None, ls=None, gs=None, shared=0):
+    def __call__(self, *args, n=None, gs=None, ls=None, shared=0):
         if n == None and (ls == None or gs == None):
             raise ValueError, "Must specify size (n) or both gs and ls"
-        self.do_call(n, ls, gs, args, shared)
+        self.do_call(n, gs, ls, args, shared)
 
-    cdef do_call(self, py_n, py_ls, py_gs, py_args, size_t shared):
+    cdef do_call(self, py_n, py_gs, py_ls, py_args, size_t shared):
         cdef size_t n
         cdef size_t gs[3]
         cdef size_t ls[3]
@@ -2264,8 +2264,8 @@ cdef class GpuKernel:
         if nd != 1:
             raise ValueError, "n is specified and nd != 1"
         n = py_n
-        kernel_sched(self, n, &ls[0], &gs[0])
-        kernel_call(self, nd, ls, gs, shared, self.callbuf)
+        kernel_sched(self, n, &gs[0], &ls[0])
+        kernel_call(self, nd, gs, ls, shared, self.callbuf)
 
     cdef _setarg(self, unsigned int index, int typecode, object o):
         if typecode == GA_BUFFER:
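
For reference, a minimal sketch of the two call forms exposed by the updated `__call__` signature; `k`, its arguments and the `gs`/`ls` values below are placeholders, not part of this patch::

    k = GpuKernel(...)        # construction elided, as in the docstring above

    # simplest form: give the flattened problem size n and let kernel_sched
    # derive a launch configuration
    k(arg1, arg2, n=n)

    # explicit form after this change: global (grid) size first, then local
    # size; stay within ctx.maxgsize and k.maxlsize or the call will fail
    k(arg1, arg2, gs=gs, ls=ls)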