|
249 | 249 | - arg_meta: null |
250 | 250 | kernel_name: impl::reference::quantized_relu_asym8u_asym8u_per_tensor_out |
251 | 251 |
|
| 252 | +- func: cadence::quantized_add.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!) |
| 253 | + kernels: |
| 254 | + - arg_meta: null |
| 255 | + kernel_name: impl::reference::quantized_add_per_tensor_out |
| 256 | + |
| 257 | +- func: cadence::quantized_add_asym8sxasym8s_asym8s.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!) |
| 258 | + kernels: |
| 259 | + - arg_meta: null |
| 260 | + kernel_name: impl::reference::quantized_add_asym8sxasym8s_asym8s_per_tensor_out |
| 261 | + |
| 262 | +- func: cadence::quantized_add_asym8uxasym8u_asym8u.per_tensor_out(Tensor X, float X_scale, int X_zero_point, Tensor Y, float Y_scale, int Y_zero_point, float out_scale, int out_zero_point, *, Tensor(a!) out) -> Tensor(a!) |
| 263 | + kernels: |
| 264 | + - arg_meta: null |
| 265 | + kernel_name: impl::reference::quantized_add_asym8uxasym8u_asym8u_per_tensor_out |
| 266 | + |
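The three new `quantized_add` entries above all take a single scale and zero point per tensor (hence `.per_tensor_out`). The registered `impl::reference::quantized_add_*` kernels operate on ExecuTorch tensors and are not shown in this diff; the standalone sketch below only illustrates the per-tensor requantization arithmetic implied by the schema parameters (`X_scale`, `X_zero_point`, `Y_scale`, `Y_zero_point`, `out_scale`, `out_zero_point`) for the signed-8-bit (`asym8s`) case. The function name and vector-based signature are hypothetical, not the actual kernel API.

```cpp
// Illustrative sketch only -- NOT the impl::reference kernel. It shows the
// usual per-tensor quantized add: dequantize both inputs, add in float,
// requantize to the output scale/zero point, and clamp to the int8 range.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<int8_t> quantized_add_per_tensor(
    const std::vector<int8_t>& X, float X_scale, int32_t X_zero_point,
    const std::vector<int8_t>& Y, float Y_scale, int32_t Y_zero_point,
    float out_scale, int32_t out_zero_point) {
  std::vector<int8_t> out(X.size());
  for (size_t i = 0; i < X.size(); ++i) {
    // Dequantize each input with its own per-tensor scale/zero point.
    float x = (static_cast<int32_t>(X[i]) - X_zero_point) * X_scale;
    float y = (static_cast<int32_t>(Y[i]) - Y_zero_point) * Y_scale;
    // Requantize the float sum to the output parameters.
    int32_t q =
        static_cast<int32_t>(std::lround((x + y) / out_scale)) + out_zero_point;
    out[i] = static_cast<int8_t>(std::clamp(q, -128, 127));  // asym8s range
  }
  return out;
}

int main() {
  std::vector<int8_t> X{10, 20, 30};
  std::vector<int8_t> Y{5, 5, 5};
  auto out = quantized_add_per_tensor(X, 0.1f, 0, Y, 0.2f, 0, 0.1f, 0);
  for (int8_t v : out) std::printf("%d ", v);  // expected: 20 30 40
  std::printf("\n");
}
```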
252 | 267 | - func: cadence::quantized_matmul.out(Tensor X, int X_zero_point, Tensor Y, int Y_zero_point, Tensor? bias, int out_multiplier, int out_shift, int out_zero_point, bool transposed, *, Tensor(a!) out) -> Tensor(a!) |
253 | 268 | kernels: |
254 | 269 | - arg_meta: null |
|
304 | 319 | - arg_meta: null |
305 | 320 | kernel_name: impl::reference::quantized_conv_nhwc_asym8uxsym8u_asym8u_per_tensor_out |
306 | 321 |
|
| 322 | +- func: cadence::quantized_conv_nchw_dilated_asym8sxsym8s_asym8s.per_tensor_out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[] dilation, int groups, int input_zero_point, int weight_zero_point, float bias_scale, float out_scale, int out_zero_point, int out_multiplier, int out_shift, *, Tensor(a!) out) -> Tensor(a!) |
| 323 | + kernels: |
| 324 | + - arg_meta: null |
| 325 | + kernel_name: impl::reference::quantized_conv_nchw_dilated_asym8sxsym8s_asym8s_per_tensor_out |
| 326 | + |
| 327 | +- func: cadence::quantized_conv_nchw_dilated_asym8uxsym8u_asym8u.per_tensor_out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[] dilation, int groups, int input_zero_point, int weight_zero_point, float bias_scale, float out_scale, int out_zero_point, int out_multiplier, int out_shift, *, Tensor(a!) out) -> Tensor(a!) |
| 328 | + kernels: |
| 329 | + - arg_meta: null |
| 330 | + kernel_name: impl::reference::quantized_conv_nchw_dilated_asym8uxsym8u_asym8u_per_tensor_out |
| 331 | + |
| 332 | +- func: cadence::quantized_conv_nhwc_dilated_asym8sxsym8s_asym8s.per_tensor_out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[] dilation, int groups, int input_zero_point, int weight_zero_point, float bias_scale, float out_scale, int out_zero_point, int out_multiplier, int out_shift, *, Tensor(a!) out) -> Tensor(a!) |
| 333 | + kernels: |
| 334 | + - arg_meta: null |
| 335 | + kernel_name: impl::reference::quantized_conv_nhwc_dilated_asym8sxsym8s_asym8s_per_tensor_out |
| 336 | + |
| 337 | +- func: cadence::quantized_conv_nhwc_dilated_asym8uxsym8u_asym8u.per_tensor_out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[] dilation, int groups, int input_zero_point, int weight_zero_point, float bias_scale, float out_scale, int out_zero_point, int out_multiplier, int out_shift, *, Tensor(a!) out) -> Tensor(a!) |
| 338 | + kernels: |
| 339 | + - arg_meta: null |
| 340 | + kernel_name: impl::reference::quantized_conv_nhwc_dilated_asym8uxsym8u_asym8u_per_tensor_out |
| 341 | + |
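The four `quantized_conv_*_dilated_*` entries above register dilated-convolution reference kernels for the NCHW and NHWC layouts in both signed (`asym8s`) and unsigned (`asym8u`) 8-bit flavors. The actual kernels are not part of this diff; the sketch below only restates the standard dilation arithmetic, assuming the usual convention where a dilation of `d` spreads a `k`-tap kernel over an effective extent of `d * (k - 1) + 1` input elements. The helper name `conv_out_size` is illustrative, not an API from the registered kernels.

```cpp
// Illustrative sketch only: output-size arithmetic for one spatial dimension
// of a (possibly dilated) convolution, matching the stride/padding/dilation
// arguments in the schemas above.
#include <cstdio>

int conv_out_size(int in, int kernel, int stride, int pad, int dilation) {
  // Dilation enlarges the kernel's receptive field without adding taps.
  int effective_kernel = dilation * (kernel - 1) + 1;
  return (in + 2 * pad - effective_kernel) / stride + 1;
}

int main() {
  // 3x3 kernel over a 16x16 input, stride 1, no padding:
  std::printf("dilation=1 -> %d\n", conv_out_size(16, 3, 1, 0, 1));  // 14
  std::printf("dilation=2 -> %d\n", conv_out_size(16, 3, 1, 0, 2));  // 12
}
```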
307 | 342 | - func: cadence::quantized_fully_connected.out(Tensor src, Tensor weight, Tensor bias, int src_zero_point, Tensor weight_zero_point, Tensor out_multiplier, Tensor out_shift, int out_zero_point, Tensor? offset, *, Tensor(a!) out) -> Tensor(a!) |
308 | 343 | kernels: |
309 | 344 | - arg_meta: null |
|