114 changes: 0 additions & 114 deletions mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
@@ -44,56 +44,6 @@ structured_op: !LinalgStructuredOpConfig
- !ScalarExpression
scalar_arg: I
--- !LinalgOpConfig
metadata: !LinalgOpMetadata
name: elemwise_unary
cpp_class_name: ElemwiseUnaryOp
doc: |-
Applies the unary function fun elementwise.

Numeric casting is performed on the input operand, promoting it to the same
data type as the accumulator/output.
structured_op: !LinalgStructuredOpConfig
args:
- !LinalgOperandDefConfig
name: I
kind: input_tensor
type_var: T1
shape_map: affine_map<() -> ()>
- !LinalgOperandDefConfig
name: O
kind: output_tensor
type_var: U
shape_map: affine_map<() -> ()>
- !LinalgOperandDefConfig
name: fun
kind: unary_fn_attr
default_fn: exp
- !LinalgOperandDefConfig
name: cast
kind: type_fn_attr
default_fn: cast_signed
indexing_maps: !LinalgIndexingMapsConfig
static_indexing_maps:
- affine_map<() -> ()>
- affine_map<() -> ()>
iterator_types: []
assignments:
- !ScalarAssign
arg: O
value: !ScalarExpression
scalar_fn:
kind: unary
attr_name: fun
operands:
- !ScalarExpression
scalar_fn:
kind: type
attr_name: cast
type_var: U
operands:
- !ScalarExpression
scalar_arg: I
--- !LinalgOpConfig
metadata: !LinalgOpMetadata
name: exp
cpp_class_name: ExpOp
@@ -549,70 +499,6 @@ structured_op: !LinalgStructuredOpConfig
- !ScalarExpression
scalar_arg: I
--- !LinalgOpConfig
metadata: !LinalgOpMetadata
name: elemwise_binary
cpp_class_name: ElemwiseBinaryOp
doc: |-
Applies the binary function fun elementwise.

Numeric casting is performed on the input operands, promoting them to the same
data type as the accumulator/output.
structured_op: !LinalgStructuredOpConfig
args:
- !LinalgOperandDefConfig
name: lhs
kind: input_tensor
type_var: T1
shape_map: affine_map<() -> ()>
- !LinalgOperandDefConfig
name: rhs
kind: input_tensor
type_var: T2
shape_map: affine_map<() -> ()>
- !LinalgOperandDefConfig
name: O
kind: output_tensor
type_var: U
shape_map: affine_map<() -> ()>
- !LinalgOperandDefConfig
name: fun
kind: binary_fn_attr
default_fn: add
- !LinalgOperandDefConfig
name: cast
kind: type_fn_attr
default_fn: cast_signed
indexing_maps: !LinalgIndexingMapsConfig
static_indexing_maps:
- affine_map<() -> ()>
- affine_map<() -> ()>
- affine_map<() -> ()>
iterator_types: []
assignments:
- !ScalarAssign
arg: O
value: !ScalarExpression
scalar_fn:
kind: binary
attr_name: fun
operands:
- !ScalarExpression
scalar_fn:
kind: type
attr_name: cast
type_var: U
operands:
- !ScalarExpression
scalar_arg: lhs
- !ScalarExpression
scalar_fn:
kind: type
attr_name: cast
type_var: U
operands:
- !ScalarExpression
scalar_arg: rhs
--- !LinalgOpConfig
metadata: !LinalgOpMetadata
name: add
cpp_class_name: AddOp
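For context, the YAML above is the spec from which the `linalg.elemwise_unary` and `linalg.elemwise_binary` builders were generated; the function applied was selected by the `fun` attribute rather than by the op name. A minimal sketch of the removed IR form next to its fixed-function replacement, using the syntax from the tests deleted further down (types are illustrative):

  // Removed form: one generic elementwise op, function selected by an attribute.
  %a = linalg.elemwise_unary {fun = #linalg.unary_fn<exp>}
         ins(%x : tensor<4x8xf32>) outs(%init : tensor<4x8xf32>) -> tensor<4x8xf32>
  // Replacement: a dedicated named op per function (linalg.exp, linalg.negf, ...).
  %b = linalg.exp ins(%x : tensor<4x8xf32>) outs(%init : tensor<4x8xf32>) -> tensor<4x8xf32>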
31 changes: 0 additions & 31 deletions mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
@@ -21,21 +21,6 @@ def copy(
O[None] = cast(U, I[None])


@linalg_structured_op
def elemwise_unary(
I=TensorDef(T1),
O=TensorDef(U, output=True),
fun=UnaryFnAttrDef(default=UnaryFn.exp),
cast=TypeFnAttrDef(default=TypeFn.cast_signed),
):
"""Applies the unary function fun elementwise.

Numeric casting is performed on the input operand, promoting it to the same
data type as the accumulator/output.
"""
O[None] = fun(cast(U, I[None]))


@linalg_structured_op
def exp(
I=TensorDef(T1),
@@ -192,22 +177,6 @@ def erf(
O[None] = UnaryFn.erf(I[None])


@linalg_structured_op
def elemwise_binary(
lhs=TensorDef(T1),
rhs=TensorDef(T2),
O=TensorDef(U, output=True),
fun=BinaryFnAttrDef(default=BinaryFn.add),
cast=TypeFnAttrDef(default=TypeFn.cast_signed),
):
"""Applies the binary function fun elementwise.

Numeric casting is performed on the input operands, promoting them to the same
data type as the accumulator/output.
"""
O[None] = fun(cast(U, lhs[None]), cast(U, rhs[None]))


@linalg_structured_op
def add(
lhs=TensorDef(T1),
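The deleted Python definitions were the opdsl source for the YAML removed above; the per-function definitions that remain in this file (`exp`, `erf`, `add`, ... visible in the surrounding context) cover the same functionality. As a sketch, the op generated from the retained `add` definition is used like this (types illustrative):

  // Fixed-function counterpart of elemwise_binary {fun = #linalg.binary_fn<add>}:
  // casts both operands to the output element type, then adds.
  %sum = linalg.add ins(%lhs, %rhs : tensor<4x8xf32>, tensor<4x8xf32>)
                    outs(%init : tensor<4x8xf32>) -> tensor<4x8xf32>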
111 changes: 0 additions & 111 deletions mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
@@ -434,117 +434,6 @@ func.func @generalize_const(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xf

// -----

// Verifies the default value of the fun attribute is an exp op.
func.func @generalize_elemwise_exp(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = linalg.elemwise_unary ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
return %0: tensor<4x8xf32>
}

// CHECK-LABEL: @generalize_elemwise_exp
// CHECK: = math.exp

// -----

// Verifies the fun attribute controls the unary function used.
func.func @generalize_elemwise_log(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = linalg.elemwise_unary {fun = #linalg.unary_fn<log>}
ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
return %0: tensor<4x8xf32>
}

// CHECK-LABEL: @generalize_elemwise_log
// CHECK: = math.log

// -----

// Verifies the fun attribute controls the unary function used.
func.func @generalize_elemwise_abs(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = linalg.elemwise_unary {fun = #linalg.unary_fn<abs>}
ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
return %0: tensor<4x8xf32>
}

// CHECK-LABEL: @generalize_elemwise_abs
// CHECK: = math.absf

// -----

// Verifies the fun attribute controls the unary function used.
func.func @generalize_elemwise_ceil(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = linalg.elemwise_unary {fun = #linalg.unary_fn<ceil>}
ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
return %0: tensor<4x8xf32>
}

// CHECK-LABEL: @generalize_elemwise_ceil
// CHECK: = math.ceil

// -----

// Verifies the fun attribute controls the unary function used.
func.func @generalize_elemwise_floor(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = linalg.elemwise_unary {fun = #linalg.unary_fn<floor>}
ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
return %0: tensor<4x8xf32>
}

// CHECK-LABEL: @generalize_elemwise_floor
// CHECK: = math.floor

// -----

// Verifies the fun attribute controls the unary function used.
func.func @generalize_elemwise_negf(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = linalg.elemwise_unary {fun = #linalg.unary_fn<negf>}
ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
return %0: tensor<4x8xf32>
}

// CHECK-LABEL: @generalize_elemwise_negf
// CHECK: = arith.negf

// -----

// Verifies the default value of the fun attribute is an add op.
func.func @generalize_elemwise_add(%lhs : tensor<4x8xf32>, %rhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = linalg.elemwise_binary ins(%lhs, %rhs: tensor<4x8xf32>, tensor<4x8xf32>)
outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
return %0: tensor<4x8xf32>
}

// CHECK-LABEL: @generalize_elemwise_add
// CHECK: = arith.addf

// -----

// Verifies the fun attribute controls the binary function used.
func.func @generalize_elemwise_mul(%lhs : tensor<4x8xf32>, %rhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = linalg.elemwise_binary {fun = #linalg.binary_fn<mul>}
ins(%lhs, %rhs: tensor<4x8xf32>, tensor<4x8xf32>)
outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
return %0: tensor<4x8xf32>
}

// CHECK-LABEL: @generalize_elemwise_mul
// CHECK: = arith.mulf

// -----

// Verifies pointwise ops support rank-zero input tensors.
func.func @generalize_elemwise_rank_zero(%lhs : tensor<f32>, %rhs : tensor<f32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = linalg.elemwise_binary {fun = #linalg.binary_fn<sub>}
ins(%lhs, %rhs: tensor<f32>, tensor<f32>)
outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
return %0: tensor<4x8xf32>
}

// CHECK-LABEL: @generalize_elemwise_rank_zero
// CHECK: linalg.generic
// CHECK-SAME: iterator_types = ["parallel", "parallel"]
// CHECK: = arith.subf

// -----

// Verifies the copy op generalizes to a linalg.generic.
func.func @generalize_copy(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = linalg.copy ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
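These deleted tests exercised the generalization of the two removed ops; the remaining tests in this file cover the same lowering through the per-function named ops. For orientation, generalizing an elementwise named op such as linalg.exp yields a linalg.generic with the matching math/arith op in its body, roughly (a sketch consistent with the "= math.exp" and iterator_types CHECK lines above):

  #map = affine_map<(d0, d1) -> (d0, d1)>
  func.func @exp_generalized(%in: tensor<4x8xf32>, %init: tensor<4x8xf32>) -> tensor<4x8xf32> {
    // One parallel loop per output dimension; the payload applies math.exp.
    %0 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]}
        ins(%in : tensor<4x8xf32>) outs(%init : tensor<4x8xf32>) {
      ^bb0(%a: f32, %out: f32):
        %e = math.exp %a : f32
        linalg.yield %e : f32
    } -> tensor<4x8xf32>
    return %0 : tensor<4x8xf32>
  }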
8 changes: 0 additions & 8 deletions mlir/test/Dialect/Linalg/invalid.mlir
@@ -1909,14 +1909,6 @@ module {

// -----

func.func @elemwise_unary_invalid_mixed_types(%arg0 : tensor<?xi32>) -> tensor<?xi32> {
// expected-error @below {{unsupported non numeric type}}
%0 = linalg.elemwise_unary ins(%arg0 : tensor<?xi32>) outs(%arg0 : tensor<?xi32>) -> tensor<?xi32>
return %0 : tensor<?xi32>
}

// -----

func.func @matmul_invalid_mixed_types(%t: tensor<?xf16>, %f: vector<4xf16>)
-> (tensor<?xf16>, vector<4xf16>)
{
40 changes: 0 additions & 40 deletions mlir/test/Dialect/Linalg/library-calls.mlir
@@ -59,43 +59,3 @@ module {
return
}
}


// -----

// CHECK: func.func private @linalg_elemwise_unary_negf_view16x8xf32_view16x8xf32(memref<16x8xf32, strided<[?, ?], offset: ?>>, memref<16x8xf32, strided<[?, ?], offset: ?>>) attributes {llvm.emit_c_interface}
// CHECK: func.func private @linalg_elemwise_unary_negf_view16xf32_view16xf32(memref<16xf32, strided<[?], offset: ?>>, memref<16xf32, strided<[?], offset: ?>>) attributes {llvm.emit_c_interface}

func.func @test_neg(%A : memref<16x8xf32>, %B: memref<16x8xf32>, %C: memref<16xf32>, %D: memref<16xf32>) {
linalg.elemwise_unary {fun = #linalg.unary_fn<negf>}
ins(%A: memref<16x8xf32>) outs(%B: memref<16x8xf32>)
linalg.elemwise_unary {fun = #linalg.unary_fn<negf>}
ins(%C: memref<16xf32>) outs(%D: memref<16xf32>)
return
}

// -----

// CHECK: func.func private @linalg_elemwise_unary_exp_view16x8xf32_view16x8xf32(memref<16x8xf32, strided<[?, ?], offset: ?>>, memref<16x8xf32, strided<[?, ?], offset: ?>>) attributes {llvm.emit_c_interface}
// CHECK: func.func private @linalg_elemwise_unary_exp_view16xf32_view16xf32(memref<16xf32, strided<[?], offset: ?>>, memref<16xf32, strided<[?], offset: ?>>) attributes {llvm.emit_c_interface}

func.func @test_exp(%A : memref<16x8xf32>, %B: memref<16x8xf32>, %C: memref<16xf32>, %D: memref<16xf32>) {
linalg.elemwise_unary {fun = #linalg.unary_fn<exp>}
ins(%A: memref<16x8xf32>) outs(%B: memref<16x8xf32>)
linalg.elemwise_unary {fun = #linalg.unary_fn<exp>}
ins(%C: memref<16xf32>) outs(%D: memref<16xf32>)
return
}

// -----

// CHECK: func.func private @linalg_elemwise_binary_add_view16x8xf32_view16x8xf32_view16x8xf32(memref<16x8xf32, strided<[?, ?], offset: ?>>, memref<16x8xf32, strided<[?, ?], offset: ?>>, memref<16x8xf32, strided<[?, ?], offset: ?>>) attributes {llvm.emit_c_interface}
// CHECK: func.func private @linalg_elemwise_binary_add_view16xf32_view16xf32_view16xf32(memref<16xf32, strided<[?], offset: ?>>, memref<16xf32, strided<[?], offset: ?>>, memref<16xf32, strided<[?], offset: ?>>) attributes {llvm.emit_c_interface}

func.func @test_add(%A : memref<16x8xf32>, %B: memref<16x8xf32>, %C: memref<16x8xf32>, %D: memref<16xf32>, %E: memref<16xf32>, %F: memref<16xf32>) {
linalg.elemwise_binary {fun = #linalg.binary_fn<add>}
ins(%A, %B: memref<16x8xf32>, memref<16x8xf32>) outs(%C: memref<16x8xf32>)
linalg.elemwise_binary {fun = #linalg.binary_fn<add>}
ins(%D, %E: memref<16xf32>, memref<16xf32>) outs(%F: memref<16xf32>)
return
}
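The deleted library-calls tests checked the lowering that turns each Linalg op into a call to an external function named after the op and its operand types (the declarations in the CHECK lines above). Roughly, and assuming the naming scheme shown there, the negf case lowered to something like:

  // Sketch only: operands are cast to the dynamically strided layout the
  // C interface expects, then passed to the generated library function.
  %a = memref.cast %A : memref<16x8xf32> to memref<16x8xf32, strided<[?, ?], offset: ?>>
  %b = memref.cast %B : memref<16x8xf32> to memref<16x8xf32, strided<[?, ?], offset: ?>>
  func.call @linalg_elemwise_unary_negf_view16x8xf32_view16x8xf32(%a, %b)
      : (memref<16x8xf32, strided<[?, ?], offset: ?>>, memref<16x8xf32, strided<[?, ?], offset: ?>>) -> ()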
6 changes: 3 additions & 3 deletions mlir/test/Dialect/Linalg/match-ops-interpreter.mlir
@@ -842,15 +842,15 @@ module attributes { transform.with_named_sequence } {
// expected-remark @below {{op result}}
// expected-note @below {{value handle points to an op result #0}}
// expected-remark @below {{single user}}
- linalg.elemwise_unary {fun = #linalg.unary_fn<negf>} ins(%2 : tensor<42x42xf32>) outs(%0 : tensor<42x42xf32>) -> tensor<42x42xf32>
+ linalg.negf ins(%2 : tensor<42x42xf32>) outs(%0 : tensor<42x42xf32>) -> tensor<42x42xf32>
// expected-remark @below {{matched result value}}
// expected-remark @below {{op result}}
// expected-note @below {{value handle points to an op result #0}}
- linalg.elemwise_unary {fun = #linalg.unary_fn<exp>} ins(%3 : tensor<42x42xf32>) outs(%0 : tensor<42x42xf32>) -> tensor<42x42xf32>
+ linalg.exp ins(%3 : tensor<42x42xf32>) outs(%0 : tensor<42x42xf32>) -> tensor<42x42xf32>
// expected-remark @below {{matched result value}}
// expected-remark @below {{op result}}
// expected-note @below {{value handle points to an op result #0}}
- linalg.elemwise_unary {fun = #linalg.unary_fn<exp>} ins(%3 : tensor<42x42xf32>) outs(%0 : tensor<42x42xf32>) -> tensor<42x42xf32>
+ linalg.exp ins(%3 : tensor<42x42xf32>) outs(%0 : tensor<42x42xf32>) -> tensor<42x42xf32>
return
}
}
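The two hunks above are also the migration recipe for downstream IR: replace `linalg.elemwise_unary {fun = #linalg.unary_fn<F>}` with the named op `linalg.F` (and likewise `elemwise_binary {fun = #linalg.binary_fn<G>}` with `linalg.G`), leaving the ins/outs clauses unchanged. For example, for the binary case:

  // Before (op removed by this PR):
  %0 = linalg.elemwise_binary {fun = #linalg.binary_fn<mul>}
         ins(%lhs, %rhs : tensor<4x8xf32>, tensor<4x8xf32>)
         outs(%init : tensor<4x8xf32>) -> tensor<4x8xf32>
  // After:
  %1 = linalg.mul ins(%lhs, %rhs : tensor<4x8xf32>, tensor<4x8xf32>)
         outs(%init : tensor<4x8xf32>) -> tensor<4x8xf32>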