Skip to content

Commit 487db47

Browse files
committed
[mlir][linalg] Add support for scalable vectorization of linalg.mmt4d
This patch introduces support for scalable vectorization of `linalg.mmt4d`. The key design addition is a new state variable in the Linalg vectorizer: * `assumeScalableVecSizesMatchDimSize` This flag informs the vectorizer that the memref/tensor dimensions corresponding to scalable vector sizes (typically dynamic) _match the vector sizes_ at runtime. While this assumption is not generally valid, it does hold for `linalg.mmt4d` because inputs and outputs are explicitly packed (via `linalg.pack`). Packing includes padding, which ensures that dimension sizes align with the scalable vector lengths (*). See discussion here: * #143920 (*) Provided that the tile sizes used for packing match the vector sizes used during vectorization. Enforcing this is left to the user.
1 parent cb80651 commit 487db47

File tree

5 files changed

+157
-56
lines changed

5 files changed

+157
-56
lines changed

mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2440,12 +2440,11 @@ def VectorizeOp : Op<Transform_Dialect, "structured.vectorize",
24402440
}];
24412441

24422442
let arguments = (ins TransformHandleTypeInterface:$target,
2443-
Variadic<TransformAnyParamTypeOrAnyHandle>:$vector_sizes,
2444-
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:
2445-
$static_vector_sizes,
2446-
OptionalAttr<UnitAttr>:$vectorize_nd_extract,
2447-
DefaultValuedOptionalAttr<DenseBoolArrayAttr, "{}">:
2448-
$scalable_sizes);
2443+
Variadic<TransformAnyParamTypeOrAnyHandle>:$vector_sizes,
2444+
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:$static_vector_sizes,
2445+
OptionalAttr<UnitAttr>:$vectorize_nd_extract,
2446+
OptionalAttr<UnitAttr>:$assume_scalable_sizes_match_dim_size,
2447+
DefaultValuedOptionalAttr<DenseBoolArrayAttr, "{}">:$scalable_sizes);
24492448

24502449
let results = (outs);
24512450

mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -871,7 +871,8 @@ FailureOr<VectorizationResult>
871871
vectorize(RewriterBase &rewriter, Operation *op,
872872
ArrayRef<int64_t> inputVectorSizes = {},
873873
ArrayRef<bool> inputScalableVecDims = {},
874-
bool vectorizeNDExtract = false, bool flatten1DDepthwiseConv = false);
874+
bool vectorizeNDExtract = false, bool flatten1DDepthwiseConv = false,
875+
bool assumeScalableSizesMultipleOfDim = false);
875876

876877
/// Emit a suitable vector form for a Copy op with fully static shape.
877878
LogicalResult vectorizeCopy(RewriterBase &builder, memref::CopyOp copyOp);

mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3921,7 +3921,8 @@ DiagnosedSilenceableFailure transform::VectorizeOp::apply(
39213921
}
39223922
FailureOr<VectorizationResult> vectorResults =
39233923
linalg::vectorize(rewriter, target, vectorSizes, getScalableSizes(),
3924-
getVectorizeNdExtract().value_or(false));
3924+
getVectorizeNdExtract().value_or(false), false,
3925+
getAssumeScalableSizesMatchDimSize().value_or(false));
39253926
if (failed(vectorResults)) {
39263927
return mlir::emitSilenceableFailure(target->getLoc())
39273928
<< "Attempted to vectorize, but failed";

mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp

Lines changed: 55 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -222,9 +222,11 @@ struct VectorizationState {
222222
/// canonical vector shape for vectorization.
223223
LogicalResult initState(RewriterBase &rewriter, LinalgOp linalgOp,
224224
ArrayRef<int64_t> inputVectorSizes,
225-
ArrayRef<bool> inputScalableVecDims);
225+
ArrayRef<bool> inputScalableVecDims,
226+
bool assumeScalableVecSizesMatchDimSize = false);
226227

227-
/// Returns the canonical vector shape used to vectorize the iteration space.
228+
/// Returns the canonical vector shape used to vectorize the iteration
229+
/// space.
228230
ArrayRef<int64_t> getCanonicalVecShape() const { return canonicalVecShape; }
229231

230232
/// Returns the vector dimensions that are scalable in the canonical vector
@@ -233,8 +235,8 @@ struct VectorizationState {
233235

234236
/// Returns a vector type of the provided `elementType` with the canonical
235237
/// vector shape and the corresponding fixed/scalable dimensions bit. If
236-
/// `dimPermutation` is provided, the canonical vector dimensions are permuted
237-
/// accordingly.
238+
/// `dimPermutation` is provided, the canonical vector dimensions are
239+
/// permuted accordingly.
238240
VectorType getCanonicalVecType(
239241
Type elementType,
240242
std::optional<AffineMap> dimPermutation = std::nullopt) const {
@@ -254,9 +256,9 @@ struct VectorizationState {
254256
}
255257

256258
/// Masks an operation with the canonical vector mask if the operation needs
257-
/// masking. Returns the masked operation or the original operation if masking
258-
/// is not needed. If provided, the canonical mask for this operation is
259-
/// permuted using `maybeIndexingMap`.
259+
/// masking. Returns the masked operation or the original operation if
260+
/// masking is not needed. If provided, the canonical mask for this
261+
/// operation is permuted using `maybeIndexingMap`.
260262
Operation *
261263
maskOperation(RewriterBase &rewriter, Operation *opToMask, LinalgOp linalgOp,
262264
std::optional<AffineMap> maybeIndexingMap = std::nullopt);
@@ -276,15 +278,15 @@ struct VectorizationState {
276278

277279
/// Create or retrieve an existing mask value to mask `opToMask` in the
278280
/// canonical vector iteration space. If `maybeMaskingMap` the mask is
279-
/// permuted using that permutation map. If a new mask is created, it will be
280-
/// cached for future users.
281+
/// permuted using that permutation map. If a new mask is created, it will
282+
/// be cached for future users.
281283
Value getOrCreateMaskFor(RewriterBase &rewriter, Operation *opToMask,
282284
LinalgOp linalgOp,
283285
std::optional<AffineMap> maybeMaskingMap);
284286

285287
/// Check whether this permutation map can be used for masking. At the
286-
/// moment we only make sure that there are no broadcast dimensions, but this
287-
/// might change if indexing maps evolve.
288+
/// moment we only make sure that there are no broadcast dimensions, but
289+
/// this might change if indexing maps evolve.
288290
bool isValidMaskingMap(AffineMap maskingMap) {
289291
return maskingMap.getBroadcastDims().size() == 0;
290292
}
@@ -324,13 +326,24 @@ struct VectorizationState {
324326
/// shape.
325327
SmallVector<bool> scalableVecDims;
326328

327-
/// Holds the active masks for permutations of the canonical vector iteration
328-
/// space.
329+
/// Holds the active masks for permutations of the canonical vector
330+
/// iteration space.
329331
DenseMap<AffineMap, Value> activeMaskCache;
330332

331333
/// Global vectorization guard for the incoming rewriter. It's initialized
332334
/// when the vectorization state is initialized.
333335
OpBuilder::InsertionGuard rewriterGuard;
336+
337+
/// Do all scalable vector sizes match the corresponding input dim sizes?
338+
/// (tensor or memref)
339+
///
340+
/// At the Tensor + MemRef levels, scalable sizes are modelled using
341+
/// dynamic dimensions (i.e. `?`). In many cases these sizes result from
342+
/// e.g. "scalable packing + tiling" and are known to always match the
343+
/// scalable vector sizes. In such cases, masking can be safely skipped,
344+
/// despite the presence of dynamic shapes. Use this flag with care and
345+
/// only for cases where you are confident the assumption holds.
346+
bool assumeScalableVecSizesMatchDimSize = false;
334347
};
335348

336349
LogicalResult
@@ -367,10 +380,12 @@ VectorizationState::precomputeIterSpaceValueSizes(RewriterBase &rewriter,
367380
/// Initializes the vectorization state, including the computation of the
368381
/// canonical vector shape for vectorization.
369382
// TODO: Move this to the constructor when we can remove the failure cases.
370-
LogicalResult
371-
VectorizationState::initState(RewriterBase &rewriter, LinalgOp linalgOp,
372-
ArrayRef<int64_t> inputVectorSizes,
373-
ArrayRef<bool> inputScalableVecDims) {
383+
LogicalResult VectorizationState::initState(RewriterBase &rewriter,
384+
LinalgOp linalgOp,
385+
ArrayRef<int64_t> inputVectorSizes,
386+
ArrayRef<bool> inputScalableVecDims,
387+
bool assumeScalableSizes) {
388+
assumeScalableVecSizesMatchDimSize = assumeScalableSizes;
374389
// Initialize the insertion point.
375390
rewriter.setInsertionPoint(linalgOp);
376391

@@ -470,6 +485,21 @@ Value VectorizationState::getOrCreateMaskFor(
470485
return Value();
471486
}
472487

488+
if (assumeScalableVecSizesMatchDimSize) {
489+
// Given that all _scalable vector sizes_ match the corresponding
490+
// memref/tensor dim sizes, masking can be skipped provided that:
491+
// * all vector sizes corresponding to dynamic dims are scalable.
492+
if (llvm::all_of(llvm::zip(permutedStaticSizes, maskType.getScalableDims()),
493+
[](auto it) {
494+
return std::get<0>(it) == ShapedType::kDynamic
495+
? std::get<1>(it)
496+
: false;
497+
}))
498+
LDBG("Masking is not needed for masking map: " << maskingMap << "\n");
499+
activeMaskCache[maskingMap] = Value();
500+
return Value();
501+
}
502+
473503
// Permute the iteration space value sizes to compute the mask upper bounds.
474504
SmallVector<Value> upperBounds =
475505
applyPermutationMap(maskingMap, ArrayRef<Value>(iterSpaceValueSizes));
@@ -2479,7 +2509,8 @@ vectorizeScalableVectorPrecondition(Operation *op,
24792509
return success(isElementwise(linalgOp) || isa<linalg::MatmulOp>(op) ||
24802510
isa<linalg::MatmulTransposeAOp>(op) ||
24812511
isa<linalg::DepthwiseConv1DNwcWcOp>(op) ||
2482-
isa<linalg::MatvecOp>(op) || hasReductionIterator(linalgOp));
2512+
isa<linalg::MatvecOp>(op) || isa<linalg::Mmt4DOp>(op) ||
2513+
hasReductionIterator(linalgOp));
24832514
}
24842515

24852516
LogicalResult mlir::linalg::vectorizeOpPrecondition(
@@ -2535,11 +2566,10 @@ bool mlir::linalg::hasVectorizationImpl(Operation *op) {
25352566
tensor::InsertSliceOp>(op);
25362567
}
25372568

2538-
FailureOr<VectorizationResult>
2539-
mlir::linalg::vectorize(RewriterBase &rewriter, Operation *op,
2540-
ArrayRef<int64_t> inputVectorSizes,
2541-
ArrayRef<bool> inputScalableVecDims,
2542-
bool vectorizeNDExtract, bool flatten1DDepthwiseConv) {
2569+
FailureOr<VectorizationResult> mlir::linalg::vectorize(
2570+
RewriterBase &rewriter, Operation *op, ArrayRef<int64_t> inputVectorSizes,
2571+
ArrayRef<bool> inputScalableVecDims, bool vectorizeNDExtract,
2572+
bool flatten1DDepthwiseConv, bool assumeScalableSizesMultipleOfDim) {
25432573
LDBG("Attempting to vectorize:\n" << *op << "\n");
25442574
LDBG("Input vector sizes: ");
25452575
LLVM_DEBUG(llvm::interleaveComma(inputVectorSizes, llvm::dbgs()));
@@ -2559,7 +2589,8 @@ mlir::linalg::vectorize(RewriterBase &rewriter, Operation *op,
25592589
VectorizationState state(rewriter);
25602590
if (auto linalgOp = dyn_cast<linalg::LinalgOp>(op)) {
25612591
if (failed(state.initState(rewriter, linalgOp, inputVectorSizes,
2562-
inputScalableVecDims))) {
2592+
inputScalableVecDims,
2593+
assumeScalableSizesMultipleOfDim))) {
25632594
LDBG("Vectorization state couldn't be initialized\n");
25642595
return failure();
25652596
}

mlir/test/Dialect/Linalg/vectorization/linalg-ops.mlir

Lines changed: 93 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -840,6 +840,99 @@ module attributes {transform.with_named_sequence} {
840840
}
841841
}
842842

843+
// -----
844+
845+
///----------------------------------------------------------------------------------------
846+
/// Tests for linalg.mmt4d
847+
///----------------------------------------------------------------------------------------
848+
849+
func.func @mmt4d(%A: memref<16x16x8x1xf32>, %B: memref<16x16x8x1xf32>, %C_in: memref<16x16x8x8xf32>) {
850+
linalg.mmt4d ins(%A, %B: memref<16x16x8x1xf32>, memref<16x16x8x1xf32>)
851+
outs(%C_in: memref<16x16x8x8xf32>)
852+
return
853+
}
854+
855+
// CHECK-LABEL: func.func @mmt4d(
856+
// CHECK-SAME: %[[A:.*]]: memref<16x16x8x1xf32>, %[[B:.*]]: memref<16x16x8x1xf32>, %[[C:.*]]: memref<16x16x8x8xf32>) {
857+
// CHECK: %[[VEC_A:.*]] = vector.transfer_read %[[A]]{{.*}} : memref<16x16x8x1xf32>, vector<16x16x16x8x8x1xf32>
858+
// CHECK: %[[VEC_B:.*]] = vector.transfer_read %[[B]]{{.*}} : memref<16x16x8x1xf32>, vector<16x16x16x8x8x1xf32>
859+
// CHECK: %[[VEC_C:.*]] = vector.transfer_read %[[C]]{{.*}} : memref<16x16x8x8xf32>, vector<16x16x8x8xf32>
860+
// CHECK: %[[MUL:.*]] = arith.mulf %[[VEC_A]], %[[VEC_B]] : vector<16x16x16x8x8x1xf32>
861+
// CHECK: %[[RED:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[VEC_C]] [2, 5] : vector<16x16x16x8x8x1xf32> to vector<16x16x8x8xf32>
862+
// CHECK: vector.transfer_write %[[RED]], %[[C]]{{.*}} : vector<16x16x8x8xf32>, memref<16x16x8x8xf32>
863+
864+
module attributes {transform.with_named_sequence} {
865+
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
866+
%mmt4d = transform.structured.match ops{["linalg.mmt4d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
867+
transform.structured.vectorize %mmt4d : !transform.any_op
868+
transform.yield
869+
}
870+
}
871+
872+
// -----
873+
874+
func.func @mmt4d_scalable(%A: memref<16x16x8x1xf32>, %B: memref<16x16x?x1xf32>, %C_in: memref<16x16x8x?xf32>) {
875+
linalg.mmt4d ins(%A, %B: memref<16x16x8x1xf32>, memref<16x16x?x1xf32>)
876+
outs(%C_in: memref<16x16x8x?xf32>)
877+
return
878+
}
879+
// CHECK-LABEL: func.func @mmt4d_scalable(
880+
// CHECK-SAME: %[[A:.*]]: memref<16x16x8x1xf32>,
881+
// CHECK-SAME: %[[B:.*]]: memref<16x16x?x1xf32>,
882+
// CHECK-SAME: %[[C_IN:.*]]: memref<16x16x8x?xf32>) {
883+
// CHECK: %[[VAL_0:.*]] = arith.constant 16 : index
884+
// CHECK: %[[VAL_1:.*]] = arith.constant 16 : index
885+
// CHECK: %[[VAL_2:.*]] = arith.constant 16 : index
886+
// CHECK: %[[C8:.*]] = arith.constant 8 : index
887+
// CHECK: %[[C2:.*]] = arith.constant 2 : index
888+
// CHECK: %[[DIM_2:.*]] = memref.dim %[[B]], %[[C2]] : memref<16x16x?x1xf32>
889+
// CHECK: %[[VAL_6:.*]] = arith.constant 1 : index
890+
// CHECK: %[[VEC_A:.*]] = vector.transfer_read %[[A]]{{.*}} : memref<16x16x8x1xf32>, vector<16x16x16x8x[4]x1xf32>
891+
// CHECK: %[[MASK_1:.*]] = vector.create_mask %[[VAL_1]], %[[VAL_2]], %[[DIM_2]], %[[VAL_6]] : vector<16x16x[4]x1xi1>
892+
// CHECK: %[[VEC_B:.*]] = vector.mask %[[MASK_1]] { vector.transfer_read %[[B]]{{.*}} : memref<16x16x?x1xf32>, vector<16x16x16x8x[4]x1xf32> } : vector<16x16x[4]x1xi1> -> vector<16x16x16x8x[4]x1xf32>
893+
// CHECK: %[[MASK_2:.*]] = vector.create_mask %[[VAL_0]], %[[VAL_1]], %[[C8]], %[[DIM_2]] : vector<16x16x8x[4]xi1>
894+
// CHECK: %[[VAL_15:.*]] = vector.mask %[[MASK_2]] { vector.transfer_read %[[C_IN]]{{.*}} : memref<16x16x8x?xf32>, vector<16x16x8x[4]xf32> } : vector<16x16x8x[4]xi1> -> vector<16x16x8x[4]xf32>
895+
// CHECK: %[[VAL_16:.*]] = arith.mulf %[[VEC_A]], %[[VEC_B]] : vector<16x16x16x8x[4]x1xf32>
896+
// CHECK: %[[MASK_3:.*]] = vector.create_mask %[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[C8]], %[[DIM_2]], %[[VAL_6]] : vector<16x16x16x8x[4]x1xi1>
897+
// CHECK: %[[VAL_18:.*]] = vector.mask %[[MASK_3]] { vector.multi_reduction <add>, %[[VAL_16]], %[[VAL_15]] [2, 5] : vector<16x16x16x8x[4]x1xf32> to vector<16x16x8x[4]xf32> } : vector<16x16x16x8x[4]x1xi1> -> vector<16x16x8x[4]xf32>
898+
// CHECK: vector.mask %[[MASK_2]] { vector.transfer_write %[[VAL_18]], %[[C_IN]]{{.*}} : vector<16x16x8x[4]xf32>, memref<16x16x8x?xf32> } : vector<16x16x8x[4]xi1>
899+
900+
901+
module attributes {transform.with_named_sequence} {
902+
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
903+
%mmt4d = transform.structured.match ops{["linalg.mmt4d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
904+
transform.structured.vectorize %mmt4d vector_sizes [16, 16, 16, 8, [4], 1] : !transform.any_op
905+
transform.yield
906+
}
907+
}
908+
909+
// -----
910+
911+
func.func @mmt4d_scalable_with_assume(%A: memref<16x16x8x1xf32>, %B: memref<16x16x?x1xf32>, %C_in: memref<16x16x8x?xf32>) {
912+
linalg.mmt4d ins(%A, %B: memref<16x16x8x1xf32>, memref<16x16x?x1xf32>)
913+
outs(%C_in: memref<16x16x8x?xf32>)
914+
return
915+
}
916+
// CHECK-LABEL: func.func @mmt4d_scalable_with_assume(
917+
// CHECK-SAME: %[[A:.*]]: memref<16x16x8x1xf32>,
918+
// CHECK-SAME: %[[B:.*]]: memref<16x16x?x1xf32>,
919+
// CHECK-SAME: %[[C_IN:.*]]: memref<16x16x8x?xf32>) {
920+
// CHECK-NOT: mask
921+
// CHECK: %[[VEC_A:.*]] = vector.transfer_read %[[A]]{{.*}} : memref<16x16x8x1xf32>, vector<16x16x16x8x[4]x1xf32>
922+
// CHECK: %[[VEC_B:.*]] = vector.transfer_read %[[B]]{{.*}} : memref<16x16x?x1xf32>, vector<16x16x16x8x[4]x1xf32>
923+
// CHECK: %[[VAL_13:.*]] = vector.transfer_read %[[C_IN]]{{.*}} : memref<16x16x8x?xf32>, vector<16x16x8x[4]xf32>
924+
// CHECK: %[[VAL_14:.*]] = arith.mulf %[[VEC_A]], %[[VEC_B]] : vector<16x16x16x8x[4]x1xf32>
925+
// CHECK: %[[VAL_15:.*]] = vector.multi_reduction <add>, %[[VAL_14]], %[[VAL_13]] [2, 5] : vector<16x16x16x8x[4]x1xf32> to vector<16x16x8x[4]xf32>
926+
// CHECK: vector.transfer_write %[[VAL_15]], %[[C_IN]]{{.*}} : vector<16x16x8x[4]xf32>, memref<16x16x8x?xf32>
927+
928+
module attributes {transform.with_named_sequence} {
929+
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
930+
%mmt4d = transform.structured.match ops{["linalg.mmt4d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
931+
transform.structured.vectorize %mmt4d vector_sizes [16, 16, 16, 8, [4], 1] {assume_scalable_sizes_match_dim_size} : !transform.any_op
932+
transform.yield
933+
}
934+
}
935+
843936
///----------------------------------------------------------------------------------------
844937
/// Tests for other Ops
845938
///----------------------------------------------------------------------------------------
@@ -1094,30 +1187,6 @@ module attributes {transform.with_named_sequence} {
10941187
}
10951188
}
10961189

1097-
// -----
1098-
1099-
func.func @mmt4d(%A: memref<16x16x8x1xf32>, %B: memref<16x16x8x1xf32>, %C_in: memref<16x16x8x8xf32>) {
1100-
linalg.mmt4d ins(%A, %B: memref<16x16x8x1xf32>, memref<16x16x8x1xf32>)
1101-
outs(%C_in: memref<16x16x8x8xf32>)
1102-
return
1103-
}
1104-
1105-
// CHECK-LABEL: func.func @mmt4d(
1106-
// CHECK-SAME: %[[A:.*]]: memref<16x16x8x1xf32>, %[[B:.*]]: memref<16x16x8x1xf32>, %[[C:.*]]: memref<16x16x8x8xf32>) {
1107-
// CHECK: %[[VEC_A:.*]] = vector.transfer_read %[[A]]{{.*}} : memref<16x16x8x1xf32>, vector<16x16x16x8x8x1xf32>
1108-
// CHECK: %[[VEC_B:.*]] = vector.transfer_read %[[B]]{{.*}} : memref<16x16x8x1xf32>, vector<16x16x16x8x8x1xf32>
1109-
// CHECK: %[[VEC_C:.*]] = vector.transfer_read %[[C]]{{.*}} : memref<16x16x8x8xf32>, vector<16x16x8x8xf32>
1110-
// CHECK: %[[MUL:.*]] = arith.mulf %[[VEC_A]], %[[VEC_B]] : vector<16x16x16x8x8x1xf32>
1111-
// CHECK: %[[RED:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[VEC_C]] [2, 5] : vector<16x16x16x8x8x1xf32> to vector<16x16x8x8xf32>
1112-
// CHECK: vector.transfer_write %[[RED]], %[[C]]{{.*}} : vector<16x16x8x8xf32>, memref<16x16x8x8xf32>
1113-
1114-
module attributes {transform.with_named_sequence} {
1115-
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1116-
%mmt4d = transform.structured.match ops{["linalg.mmt4d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1117-
transform.structured.vectorize %mmt4d : !transform.any_op
1118-
transform.yield
1119-
}
1120-
}
11211190

11221191
// -----
11231192

0 commit comments

Comments (0)