@@ -540,26 +540,9 @@ static void buildTransConvOpWithQuantInfo(
   }
 }
 
-/// The tosa.fully_connected op has its own builder as it does not have
-/// strides/dilation/padding.
-static void buildFCOpWithQuantInfo(OpBuilder &builder, OperationState &result,
-                                   Type outputType, Value input, Value weight,
-                                   Value bias) {
-
-  result.addOperands({input, weight, bias});
-  auto quantAttr = ::buildConvOpQuantizationAttr(builder, input, weight);
-  if (quantAttr) {
-    result.addAttribute("quantization_info", quantAttr);
-    result.addTypes(
-        buildConvOpResultTypeInfo(builder, outputType, input, weight));
-  } else {
-    result.addTypes(outputType);
-  }
-}
-
-/// The tosa.matmul op is also intended to be generated where a
-/// fully_connected op must be constructed where the weight is not a constant.
-/// In this case, the fully_connected op must be expressed using matmul.
+/// The tosa.matmul op is also intended to be generated where a fully_connected
+/// op would otherwise be constructed but the weight is not a constant. In this
+/// case, the fully_connected op must be expressed using matmul.
 /// TODO: Add link to the legalization document explaining this.
 static void buildMatMulOpWithQuantInfo(OpBuilder &builder,
                                        OperationState &result, Type outputType,
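
The added comment above describes expressing a fully_connected with a non-constant weight as a matmul. As a rough, standalone illustration of why that rewrite is value-preserving (plain C++, not MLIR builder code; the `[N, IC]` input / `[OC, IC]` weight / `[OC]` bias layout is assumed from the usual TOSA fully_connected convention rather than taken from this patch), the sketch below computes the same result once directly and once as a matrix multiply against the transposed weight followed by a bias add:

```cpp
// Standalone sketch: a fully connected layer
//   out[n][oc] = sum_ic in[n][ic] * w[oc][ic] + b[oc]
// matches a matrix multiply against the transposed weight plus a bias add.
// Shapes ([N, IC] input, [OC, IC] weight, [OC] bias) are assumed, not taken
// from this patch.
#include <array>
#include <cstdio>

int main() {
  constexpr int N = 2, IC = 3, OC = 2;
  std::array<std::array<float, IC>, N> in = {{{1, 2, 3}, {4, 5, 6}}};
  std::array<std::array<float, IC>, OC> w = {{{1, 0, 1}, {0, 1, 0}}};
  std::array<float, OC> b = {0.5f, -0.5f};

  // Path 1: direct fully connected computation.
  std::array<std::array<float, OC>, N> fc{};
  for (int n = 0; n < N; ++n)
    for (int oc = 0; oc < OC; ++oc) {
      fc[n][oc] = b[oc];
      for (int ic = 0; ic < IC; ++ic)
        fc[n][oc] += in[n][ic] * w[oc][ic];
    }

  // Path 2: transpose the weight to [IC, OC], do a matmul, then add the bias.
  std::array<std::array<float, OC>, IC> wt{};
  for (int oc = 0; oc < OC; ++oc)
    for (int ic = 0; ic < IC; ++ic)
      wt[ic][oc] = w[oc][ic];

  std::array<std::array<float, OC>, N> mm{};
  for (int n = 0; n < N; ++n)
    for (int oc = 0; oc < OC; ++oc) {
      float acc = 0.0f;
      for (int ic = 0; ic < IC; ++ic)
        acc += in[n][ic] * wt[ic][oc];
      mm[n][oc] = acc + b[oc];
    }

  // Both paths print identical values.
  for (int n = 0; n < N; ++n)
    for (int oc = 0; oc < OC; ++oc)
      std::printf("fc=%g matmul+bias=%g\n", fc[n][oc], mm[n][oc]);
}
```

In IR terms this corresponds to transposing the weight, reshaping the operands into the batched rank-3 form tosa.matmul expects, performing the matmul, and adding the bias afterwards; the exact op sequence depends on the legalization that replaces fully_connected.
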
@@ -863,76 +846,6 @@ bool tosa::EqualOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) {
   return succeeded(verifyCompatibleShape(l[0], r[0]));
 }
 
-LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
-    MLIRContext *context, ::std::optional<Location> location,
-    FullyConnectedOp::Adaptor adaptor,
-    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
-  ShapeAdaptor inputShape(adaptor.getInput().getType());
-  ShapeAdaptor weightShape(adaptor.getWeight().getType());
-  ShapeAdaptor biasShape(adaptor.getBias().getType());
-
-  // All shapes are dynamic.
-  SmallVector<int64_t> outShape;
-  outShape.resize(2, ShapedType::kDynamic);
-
-  if (inputShape.hasRank()) {
-    outShape[0] = inputShape.getDimSize(0);
-  }
-
-  if (weightShape.hasRank()) {
-    outShape[1] = weightShape.getDimSize(0);
-  }
-
-  if (biasShape.hasRank()) {
-    outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0)
-                                                      : outShape[1];
-  }
-
-  inferredReturnShapes.push_back(ShapedTypeComponents(outShape));
-  return success();
-}
-
-LogicalResult FullyConnectedOp::verify() {
-  // All TOSA conv ops have an input() and weight().
-  auto inputType = llvm::dyn_cast<RankedTensorType>(getInput().getType());
-
-  RankedTensorType weightType =
-      llvm::dyn_cast<RankedTensorType>(getWeight().getType());
-
-  // Must be ranked tensor types
-  if (!inputType) {
-    emitOpError("expect a ranked tensor for input, got ") << getInput();
-    return failure();
-  }
-  if (!weightType) {
-    emitOpError("expect a ranked tensor for weight, got ") << getWeight();
-    return failure();
-  }
-
-  auto inputEType = inputType.getElementType();
-  auto weightEType = weightType.getElementType();
-
-  bool inputIsQuant = !llvm::isa<FloatType>(inputEType);
-  bool weightIsQuant = !llvm::isa<FloatType>(weightEType);
-
-  // Either both must be quantized or both unquantized.
-  if (inputIsQuant != weightIsQuant) {
-    emitOpError(
-        "expect both input and weight to be float or not together, got ")
-        << inputEType << " and " << weightEType;
-    return failure();
-  }
-
-  // Quantized type must have constructed the quantizationattr, and unquantized
-  // types should not have a quantizationattr.
-  if ((inputIsQuant && !getInputZp()) || (!inputIsQuant && getInputZp())) {
-    emitOpError("input zero point is required for quantized type, and not "
-                "allowed for float type");
-    return failure();
-  }
-  return success();
-}
-
 LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
     MLIRContext *context, ::std::optional<Location> location,
     MatMulOp::Adaptor adaptor,