[mlir][tosa] Add error if and level checks for COND_IF & WHILE_LOOP #136194
@llvm/pr-subscribers-mlir-tosa, @llvm/pr-subscribers-mlir

Author: TatWai Chong (tatwaichong)

Changes

Error_if checks: verify that the input list, the output list, and the control-flow blocks have the same length and types.
Level checks: verify that the nesting depth does not exceed MAX_NESTING.

Patch is 50.44 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/136194.diff

5 Files Affected:
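As a quick illustration (a hypothetical sketch, not taken from the patch's test suite; the function name and values are made up), generic-form IR like the following is now rejected by the new IfOp verifier, because the `then_graph` block takes two arguments while `input_list` carries only one:

func.func @cond_if_arg_count_mismatch(%cond: tensor<i1>, %in: tensor<1xi32>) -> tensor<1xi32> {
  // Expected to fail verification with:
  //   'tosa.cond_if' op require same number of values in `then_graph` arguments (2) and `input_list` (1)
  %r = "tosa.cond_if"(%cond, %in) ({
  ^bb0(%a: tensor<1xi32>, %b: tensor<1xi32>):  // two block arguments, but only one input
    "tosa.yield"(%a) : (tensor<1xi32>) -> ()
  }, {
  ^bb0(%a: tensor<1xi32>):                     // matches the single input
    "tosa.yield"(%a) : (tensor<1xi32>) -> ()
  }) : (tensor<i1>, tensor<1xi32>) -> tensor<1xi32>
  return %r : tensor<1xi32>
}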
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
index c94edad62cac7..70702647bb50a 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -2559,6 +2559,7 @@ def Tosa_IfOp : Tosa_Op<"cond_if",
);
let hasCustomAssemblyFormat = 1;
+ let hasVerifier = 1;
}
//===----------------------------------------------------------------------===//
@@ -2597,6 +2598,7 @@ def Tosa_WhileOp : Tosa_Op<"while_loop", [
);
let hasCustomAssemblyFormat = 1;
+ let hasVerifier = 1;
}
include "mlir/Dialect/Tosa/IR/TosaUtilOps.td"
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 8b4f6ef0d0980..365ea458d25d5 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -428,6 +428,51 @@ static LogicalResult verifyConvOpModes(T op) {
return success();
}
+// Verify that the block arguments and the tensor list have the same length
+// and types.
+static LogicalResult errorIfTensorListShapeMismatch(Operation *op,
+ ValueRange blocksArgs,
+ StringRef blockName,
+ ValueRange tensorList,
+ StringRef listName) {
+ if (blocksArgs.size() != tensorList.size())
+ return op->emitOpError() << "require same number of values in " << blockName
+ << " (" << blocksArgs.size() << ") and "
+ << listName << " (" << tensorList.size() << ")";
+
+ for (auto [bbArgType, opArgType] :
+ llvm::zip_equal(blocksArgs.getTypes(), tensorList.getTypes())) {
+ ShapeAdaptor bbShapeAdaptor(bbArgType);
+ ShapeAdaptor opShapeAdaptor(opArgType);
+
+ if (!bbShapeAdaptor.hasRank() || !opShapeAdaptor.hasRank())
+ continue;
+
+ if (!bbShapeAdaptor.hasStaticShape() || !opShapeAdaptor.hasStaticShape())
+ continue;
+
+ if (bbArgType != opArgType)
+ return op->emitOpError()
+ << "require same shapes for " << blockName << " (" << bbArgType
+ << ") and " << listName << " (" << opArgType << ")";
+ }
+
+ return success();
+}
+
+static LogicalResult errorIfShapeNotSizeOne(Operation *op, Type type) {
+ ShapeAdaptor shapeAdaptor(type);
+
+ if (!shapeAdaptor.hasRank() || !shapeAdaptor.hasStaticShape())
+ return success();
+
+ SmallVector<int64_t> shape;
+ shapeAdaptor.getDims(shape);
+ if (!llvm::all_of(shape, [](int64_t dim) { return dim == 1; }))
+ return failure();
+
+ return success();
+}
+
// verify that inType and outType have same element types
template <typename T>
static LogicalResult verifySameElementTypes(T op, Type inType, Type outType) {
@@ -3321,6 +3366,91 @@ void IfOp::print(OpAsmPrinter &p) {
p.printOptionalAttrDict((*this)->getAttrs());
}
+LogicalResult IfOp::verify() {
+ if (getThenGraph().empty() || getElseGraph().empty())
+ return emitOpError("require `then_graph` and `else_graph` not be empty");
+
+ if (errorIfTensorListShapeMismatch(
+ *this, getThenGraph().front().getArguments(),
+ "`then_graph` arguments", getInputList(), "`input_list`")
+ .failed())
+ return failure();
+
+ if (errorIfTensorListShapeMismatch(
+ *this, getElseGraph().front().getArguments(),
+ "`else_graph` arguments", getInputList(), "`input_list`")
+ .failed())
+ return failure();
+
+ auto thenYield = cast<tosa::YieldOp>(getThenGraph().front().getTerminator());
+ if (errorIfTensorListShapeMismatch(*this, thenYield.getInputs(),
+ "`then_graph` results", getOutputList(),
+ "`output_list`")
+ .failed())
+ return failure();
+
+ auto elseYield = cast<tosa::YieldOp>(getElseGraph().front().getTerminator());
+ if (errorIfTensorListShapeMismatch(*this, elseYield.getInputs(),
+ "`else_graph` results", getOutputList(),
+ "`output_list`")
+ .failed())
+ return failure();
+
+ auto condType = getCondition().getType();
+ if (errorIfShapeNotSizeOne(*this, condType).failed())
+ return emitOpError() << "`condition` must be a size 1 tensor, got "
+ << condType;
+
+ return success();
+}
+
+LogicalResult WhileOp::verify() {
+ if (getCondGraph().empty() || getBodyGraph().empty())
+ return emitOpError(
+ "`cond_graph` and `body_graph` regions must not be empty");
+
+ if (errorIfTensorListShapeMismatch(*this, getInputList(), "`input_list`",
+ getOutputList(), "`output_list`")
+ .failed())
+ return failure();
+
+ if (errorIfTensorListShapeMismatch(
+ *this, getCondGraph().front().getArguments(),
+ "`cond_graph` arguments", getInputList(), "`input_list`")
+ .failed())
+ return failure();
+
+ if (errorIfTensorListShapeMismatch(
+ *this, getBodyGraph().front().getArguments(),
+ "`body_graph` arguments", getInputList(), "`input_list`")
+ .failed())
+ return failure();
+
+ auto bodyYield = cast<tosa::YieldOp>(getBodyGraph().front().getTerminator());
+ if (errorIfTensorListShapeMismatch(*this, bodyYield.getInputs(),
+ "`body_graph` results", getInputList(),
+ "`input_list`")
+ .failed())
+ return failure();
+
+ // Condition block output must be a single element tensor with a single bool
+ // value.
+ auto condYield = cast<tosa::YieldOp>(getCondGraph().front().getTerminator());
+ if (condYield.getInputs().size() != 1)
+ return emitOpError() << "require `cond_graph` only have one result";
+
+ auto condOutType = condYield.getInputs()[0].getType();
+ if (errorIfShapeNotSizeOne(*this, condOutType).failed())
+ return emitOpError() << "`cond_graph` result must be a size 1 tensor, got "
+ << condOutType;
+
+ if (!getElementTypeOrSelf(condOutType).isInteger(1))
+ return emitOpError() << "`cond_graph` result must be a boolean tensor, got "
+ << condOutType;
+
+ return success();
+}
+
LogicalResult ReverseOp::verify() {
if (verifySameElementTypes(*this, /* inType = */ getInput1().getType(),
/* outType = */ getOutput().getType())
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
index ef9d27f8df0ad..de31198faff2a 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -449,6 +449,42 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
return true;
}
+ // Perform a depth-first search of the TOSA IR structure to find the maximum
+ // nesting depth. The nesting depth starts at 0 and increases by one each time
+ // a new nested `region` is encountered.
+
+ static int32_t getMaxNestedDepth(Operation *op) {
+ int32_t depth = 0;
+ for (Region ®ion : op->getRegions())
+ depth = std::max(depth, getMaxNestedDepth(region));
+ return depth;
+ }
+
+ static int32_t getMaxNestedDepth(Block &block) {
+ int32_t depth = 0;
+ for (Operation &op : block.getOperations())
+ depth = std::max(depth, getMaxNestedDepth(&op));
+ return depth;
+ }
+
+ static int32_t getMaxNestedDepth(Region ®ion) {
+ int32_t depth = 0;
+ for (Block &block : region.getBlocks())
+ depth = std::max(depth, getMaxNestedDepth(block));
+ // Increase the nested depth.
+ return depth + 1;
+ }
+
+ bool levelCheckMaxNesting(Operation *op) {
+ int32_t maxNestedDepth = getMaxNestedDepth(op);
+ if (maxNestedDepth >= tosaLevel.MAX_NESTING) {
+ op->emitOpError() << "failed level check: " << maxNestedDepth
+ << " >= MAX_NESTING";
+ return false;
+ }
+ return true;
+ }
+
bool levelCheckListSize(Operation *op) {
if (auto concat = dyn_cast<tosa::ConcatOp>(op)) {
return levelCheckListSize(op, concat.getInput1().size(), "input1");
@@ -750,6 +786,12 @@ LogicalResult TosaValidation::applyLevelCheck(Operation *op) {
return failure();
}
+ if (isa<tosa::IfOp>(op) || isa<tosa::WhileOp>(op)) {
+ if (!levelCheckMaxNesting(op)) {
+ return failure();
+ }
+ }
+
return success();
}
diff --git a/mlir/test/Dialect/Tosa/level_check.mlir b/mlir/test/Dialect/Tosa/level_check.mlir
index b48f614770fcb..5cad01d85825d 100644
--- a/mlir/test/Dialect/Tosa/level_check.mlir
+++ b/mlir/test/Dialect/Tosa/level_check.mlir
@@ -1327,33 +1327,42 @@ func.func @test_if_tensor_list_size(%arg0 : tensor<i1>) {
%0 = "tosa.const"() {values = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
// expected-error@+1 {{'tosa.cond_if' op failed level check for MAX_TENSOR_LIST_SIZE: inputs}}
%1 = "tosa.cond_if"(%arg0, // condition
- %0, %0, %0, %0, %0, %0, %0, %0,
- %0, %0, %0, %0, %0, %0, %0, %0,
- %0, %0, %0, %0, %0, %0, %0, %0,
- %0, %0, %0, %0, %0, %0, %0, %0,
- %0, %0, %0, %0, %0, %0, %0, %0,
- %0, %0, %0, %0, %0, %0, %0, %0,
- %0, %0, %0, %0, %0, %0, %0, %0,
- %0, %0, %0, %0, %0, %0, %0, %0,
- %0) ({
- ^bb0(%arg3: tensor<1xi32>):
- "tosa.yield"(%arg3) : (tensor<1xi32>) -> ()
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0) ({
+ ^bb0(%00: tensor<1xi32>, %01: tensor<1xi32>, %02: tensor<1xi32>, %03: tensor<1xi32>, %04: tensor<1xi32>, %05: tensor<1xi32>, %06: tensor<1xi32>, %07: tensor<1xi32>, %08: tensor<1xi32>, %09: tensor<1xi32>,
+ %10: tensor<1xi32>, %11: tensor<1xi32>, %12: tensor<1xi32>, %13: tensor<1xi32>, %14: tensor<1xi32>, %15: tensor<1xi32>, %16: tensor<1xi32>, %17: tensor<1xi32>, %18: tensor<1xi32>, %19: tensor<1xi32>,
+ %20: tensor<1xi32>, %21: tensor<1xi32>, %22: tensor<1xi32>, %23: tensor<1xi32>, %24: tensor<1xi32>, %25: tensor<1xi32>, %26: tensor<1xi32>, %27: tensor<1xi32>, %28: tensor<1xi32>, %29: tensor<1xi32>,
+ %30: tensor<1xi32>, %31: tensor<1xi32>, %32: tensor<1xi32>, %33: tensor<1xi32>, %34: tensor<1xi32>, %35: tensor<1xi32>, %36: tensor<1xi32>, %37: tensor<1xi32>, %38: tensor<1xi32>, %39: tensor<1xi32>,
+ %40: tensor<1xi32>, %41: tensor<1xi32>, %42: tensor<1xi32>, %43: tensor<1xi32>, %44: tensor<1xi32>, %45: tensor<1xi32>, %46: tensor<1xi32>, %47: tensor<1xi32>, %48: tensor<1xi32>, %49: tensor<1xi32>,
+ %50: tensor<1xi32>, %51: tensor<1xi32>, %52: tensor<1xi32>, %53: tensor<1xi32>, %54: tensor<1xi32>, %55: tensor<1xi32>, %56: tensor<1xi32>, %57: tensor<1xi32>, %58: tensor<1xi32>, %59: tensor<1xi32>,
+ %60: tensor<1xi32>, %61: tensor<1xi32>, %62: tensor<1xi32>, %63: tensor<1xi32>, %64: tensor<1xi32>
+ ):
+ "tosa.yield"(%64) : (tensor<1xi32>) -> ()
}, {
- ^bb0(%arg3: tensor<1xi32>):
- "tosa.yield"(%arg3) : (tensor<1xi32>) -> ()
+ ^bb0(%00: tensor<1xi32>, %01: tensor<1xi32>, %02: tensor<1xi32>, %03: tensor<1xi32>, %04: tensor<1xi32>, %05: tensor<1xi32>, %06: tensor<1xi32>, %07: tensor<1xi32>, %08: tensor<1xi32>, %09: tensor<1xi32>,
+ %10: tensor<1xi32>, %11: tensor<1xi32>, %12: tensor<1xi32>, %13: tensor<1xi32>, %14: tensor<1xi32>, %15: tensor<1xi32>, %16: tensor<1xi32>, %17: tensor<1xi32>, %18: tensor<1xi32>, %19: tensor<1xi32>,
+ %20: tensor<1xi32>, %21: tensor<1xi32>, %22: tensor<1xi32>, %23: tensor<1xi32>, %24: tensor<1xi32>, %25: tensor<1xi32>, %26: tensor<1xi32>, %27: tensor<1xi32>, %28: tensor<1xi32>, %29: tensor<1xi32>,
+ %30: tensor<1xi32>, %31: tensor<1xi32>, %32: tensor<1xi32>, %33: tensor<1xi32>, %34: tensor<1xi32>, %35: tensor<1xi32>, %36: tensor<1xi32>, %37: tensor<1xi32>, %38: tensor<1xi32>, %39: tensor<1xi32>,
+ %40: tensor<1xi32>, %41: tensor<1xi32>, %42: tensor<1xi32>, %43: tensor<1xi32>, %44: tensor<1xi32>, %45: tensor<1xi32>, %46: tensor<1xi32>, %47: tensor<1xi32>, %48: tensor<1xi32>, %49: tensor<1xi32>,
+ %50: tensor<1xi32>, %51: tensor<1xi32>, %52: tensor<1xi32>, %53: tensor<1xi32>, %54: tensor<1xi32>, %55: tensor<1xi32>, %56: tensor<1xi32>, %57: tensor<1xi32>, %58: tensor<1xi32>, %59: tensor<1xi32>,
+ %60: tensor<1xi32>, %61: tensor<1xi32>, %62: tensor<1xi32>, %63: tensor<1xi32>, %64: tensor<1xi32>
+ ):
+ "tosa.yield"(%01) : (tensor<1xi32>) -> ()
}) : (
- tensor<i1>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>
- ) -> tensor<1xi32>
-
+ tensor<i1>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>
+ ) -> tensor<1xi32>
return
}
@@ -1361,27 +1370,54 @@ func.func @test_if_tensor_list_size(%arg0 : tensor<i1>) {
// CHECK-LABEL: test_if_tensor_list_size_outputs
func.func @test_if_tensor_list_size_outputs(%arg0 : tensor<i1>) {
- %0 = "tosa.const"() {values = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
+ %cst_0 = "tosa.const"() {values = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
// expected-error@+1 {{'tosa.cond_if' op failed level check for MAX_TENSOR_LIST_SIZE: outputs}}
- %r:65 = "tosa.cond_if"(%arg0) ({
- ^bb0(%arg3: tensor<1xi32>):
- "tosa.yield"(%arg3) : (tensor<1xi32>) -> ()
+ %r:65 = "tosa.cond_if"(%arg0, %cst_0) ({
+ ^bb0(%0: tensor<1xi32>):
+ "tosa.yield"(%0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0
+ ) : (
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>
+ ) -> ()
}, {
- ^bb0(%arg3: tensor<1xi32>):
- "tosa.yield"(%arg3) : (tensor<1xi32>) -> ()
- }) : (tensor<i1>) -> (
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
- tensor<1xi32>
- )
-
+ ^bb0(%0: tensor<1xi32>):
+ "tosa.yield"(%0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0, %0, %0, %0, %0, %0,
+ %0, %0, %0, %0, %0
+ ) : (
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>
+ ) -> ()
+ }) : (tensor<i1>, tensor<1xi32>) -> (
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>,
+ ...
[truncated]
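To make the nesting-depth computation concrete, here is a small hypothetical sketch (names and values are illustrative): each region entered adds one level, so getMaxNestedDepth returns 2 for the outer cond_if below, and the levelCheckMaxNesting check above only fails once that value reaches MAX_NESTING.

func.func @nested_cond_if_depth_two(%c: tensor<i1>, %x: tensor<1xi32>) -> tensor<1xi32> {
  // Outer cond_if: its then/else regions contribute the first nesting level.
  %r = "tosa.cond_if"(%c, %c, %x) ({
  ^bb0(%ci: tensor<i1>, %a: tensor<1xi32>):
    // Inner cond_if: its regions contribute the second level, so the outer
    // op's maximum nested depth is 2.
    %inner = "tosa.cond_if"(%ci, %a) ({
    ^bb0(%b: tensor<1xi32>):
      "tosa.yield"(%b) : (tensor<1xi32>) -> ()
    }, {
    ^bb0(%b: tensor<1xi32>):
      "tosa.yield"(%b) : (tensor<1xi32>) -> ()
    }) : (tensor<i1>, tensor<1xi32>) -> tensor<1xi32>
    "tosa.yield"(%inner) : (tensor<1xi32>) -> ()
  }, {
  ^bb0(%ci: tensor<i1>, %a: tensor<1xi32>):
    "tosa.yield"(%a) : (tensor<1xi32>) -> ()
  }) : (tensor<i1>, tensor<i1>, tensor<1xi32>) -> tensor<1xi32>
  return %r : tensor<1xi32>
}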
LGTM
Thanks! Had a few suggestions for improving some of the checks, otherwise it LGTM!
Force-pushed from d8a3f5e to f10baf6.
✅ With the latest revision this PR passed the C/C++ code formatter.
Force-pushed from f10baf6 to 21c0050.
Thanks for the updates, had a question, otherwise LGTM!
Force-pushed from 21c0050 to 5b8f507.
Error_if checks: verify that the input list, the output list, and the control-flow blocks have the same length and types.
Level checks: verify that the nesting depth does not exceed MAX_NESTING.
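The new WhileOp verifier additionally constrains the terminator of `cond_graph` to yield a single size-1 boolean tensor. A minimal hypothetical sketch (generic assembly form; names and values are illustrative) that it rejects, because the condition region yields an i32 tensor instead of an i1 tensor:

func.func @while_cond_not_bool(%arg0: tensor<1xi32>) -> tensor<1xi32> {
  // Expected to fail verification along the lines of:
  //   'tosa.while_loop' op `cond_graph` result must be a boolean tensor, got tensor<1xi32>
  %r = "tosa.while_loop"(%arg0) ({
  ^bb0(%a: tensor<1xi32>):
    // Size 1, but not i1: the boolean check in WhileOp::verify fails here.
    "tosa.yield"(%a) : (tensor<1xi32>) -> ()
  }, {
  ^bb0(%a: tensor<1xi32>):
    "tosa.yield"(%a) : (tensor<1xi32>) -> ()
  }) : (tensor<1xi32>) -> tensor<1xi32>
  return %r : tensor<1xi32>
}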