Diffstat (limited to 'src/TosaDeserialize.cpp')
-rw-r--r--  src/TosaDeserialize.cpp  135
1 file changed, 112 insertions(+), 23 deletions(-)
diff --git a/src/TosaDeserialize.cpp b/src/TosaDeserialize.cpp
index f1b7d98..2421d79 100644
--- a/src/TosaDeserialize.cpp
+++ b/src/TosaDeserialize.cpp
@@ -1,5 +1,5 @@
-// Copyright (c) 2023, ARM Limited.
+// Copyright (c) 2023-2024, ARM Limited.
//
// Licensed under the Apache License, Version 2.0 with LLVM Exceptions
// (the "License"); you may not use this file except in compliance with
@@ -139,8 +139,9 @@ mlir::LogicalResult BuildTensorType(mlir::OpBuilder *op_builder,
element_type = op_builder->getBF16Type();
break;
case DType_SHAPE:
- element_type = op_builder->getIntegerType(64);
- break;
+ llvm::errs()
+ << "ERROR: Cannot construct RankedTensorType out of tosa.shape type \n";
+ return mlir::failure();
default:
llvm::errs() << "ERROR: unknown type " << EnumNamesDType()[ts->GetDtype()]
<< "\n";
@@ -236,7 +237,6 @@ ConstructConstAttr(const mlir::RankedTensorType &output_type,
case DType_UINT8:
case DType_UINT16:
case DType_BF16:
- case DType_SHAPE:
default: {
llvm::errs() << "ERROR: " << op_name
<< " contains unsupported element type\n";
@@ -375,10 +375,11 @@ public:
mlir::Block *_block, mlir::Location _loc,
TosaMlirBlockBuilder *_block_builder,
std::unordered_map<std::string, mlir::Value> *_tensor_map,
- std::unordered_map<std::string, mlir::RankedTensorType> *_tensor_type_map)
+ std::unordered_map<std::string, mlir::RankedTensorType> *_tensor_type_map,
+ std::unordered_map<std::string, mlir::tosa::shapeType> *_shape_type_map)
: op_builder(_op_builder), ser_block(_ser_block), block(_block),
loc(_loc), block_builder(_block_builder), tensor_map(_tensor_map),
- tensor_type_map(_tensor_type_map) {}
+ tensor_type_map(_tensor_type_map), shape_type_map(_shape_type_map) {}
template <Op OPCODE>
std::vector<mlir::Value> build(TosaSerializationOperator *op) const;
@@ -420,8 +421,16 @@ private:
template <class MLIR_OP>
std::vector<mlir::Value>
+ BuildEwiseBinaryShapeOp(TosaSerializationOperator *op) const;
+
+ template <class MLIR_OP>
+ std::vector<mlir::Value>
BuildReductionOp(TosaSerializationOperator *op) const;
+ template <class T>
+ mlir::Value BuildConstShape(mlir::OpBuilder *op_builder, mlir::Location loc,
+ const std::vector<T> &values) const;
+
template <class MLIR_OP>
std::vector<mlir::Value> BuildConvOp(TosaSerializationOperator *op) const;
@@ -432,6 +441,7 @@ private:
TosaMlirBlockBuilder *block_builder;
std::unordered_map<std::string, mlir::Value> *tensor_map;
std::unordered_map<std::string, mlir::RankedTensorType> *tensor_type_map;
+ std::unordered_map<std::string, mlir::tosa::shapeType> *shape_type_map;
};
// Main template to catch unimplemented translation
@@ -566,6 +576,22 @@ std::vector<mlir::Value> TosaMlirOperatorBuilder::BuildEwiseBinaryOp(
}
template <class MLIR_OP>
+std::vector<mlir::Value> TosaMlirOperatorBuilder::BuildEwiseBinaryShapeOp(
+ TosaSerializationOperator *op) const {
+ mlir::Value input0_val = tensor_map->at(op->GetInputTensorNames()[0]);
+ mlir::Value input1_val = tensor_map->at(op->GetInputTensorNames()[1]);
+ mlir::tosa::shapeType output_type =
+ shape_type_map->at(op->GetOutputTensorNames()[0]);
+ assert(op->GetAttributeType() ==
+ Attribute_NONE); // double check that there is no attribute
+
+ mlir::Operation *mlir_op =
+ op_builder->create<MLIR_OP>(loc, output_type, input0_val, input1_val);
+ block->push_back(mlir_op);
+ return std::vector<mlir::Value>({mlir_op->getResult(0)});
+}
+
+template <class MLIR_OP>
std::vector<mlir::Value>
TosaMlirOperatorBuilder::BuildReductionOp(TosaSerializationOperator *op) const {
mlir::Value input_val = tensor_map->at(op->GetInputTensorNames()[0]);
@@ -600,6 +626,14 @@ TosaMlirOperatorBuilder::BuildReductionOp(TosaSerializationOperator *op) const {
return BuildEwiseBinaryOp<mlir::tosa::MLIR_OP_NAME##Op>(op); \
}
+#define BUILD_OP_ELEMENTWISE_BINARY_SHAPE(MLIR_OP_NAME, SCHEMA_OP_NAME) \
+ template <> \
+ std::vector<mlir::Value> \
+ TosaMlirOperatorBuilder::build<Op_##SCHEMA_OP_NAME>( \
+ TosaSerializationOperator * op) const { \
+ return BuildEwiseBinaryShapeOp<mlir::tosa::MLIR_OP_NAME##Op>(op); \
+ }
+
#define BUILD_OP_REDUCTION(MLIR_OP_NAME, SCHEMA_OP_NAME) \
template <> \
std::vector<mlir::Value> \
@@ -654,6 +688,11 @@ BUILD_OP_ELEMENTWISE_UNARY(Tanh, TANH)
BUILD_OP_ELEMENTWISE_UNARY(Identity, IDENTITY)
BUILD_OP_ELEMENTWISE_UNARY(Cast, CAST)
+BUILD_OP_ELEMENTWISE_BINARY_SHAPE(AddShape, ADD_SHAPE)
+BUILD_OP_ELEMENTWISE_BINARY_SHAPE(SubShape, SUB_SHAPE)
+BUILD_OP_ELEMENTWISE_BINARY_SHAPE(MulShape, MUL_SHAPE)
+BUILD_OP_ELEMENTWISE_BINARY_SHAPE(DivShape, DIV_SHAPE)
+
template <>
std::vector<mlir::Value>
TosaMlirOperatorBuilder::build<Op_CONST>(TosaSerializationOperator *op) const {
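For reference, each BUILD_OP_ELEMENTWISE_BINARY_SHAPE invocation above expands to a build<> specialization that simply forwards to BuildEwiseBinaryShapeOp. A hand expansion of the ADD_SHAPE case (illustrative only; the real code is generated by the preprocessor):

    // Expansion of BUILD_OP_ELEMENTWISE_BINARY_SHAPE(AddShape, ADD_SHAPE)
    template <>
    std::vector<mlir::Value>
    TosaMlirOperatorBuilder::build<Op_ADD_SHAPE>(TosaSerializationOperator *op) const {
      // Forward to the shared binary shape-op builder, instantiated with the AddShape op.
      return BuildEwiseBinaryShapeOp<mlir::tosa::AddShapeOp>(op);
    }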
@@ -670,6 +709,40 @@ TosaMlirOperatorBuilder::build<Op_CONST>(TosaSerializationOperator *op) const {
return std::vector<mlir::Value>({mlir_op->getResult(0)});
}
+template <class T>
+mlir::Value
+TosaMlirOperatorBuilder::BuildConstShape(mlir::OpBuilder *op_builder,
+ mlir::Location loc,
+ const std::vector<T> &values) const {
+ std::vector<int64_t> vec;
+ for (auto val : values) {
+ vec.push_back(val);
+ }
+ auto attr = op_builder->getIndexTensorAttr(vec);
+ auto type = mlir::tosa::shapeType::get(op_builder->getContext(),
+ /* rank = */ vec.size());
+ mlir::Operation *mlir_op =
+ op_builder->create<mlir::tosa::ConstShapeOp>(loc, type, attr);
+ block->push_back(mlir_op);
+ return mlir_op->getResult(0);
+}
+
+template <>
+std::vector<mlir::Value> TosaMlirOperatorBuilder::build<Op_CONST_SHAPE>(
+ TosaSerializationOperator *op) const {
+ const auto &output_name = op->GetOutputTensorNames()[0];
+ mlir::tosa::shapeType output_type = shape_type_map->at(output_name);
+ TosaSerializationTensor *ts = ser_block->GetTensorByName(output_name);
+
+ const auto &data = ts->GetData();
+
+ std::vector<int64_t> i64_data;
+ TosaSerializationHandler::ConvertU8toI64(data, output_type.getRank(),
+ i64_data);
+ mlir::Value result = BuildConstShape(op_builder, loc, i64_data);
+ return std::vector<mlir::Value>({result});
+}
+
template <class MLIR_OP>
std::vector<mlir::Value>
TosaMlirOperatorBuilder::BuildConvOp(TosaSerializationOperator *op) const {
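A minimal standalone sketch of the same const-shape construction, assuming the MLIR and TOSA dialect headers are available; the helper name makeConstShape is hypothetical, but the builder calls mirror BuildConstShape above:

    #include "mlir/Dialect/Tosa/IR/TosaOps.h"
    #include "mlir/IR/Builders.h"

    // Build a tosa.const_shape value of rank dims.size() holding the given dimensions.
    mlir::Value makeConstShape(mlir::OpBuilder &b, mlir::Location loc,
                               llvm::ArrayRef<int64_t> dims) {
      auto attr = b.getIndexTensorAttr(dims);  // dense index-tensor attribute
      auto type = mlir::tosa::shapeType::get(b.getContext(),
                                             /* rank = */ dims.size());
      return b.create<mlir::tosa::ConstShapeOp>(loc, type, attr).getResult();
    }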
@@ -891,6 +964,24 @@ TosaMlirOperatorBuilder::build<Op_CONCAT>(TosaSerializationOperator *op) const {
}
template <>
+std::vector<mlir::Value> TosaMlirOperatorBuilder::build<Op_CONCAT_SHAPE>(
+ TosaSerializationOperator *op) const {
+ mlir::tosa::shapeType output_type =
+ shape_type_map->at(op->GetOutputTensorNames()[0]);
+
+ llvm::SmallVector<mlir::Value> input_values;
+ for (auto &input_name : op->GetInputTensorNames()) {
+ mlir::Value input_val = tensor_map->at(input_name);
+ input_values.push_back(input_val);
+ }
+
+ mlir::Operation *mlir_op = op_builder->create<mlir::tosa::ConcatShapeOp>(
+ loc, output_type, input_values);
+ block->push_back(mlir_op);
+ return std::vector<mlir::Value>({mlir_op->getResult(0)});
+}
+
+template <>
std::vector<mlir::Value>
TosaMlirOperatorBuilder::build<Op_NEGATE>(TosaSerializationOperator *op) const {
mlir::Value input_val = tensor_map->at(op->GetInputTensorNames()[0]);
@@ -926,17 +1017,10 @@ std::vector<mlir::Value> TosaMlirOperatorBuilder::build<Op_RESHAPE>(
mlir::Value input_val = tensor_map->at(op->GetInputTensorNames()[0]);
mlir::RankedTensorType output_type =
tensor_type_map->at(op->GetOutputTensorNames()[0]);
-
- assert(op->GetAttributeType() ==
- Attribute_ReshapeAttribute); // double check attribute type
- TosaReshapeAttribute *attr =
- static_cast<TosaReshapeAttribute *>(op->GetAttribute());
-
- mlir::DenseI64ArrayAttr new_shape =
- BuildDenseI64ArrayAttr(op_builder, attr->new_shape());
+ mlir::Value shape_val = tensor_map->at(op->GetInputTensorNames()[1]);
mlir::Operation *mlir_op = op_builder->create<mlir::tosa::ReshapeOp>(
- loc, output_type, input_val, new_shape);
+ loc, output_type, input_val, shape_val);
block->push_back(mlir_op);
return std::vector<mlir::Value>({mlir_op->getResult(0)});
}
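With this change, the reshape target shape travels as a tosa.shape operand (the operator's second input) rather than as a DenseI64ArrayAttr, so a producer has to materialize the shape as a value first. A minimal sketch under the same assumptions as above; the helper name reshapeTo is hypothetical:

    // Reshape `input` to `new_shape` by first materializing the shape as a const_shape value.
    mlir::Value reshapeTo(mlir::OpBuilder &b, mlir::Location loc, mlir::Value input,
                          mlir::RankedTensorType out_type,
                          llvm::ArrayRef<int64_t> new_shape) {
      auto shape_ty = mlir::tosa::shapeType::get(b.getContext(), new_shape.size());
      mlir::Value shape_val =
          b.create<mlir::tosa::ConstShapeOp>(loc, shape_ty,
                                             b.getIndexTensorAttr(new_shape))
              .getResult();
      return b.create<mlir::tosa::ReshapeOp>(loc, out_type, input, shape_val)
          .getResult();
    }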
@@ -1081,16 +1165,12 @@ template <>
std::vector<mlir::Value>
TosaMlirOperatorBuilder::build<Op_TILE>(TosaSerializationOperator *op) const {
mlir::Value input_val = tensor_map->at(op->GetInputTensorNames()[0]);
+ mlir::Value multiples = tensor_map->at(op->GetInputTensorNames()[1]);
mlir::RankedTensorType output_type =
tensor_type_map->at(op->GetOutputTensorNames()[0]);
assert(op->GetAttributeType() ==
- Attribute_TileAttribute); // double check attribute type
- TosaTileAttribute *attr =
- static_cast<TosaTileAttribute *>(op->GetAttribute());
-
- mlir::DenseI64ArrayAttr multiples =
- BuildDenseI64ArrayAttr(op_builder, attr->multiples());
+ Attribute_NONE); // double check attribute type
mlir::Operation *mlir_op = op_builder->create<mlir::tosa::TileOp>(
loc, output_type, input_val, multiples);
@@ -1395,6 +1475,7 @@ private:
TosaMlirRegionBuilder *region_builder;
mlir::Block *block;
std::unordered_map<std::string, mlir::RankedTensorType> tensor_type_map;
+ std::unordered_map<std::string, mlir::tosa::shapeType> shape_type_map;
std::unordered_set<std::string> unranked_tensors;
};
@@ -1562,17 +1643,25 @@ mlir::LogicalResult TosaMlirBlockBuilder::BuildAllOpsInBlock(
std::queue<TosaSerializationOperator *> operator_queue;
TosaMlirOperatorBuilder tosa_op_builder(op_builder, ser_block, block, loc,
- this, &tensor_map, &tensor_type_map);
+ this, &tensor_map, &tensor_type_map,
+ &shape_type_map);
for (auto ts : ser_block->GetTensors()) {
if (ts->GetVariable()) {
RegisterVariableTensor(ts);
}
+ const auto &ts_name = ts->GetName();
+ if (ts->GetDtype() == DType::DType_SHAPE) {
+ // ts is tosa.shape type
+ auto shape_rank = ts->GetShape()[0];
+ shape_type_map[ts_name] =
+ mlir::tosa::shapeType::get(op_builder->getContext(), shape_rank);
+ continue;
+ }
mlir::RankedTensorType type;
if (BuildTensorType(op_builder, ts, type).failed()) {
return mlir::failure();
}
- const auto &ts_name = ts->GetName();
tensor_type_map[ts_name] = type;
if (ts->GetIsUnranked()) {
assert(ts->GetShape().empty()); // unranked tensors should have shape = {}