//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <Layer.hpp>

#include <tosaCommon/TosaMappings.hpp>
#include <tosaCommon/operatorMappings/TosaOperatorUtils.hpp>

#include <doctest/doctest.h>

#include <numeric>

using namespace armnn;
using namespace tosa;

inline void VerifyTosaAttribute(const BaseDescriptor& descriptor,
                                const TosaAttributeBase* attribute,
                                std::vector<int32_t> inputShape,
                                std::vector<int32_t> outputShape,
                                LayerType type,
                                uint32_t mappingOpNumber = 0)
{
    switch (type)
    {
        case LayerType::Convolution2d:
        {
            auto conv2dDesc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
            std::vector<int32_t> pad = {static_cast<int32_t>(conv2dDesc->m_PadTop),
                                        static_cast<int32_t>(conv2dDesc->m_PadBottom),
                                        static_cast<int32_t>(conv2dDesc->m_PadLeft),
                                        static_cast<int32_t>(conv2dDesc->m_PadRight)};

            std::vector<int32_t> dilation = {static_cast<int32_t>(conv2dDesc->m_DilationY),
                                             static_cast<int32_t>(conv2dDesc->m_DilationX)};
            std::vector<int32_t> stride = {static_cast<int32_t>(conv2dDesc->m_StrideY),
                                           static_cast<int32_t>(conv2dDesc->m_StrideX)};
            TosaConvAttribute convAttribute(attribute);
            CHECK(pad == convAttribute.pad());
            CHECK(dilation == convAttribute.dilation());
            CHECK(stride == convAttribute.stride());
            break;
        }
        case LayerType::Pooling2d:
        {
            auto poolDesc = PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor);
            std::vector<int32_t> pad = {static_cast<int32_t>(poolDesc->m_PadTop),
                                        static_cast<int32_t>(poolDesc->m_PadBottom),
                                        static_cast<int32_t>(poolDesc->m_PadLeft),
                                        static_cast<int32_t>(poolDesc->m_PadRight)};

            bool avgPoolIgnoreValue =
                (poolDesc->m_PoolType == PoolingAlgorithm::Average) &&
                (poolDesc->m_PaddingMethod == PaddingMethod::IgnoreValue);

            if (avgPoolIgnoreValue)
            {
                if (mappingOpNumber == 0)
                {
                    if (poolDesc->m_DataLayout == DataLayout::NHWC)
                    {
                        pad = {0, 0,
                               static_cast<int32_t>(poolDesc->m_PadTop),
                               static_cast<int32_t>(poolDesc->m_PadBottom),
                               static_cast<int32_t>(poolDesc->m_PadLeft),
                               static_cast<int32_t>(poolDesc->m_PadRight),
                               0, 0 };
                    }
                    else
                    {
                        pad = {0, 0,
                               0, 0,
                               static_cast<int32_t>(poolDesc->m_PadTop),
                               static_cast<int32_t>(poolDesc->m_PadBottom),
                               static_cast<int32_t>(poolDesc->m_PadLeft),
                               static_cast<int32_t>(poolDesc->m_PadRight) };
                    }

                    TosaPadAttribute padAttribute(attribute);

                    CHECK(pad == padAttribute.padding());
                    CHECK(0.0f == padAttribute.pad_const_fp());
                    CHECK(0 == padAttribute.pad_const_int());

                    break;
                }
                pad = {0, 0, 0, 0};
            }

            std::vector<int32_t> kernel = {static_cast<int32_t>(poolDesc->m_PoolHeight),
                                           static_cast<int32_t>(poolDesc->m_PoolWidth)};
            std::vector<int32_t> stride = {static_cast<int32_t>(poolDesc->m_StrideY),
                                           static_cast<int32_t>(poolDesc->m_StrideX)};
            TosaPoolAttribute poolAttribute(attribute);
            CHECK(pad == poolAttribute.pad());
            CHECK(kernel == poolAttribute.kernel());
            CHECK(stride == poolAttribute.stride());
            break;
        }
        case LayerType::Reshape:
        {
            auto reshapeDesc = PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor);
            TosaReshapeAttribute reshapeAttribute(attribute);
            std::vector<int32_t> shapeAttrib = reshapeAttribute.new_shape();

            CHECK(GetTosaTensorShape(reshapeDesc->m_TargetShape) == shapeAttrib);
            CHECK(outputShape == shapeAttrib);

            auto numInputElements = std::accumulate(std::begin(inputShape),
                                                    std::end(inputShape),
                                                    1,
                                                    std::multiplies<int32_t>());
            auto numAttributeShapeElements = std::accumulate(std::begin(shapeAttrib),
                                                             std::end(shapeAttrib),
                                                             1,
                                                             std::multiplies<int32_t>());
            CHECK(numInputElements == numAttributeShapeElements);
            break;
        }
        case LayerType::Resize:
        {
            auto resizeDesc = PolymorphicDowncast<const ResizeDescriptor*>(&descriptor);
            TosaResizeAttribute resizeAttribute(attribute);

            // Check output shape
            uint32_t outputHeight = resizeDesc->m_TargetHeight;
            uint32_t outputWidth = resizeDesc->m_TargetWidth;

            CHECK((outputShape.size() == 4));
            if (resizeDesc->m_DataLayout == DataLayout::NHWC)
            {
                // Check output is not dynamic
                CHECK((outputShape[1] > 0));
                CHECK((outputShape[2] > 0));

                CHECK((outputHeight == static_cast<uint32_t>(outputShape[1])));
                CHECK((outputWidth == static_cast<uint32_t>(outputShape[2])));
            }
            else if (resizeDesc->m_DataLayout == DataLayout::NCHW)
            {
                // Check output is not dynamic
                CHECK((outputShape[2] > 0));
                CHECK((outputShape[3] > 0));

                CHECK((outputHeight == static_cast<uint32_t>(outputShape[2])));
                CHECK((outputWidth == static_cast<uint32_t>(outputShape[3])));
            }
            else
            {
                throw armnn::Exception("VerifyTosaAttribute: Invalid DataLayout in Resize.");
            }

            // Check Resize mode/method
            if (resizeDesc->m_Method == ResizeMethod::NearestNeighbor)
            {
                CHECK((resizeAttribute.mode() == tosa::ResizeMode_NEAREST));
            }
            else if (resizeDesc->m_Method == ResizeMethod::Bilinear)
            {
                CHECK((resizeAttribute.mode() == tosa::ResizeMode_BILINEAR));
            }
            else
            {
                throw armnn::Exception("VerifyTosaAttribute: Unsupported Resize method.");
            }
            break;
        }
        case LayerType::Slice:
        {
            auto sliceDesc = PolymorphicDowncast<const SliceDescriptor*>(&descriptor);
            TosaSliceAttribute sliceAttribute(attribute);

            std::vector<int32_t> begin(sliceDesc->m_Begin.begin(), sliceDesc->m_Begin.end());
            std::vector<int32_t> size(sliceDesc->m_Size.begin(), sliceDesc->m_Size.end());

            CHECK(begin == sliceAttribute.start());
            CHECK(size == sliceAttribute.size());

            CHECK(begin.size() == inputShape.size());
            CHECK(size.size() == inputShape.size());

            CHECK(begin.size() == outputShape.size());
            CHECK(size.size() == outputShape.size());
            break;
        }
        case LayerType::Splitter:
        {
            auto splitDesc = PolymorphicDowncast<const SplitterDescriptor*>(&descriptor);
            TosaSliceAttribute sliceAttribute(attribute);

            // Each slice op has a different beginning point.
            // The size is the same for each slice op.
            std::vector<int32_t> beginVals;
            beginVals.reserve(inputShape.size());
            std::vector<int32_t> sizeVals;
            sizeVals.reserve(inputShape.size());
            for (unsigned int j = 0; j < inputShape.size(); ++j)
            {
                beginVals.emplace_back(0);
                int32_t dim = inputShape[j];
                sizeVals.emplace_back(dim);
            }

            uint32_t axis = static_cast<uint32_t>(splitDesc->GetAxis());
            sizeVals[axis] = sizeVals[axis] / static_cast<int32_t>(splitDesc->GetNumViews());
            beginVals[axis] = static_cast<int32_t>(mappingOpNumber) * sizeVals[axis];

            CHECK(beginVals == sliceAttribute.start());
            CHECK(sizeVals == sliceAttribute.size());

            CHECK(beginVals.size() == inputShape.size());
            CHECK(sizeVals.size() == inputShape.size());

            CHECK(beginVals.size() == outputShape.size());
            CHECK(sizeVals.size() == outputShape.size());
            break;
        }
        case LayerType::TransposeConvolution2d:
        {
            auto transposeConv2dDesc = PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor);

            std::vector<int32_t> outPad = {-static_cast<int32_t>(transposeConv2dDesc->m_PadTop),
                                           -static_cast<int32_t>(transposeConv2dDesc->m_PadBottom),
                                           -static_cast<int32_t>(transposeConv2dDesc->m_PadLeft),
                                           -static_cast<int32_t>(transposeConv2dDesc->m_PadRight)};
            std::vector<int32_t> stride = {static_cast<int32_t>(transposeConv2dDesc->m_StrideY),
                                           static_cast<int32_t>(transposeConv2dDesc->m_StrideX)};
            TosaTransposeConvAttribute transposeConvAttribute(attribute);
            CHECK(outPad == transposeConvAttribute.out_pad());
            CHECK(stride == transposeConvAttribute.stride());
            break;
        }
        case LayerType::Transpose:
        {
            auto transposeDesc = PolymorphicDowncast<const TransposeDescriptor*>(&descriptor);

            std::vector<int32_t> outPerm(transposeDesc->m_DimMappings.begin(),
                                         transposeDesc->m_DimMappings.end());
            TosaTransposeAttribute transposeAttribute(attribute);
            CHECK(outPerm == transposeAttribute.perms());
            break;
        }
        default:
            break;
    }
    return;
}

inline void AssertTosaOneToOneMappingBasicBlock(TosaSerializationBasicBlock* basicBlock,
                                                std::vector<std::vector<int32_t>> inputShape,
                                                std::vector<std::vector<int32_t>> outputShape,
                                                Op tosaOp,
                                                Attribute tosaAttribute,
                                                const BaseDescriptor& descriptor,
                                                LayerType type,
                                                DType dataType = DType_FP32)
{
    uint32_t numInputs = static_cast<uint32_t>(inputShape.size());
    uint32_t numInputTensors = static_cast<uint32_t>(inputShape.size());
    uint32_t numOutputs = static_cast<uint32_t>(outputShape.size());
    std::string operatorString = TosaOpToString(tosaOp);

    // The number of tensors in the block can be different if there are constant layers,
    // as they are created separately.
    if (type == LayerType::Convolution2d)
    {
        numInputTensors = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor)->m_BiasEnabled ? 3 : 2;
    }

    std::string blockStr = operatorString + "_block_";
    CHECK(basicBlock->GetName().find(blockStr) != std::string::npos);
    CHECK(basicBlock->GetInputs().size() == numInputTensors);
    CHECK(basicBlock->GetOutputs().size() == numOutputs);
    CHECK(basicBlock->GetOperators().size() == 1);
    CHECK(basicBlock->GetTensors().size() == (numInputs + numOutputs));

    TosaSerializationOperator* op = basicBlock->GetOperators().at(0);
    CHECK(op->GetInputTensorNames().size() == numInputTensors);
    CHECK(op->GetOutputTensorNames().size() == numOutputs);

    for (uint32_t i = 0; i < numInputs; i++)
    {
        std::basic_string<char> blockInputName = basicBlock->GetInputs()[i];
        std::basic_string<char> operatorInputName = op->GetInputTensorNames()[i];
        std::basic_string<char> tensorName = basicBlock->GetTensors()[i]->GetName();

        std::string opStr = "input" + std::to_string(i) + "_";

        CHECK(blockInputName == operatorInputName);
        CHECK(tensorName == operatorInputName);
        CHECK(blockInputName.find(opStr) != std::string::npos);
    }

    for (uint32_t i = 0; i < numOutputs; i++)
    {
        std::basic_string<char> blockOutputName = basicBlock->GetOutputs()[i];
        std::basic_string<char> operatorOutputName = op->GetOutputTensorNames()[i];
        std::basic_string<char> tensorName = basicBlock->GetTensors()[numInputs + i]->GetName();

        std::string opStr = "output" + std::to_string(i) + "_";
        if (tosaOp == Op_CONST)
        {
            opStr = "constant_";
        }

        CHECK(blockOutputName == operatorOutputName);
        CHECK(tensorName == operatorOutputName);
        CHECK(blockOutputName.find(opStr) != std::string::npos);
    }

    CHECK(op->GetAttributeType() == tosaAttribute);
    CHECK(op->GetOp() == tosaOp);

    for (uint32_t i = 0; i < numInputs; i++)
    {
        TosaSerializationTensor* tensor = basicBlock->GetTensors()[i];
        CHECK(tensor->GetDtype() == dataType);
        CHECK(tensor->GetData().size() == 0);
        CHECK(tensor->GetShape() == inputShape[static_cast<size_t>(i)]);
    }

    for (uint32_t i = 0; i < numOutputs; i++)
    {
        TosaSerializationTensor* tensor = basicBlock->GetTensors()[i + inputShape.size()];
        CHECK(tensor->GetDtype() == dataType);
        CHECK(tensor->GetShape() == outputShape[static_cast<size_t>(i)]);
        if (tosaOp != Op_CONST)
        {
            // Const tensors contain data.
            CHECK(tensor->GetData().size() == 0);
        }
    }

    std::vector<int32_t> input = {};
    std::vector<int32_t> output = {};

    if (!inputShape.empty())
    {
        input = inputShape[0];
    }
    if (!outputShape.empty())
    {
        output = outputShape[0];
    }

    VerifyTosaAttribute(descriptor,
                        op->GetAttribute(),
                        input,
                        output,
                        type);
}
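
// A minimal usage sketch, kept as a comment so it does not affect the header: a typical test builds
// a layer descriptor, obtains the one-to-one TOSA basic block for that layer type, and then checks
// it with the helpers above. The call to GetTosaMapping and its argument order are assumptions based
// on the tosaCommon backend; the concrete shapes, enums and descriptor values below are illustrative
// only and may differ from the real test cases.
//
//     Pooling2dDescriptor descriptor;
//     descriptor.m_PoolType    = PoolingAlgorithm::Max;
//     descriptor.m_PoolWidth   = descriptor.m_PoolHeight = 2;
//     descriptor.m_StrideX     = descriptor.m_StrideY    = 2;
//     descriptor.m_DataLayout  = DataLayout::NHWC;
//
//     TensorInfo inputInfo({ 1, 4, 4, 1 }, DataType::Float32);
//     TensorInfo outputInfo({ 1, 2, 2, 1 }, DataType::Float32);
//
//     // Assumed entry point from tosaCommon/TosaMappings.hpp.
//     TosaSerializationBasicBlock* basicBlock =
//         GetTosaMapping(nullptr, LayerType::Pooling2d, { &inputInfo }, { &outputInfo }, descriptor);
//
//     AssertTosaOneToOneMappingBasicBlock(basicBlock,
//                                         { { 1, 4, 4, 1 } },   // input shapes
//                                         { { 1, 2, 2, 1 } },   // output shapes
//                                         Op_MAX_POOL2D,
//                                         Attribute_PoolAttribute,
//                                         descriptor,
//                                         LayerType::Pooling2d);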