From e5e2676409a936431f87d31fb74d825257b20804 Mon Sep 17 00:00:00 2001
From: Eric Kunze
Date: Tue, 13 Oct 2020 16:11:07 -0700
Subject: Initial checkin of TOSA reference_model and tests

Change-Id: I2f8e7fa63e2ae40203e57d2cc8814bde3b312cb6
Signed-off-by: Eric Kunze
---
 reference_model/src/ops/tensor_ops.cc | 1229 +++++++++++++++++++++++++++++++++
 1 file changed, 1229 insertions(+)
 create mode 100644 reference_model/src/ops/tensor_ops.cc

diff --git a/reference_model/src/ops/tensor_ops.cc b/reference_model/src/ops/tensor_ops.cc
new file mode 100644
index 0000000..a735334
--- /dev/null
+++ b/reference_model/src/ops/tensor_ops.cc
@@ -0,0 +1,1229 @@
+
+// Copyright (c) 2020, ARM Limited.
+//
+//    Licensed under the Apache License, Version 2.0 (the "License");
+//    you may not use this file except in compliance with the License.
+//    You may obtain a copy of the License at
+//
+//         http://www.apache.org/licenses/LICENSE-2.0
+//
+//    Unless required by applicable law or agreed to in writing, software
+//    distributed under the License is distributed on an "AS IS" BASIS,
+//    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//    See the License for the specific language governing permissions and
+//    limitations under the License.
+
+#include "tensor_ops.h"
+#include "quant_util.h"
+#include "template_types.h"
+
+using namespace TosaReference;
+using namespace Eigen;
+using namespace tosa;
+
+template <int Rank, DType Dtype>
+OpArgMax<Rank, Dtype>::OpArgMax(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+    : GraphNode(Op_ARGMAX, id_)
+{
+    setRequiredOperands(1, 1);
+    setRequiredRank(0, 6);
+
+    INIT_ATTRIBUTE(Axis);
+}
+
+template <int Rank, DType Dtype>
+OpArgMax<Rank, Dtype>::~OpArgMax()
+{
+    if (attribute)
+        delete attribute;
+}
+
+template <int Rank, DType Dtype>
+int OpArgMax<Rank, Dtype>::checkTensorAttributes()
+{
+    if (validateRequiredOperands())
+        return 1;
+
+    if (validateRequiredRank(inputs[0]) || validateRequiredRank(outputs[0]))
+    {
+        return 1;
+    }
+
+    input  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    output = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
+
+    return 0;
+}
+
+template <int Rank, DType Dtype>
+int OpArgMax<Rank, Dtype>::eval()
+{
+    Eigen::Tensor<DenseIndex, Rank - 1> index = this->input->getTensor().argmax(attribute->axis());
+
+    this->output->getTensor() = index.unaryExpr([](DenseIndex in) -> OutEigenType { return (OutEigenType)in; });
+
+    return GraphNode::eval();
+}
+
+template <DType Dtype>
+OpAvgPool2d<Dtype>::OpAvgPool2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+    : GraphNode(Op_AVG_POOL2D, id_)
+{
+    setRequiredOperands(1, 1);
+    setRequiredRank(4);
+
+    INIT_ATTRIBUTE(Pool2d);
+    INIT_QINFO(Unary);
+}
+
+template <DType Dtype>
+OpAvgPool2d<Dtype>::~OpAvgPool2d()
+{
+    if (attribute)
+        delete attribute;
+}
+
+template <DType Dtype>
+int OpAvgPool2d<Dtype>::checkTensorAttributes()
+{
+    if (validateRequiredOperands())
+        return 1;
+
+    if (validateRequiredRank(inputs[0]) || validateRequiredRank(outputs[0]))
+    {
+        return 1;
+    }
+
+    if (inputs[0]->matchType(*outputs[0]))
+    {
+        printNodeValidationError("OpAvgPool2d: input and output tensor type mismatch");
+        return 1;
+    }
+
+    in  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    out = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
+
+    if (!in->hasFormat(Format_NHWC))
+    {
+        printNodeValidationError("OpAvgPool2d: unsupported tensor format");
+        return 1;
+    }
+
+    if (attribute->padding().size() != 4)
+    {
+        printNodeValidationError("OpAvgPool2d: illegal size for attribute padding");
+        return 1;
+    }
+
+    if (attribute->kernel().size() != 2)
+    {
+        printNodeValidationError("OpAvgPool2d: illegal size for attribute kernel");
+        return 1;
+    }
+
+    if (attribute->stride().size() != 2)
+    {
+        printNodeValidationError("OpAvgPool2d: illegal size for attribute stride");
+        return 1;
+    }
+
+    return 0;
+}
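+
+// calculate_div_map_1d() returns, for each output position along one axis, the
+// number of non-padding input elements covered by the pooling window, so the
+// sum pool below can divide by the true element count at the edges.
+// Worked example (illustrative values, not from this checkin): in_size=5,
+// out_size=3, kernel_size=3, stride=2 gives total_pad=2, pad_left=1,
+// pad_right=1, and a divisor map of {2, 3, 2}: the first and last windows each
+// overlap one pad element, the middle window overlaps none.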
printNodeValidationError("OpAvgPool2d: illegal size for attribute stride"); + return 1; + } + + return 0; +} + +template +ETensor1 OpAvgPool2d::calculate_div_map_1d(int in_size, int out_size, int kernel_size, int stride) +{ + ETensor1 result(out_size); + + int32_t total_pad = (out_size - 1) * stride + kernel_size - in_size; + total_pad = total_pad < 0 ? 0 : total_pad; + + int32_t pad_left = total_pad >> 1; + int32_t pad_right = total_pad - pad_left; + + result.setConstant(kernel_size); + + // the index left to 'left_index' and index right to 'right_index' indicates + // the input window of this output covers a pad bit + int32_t left_index = pad_left / stride; + int32_t right_index = pad_right / stride; + + // not handle ultra small activation yet + ASSERT_MSG_NODE((out_size - 1 - right_index) >= left_index, "AvgPool2d: Small activations not supported yet"); + + // minus the number of pad bit this index cover + while (left_index >= 0) + { + result(left_index) -= (pad_left - left_index * stride); + left_index--; + } + + while (right_index >= 0) + { + result(out_size - 1 - right_index) -= (pad_right - right_index * stride); + right_index--; + } + + return result; +} + +// assuming input and output tensor have same scales like tflite reference +// so no need to scale input and output +template +int OpAvgPool2d::eval() +{ + int in_batch = this->in->getShape()[0]; + int in_height = this->in->getShape()[1]; + int in_width = this->in->getShape()[2]; + int in_channels = this->in->getShape()[3]; + + int out_batch = this->out->getShape()[0]; + int out_height = this->out->getShape()[1]; + int out_width = this->out->getShape()[2]; + int out_channels = this->out->getShape()[3]; + + ASSERT_MSG_NODE(in_batch == out_batch, "OpAvgPool2d: tensor batch mismatch %d != %d", in_batch, out_batch); + + int padding_top = this->attribute->padding()[0]; + int padding_bottom = this->attribute->padding()[1]; + int padding_left = this->attribute->padding()[2]; + int padding_right = this->attribute->padding()[3]; + int kernel_h = this->attribute->kernel()[0]; + int kernel_w = this->attribute->kernel()[1]; + int stride_h = this->attribute->stride()[0]; + int stride_w = this->attribute->stride()[1]; + + DEBUG_INFO(OP, + "perform AvgPool2d, input.shape=[%d,%d,%d,%d], output.shape=[%d,%d,%d,%d], kernel=[%d,%d], " + "stride=[%d,%d], padding=[%d,%d,%d,%d]", + in_batch, in_height, in_width, in_channels, out_batch, out_height, out_width, out_channels, kernel_h, + kernel_w, stride_h, stride_w, padding_top, padding_bottom, padding_left, padding_right); + + Eigen::array im2col_input_dims; + im2col_input_dims[0] = kernel_h * kernel_w; + im2col_input_dims[1] = out_batch * out_height * out_width * out_channels; + + Eigen::array col2im_output_dims; + col2im_output_dims[0] = out_batch; + col2im_output_dims[1] = out_height; + col2im_output_dims[2] = out_width; + col2im_output_dims[3] = out_channels; + + Eigen::array, 4> padding; + padding[0] = std::make_pair(0, 0); + padding[1] = std::make_pair(padding_top, padding_bottom); + padding[2] = std::make_pair(padding_left, padding_right); + padding[3] = std::make_pair(0, 0); + + ETensor4 input_val = this->in->getTensor(); + if (this->qinfo) + { + input_val = input_val - (InEigenType)this->qinfo->input_zp(); + } + + ETensor4 input_padded = input_val.pad(padding); + + // assuming input and output have same scales + // so input and output scaling is not required + // TODO: check if this assumption TOSA made + + // extract_image_patches() output [N, KH, KW, H * W, C] + // transpose to [KH, KW, N, 
+    // extract_image_patches() output [N, KH, KW, H * W, C]
+    // transpose to [KH, KW, N, H * W, C]
+    // reshape to [KH * KW, N * H * W * C]
+    ETensor2<InEigenType> input_extract_patches =
+        input_padded.extract_image_patches(kernel_h, kernel_w, stride_h, stride_w, 1, 1, Eigen::PADDING_VALID)
+            .shuffle(Eigen::array<Eigen::Index, 5>{ 1, 2, 0, 3, 4 })
+            .reshape(im2col_input_dims);
+
+    // 1D result with [N * H * W * C]
+    ETensor1<AccEigenType> out_1d(this->out->getElementCount());
+    out_1d.setZero();
+
+    // sum pool
+    for (size_t i = 0; i < this->out->getElementCount(); i++)
+    {
+        for (int32_t j = 0; j < kernel_h * kernel_w; j++)
+        {
+            out_1d(i) += (AccEigenType)input_extract_patches(j, i);
+        }
+    }
+
+    // reshape result to [N, H, W, C] and divide with div_map
+    ETensor4<AccEigenType> sum = out_1d.reshape(col2im_output_dims);
+
+    // calculate 1d height/width div_map (number of elements this pooling window covers)
+    // and outer product to get 2d div_map, then reshape/broadcast to [N, H, W, C]
+    ETensor1<int32_t> div_map_h = calculate_div_map_1d(in_height, out_height, kernel_h, stride_h);
+    ETensor1<int32_t> div_map_w = calculate_div_map_1d(in_width, out_width, kernel_w, stride_w);
+    Eigen::array<Eigen::IndexPair<Eigen::Index>, 1> contract_dims = { Eigen::IndexPair<Eigen::Index>(1, 0) };
+    Eigen::array<Eigen::Index, 4> bcast{ out_batch, 1, 1, out_channels };
+
+    ETensor4<int32_t> div_map =
+        div_map_h.reshape(Eigen::array<Eigen::Index, 2>{ out_height, 1 })
+            .contract(div_map_w.reshape(Eigen::array<Eigen::Index, 2>{ 1, out_width }), contract_dims)
+            .reshape(Eigen::array<Eigen::Index, 4>{ 1, out_height, out_width, 1 })
+            .broadcast(bcast);
+
+    if (Dtype != DType_FLOAT)
+    {
+        this->out->getTensor() = sum.binaryExpr(div_map, [](AccEigenType value, int32_t div) -> OutEigenType {
+            int32_t multiplier, shift;
+            TosaReference::QuantUtil::reciprocal_scale(div, multiplier, shift);
+
+            return (OutEigenType)TosaReference::QuantUtil::apply_scale(value, multiplier, shift, false);
+        });
+        this->out->getTensor() = this->out->getTensor() + (OutEigenType)(this->qinfo->output_zp());
+        this->out->getTensor() = this->out->getTensor().cwiseMax((OutEigenType)QMin);
+        this->out->getTensor() = this->out->getTensor().cwiseMin((OutEigenType)QMax);
+    }
+    else
+    {
+        this->out->getTensor() = (sum / div_map.template cast<AccEigenType>()).template cast<OutEigenType>();
+    }
+
+    return GraphNode::eval();
+}
+
+template <DType InDtype, DType WeightDtype>
+OpConv2d<InDtype, WeightDtype>::OpConv2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+    : GraphNode(Op_CONV2D, id_)
+{
+    setRequiredOperands(3, 1);
+    setRequiredRank(4);
+
+    INIT_ATTRIBUTE(Conv2d);
+    INIT_QINFO(Conv);
+}
+
+template <DType InDtype, DType WeightDtype>
+OpConv2d<InDtype, WeightDtype>::~OpConv2d()
+{
+    if (attribute)
+        delete attribute;
+    if (qinfo)
+        delete qinfo;
+}
+
+template <DType InDtype, DType WeightDtype>
+int OpConv2d<InDtype, WeightDtype>::checkTensorAttributes()
+{
+    if (validateRequiredOperands())
+        return 1;
+
+    if (validateRequiredRank(inputs[0]) || validateRequiredRank(inputs[1]) || validateRequiredRank(outputs[0]))
+    {
+        return 1;
+    }
+
+    // 'bias' checked separately since it doesn't make sense to require a rank ranging from 1 to 4
+    if (inputs[2]->getRank() != 1)
+    {
+        printNodeValidationError("OpConv2d: bias tensor must be rank 1");
+    }
+
+    if (inputs[1]->getIsConst() == 0)
+    {
+        printNodeValidationError("OpConv2d: weight tensor is not const typed");
+    }
+
+    input  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    weight = dynamic_cast<TosaReference::TensorTemplate<TWeight>*>(inputs[1]);
+    bias   = dynamic_cast<TosaReference::TensorTemplate<TBias>*>(inputs[2]);
+    output = dynamic_cast<TosaReference::TensorTemplate<TAcc>*>(outputs[0]);
+
+    if (!input->hasFormat(Format_NHWC))
+    {
+        printNodeValidationError("OpConv2d: unsupported input tensor format");
+        return 1;
+    }
+
+    if (!weight->hasFormat(Format_OHWI))
+    {
+        printNodeValidationError("OpConv2d: unsupported weight tensor format");
+        return 1;
+    }
+
+    if (attribute->padding().size() != 4)
+    {
+        printNodeValidationError("OpConv2d: illegal size for attribute padding");
+        return 1;
+    }
+
+    if (attribute->stride().size() != 2)
+    {
+        printNodeValidationError("OpConv2d: illegal size for attribute stride");
+        return 1;
+    }
+
+    if (attribute->dilation().size() != 2)
+    {
+        printNodeValidationError("OpConv2d: illegal size for attribute dilation");
+        return 1;
+    }
+
+    return 0;
+}
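+
+// Conv2d is lowered to a single GEMM: the input is rearranged into a matrix of
+// flattened receptive fields (im2col) and contracted against the flattened
+// weights. Worked shape example (illustrative values only): input [1,8,8,4],
+// weight [16,3,3,4], stride 1, dilation 1, padding 1 -> output [1,8,8,16];
+// the im2col input is [1*8*8, 3*3*4] = [64, 36], the weight matrix is
+// [36, 16], and the [64, 16] GEMM result is reshaped back to [1, 8, 8, 16].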
+template <DType InDtype, DType WeightDtype>
+int OpConv2d<InDtype, WeightDtype>::eval()
+{
+    int in_batch    = this->input->getShape()[0];
+    int in_height   = this->input->getShape()[1];
+    int in_width    = this->input->getShape()[2];
+    int in_channels = this->input->getShape()[3];
+
+    int f_out_channels = this->weight->getShape()[0];
+    int f_height       = this->weight->getShape()[1];
+    int f_width        = this->weight->getShape()[2];
+    int f_in_channels  = this->weight->getShape()[3];
+
+    int b_out_channels = this->bias->getShape()[0];
+
+    int out_batch    = this->output->getShape()[0];
+    int out_height   = this->output->getShape()[1];
+    int out_width    = this->output->getShape()[2];
+    int out_channels = this->output->getShape()[3];
+
+    ASSERT_MSG_NODE(in_batch == out_batch, "OpConv2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+    ASSERT_MSG_NODE(f_in_channels == in_channels, "OpConv2d: tensor input channel mismatch %d != %d", f_in_channels,
+                    in_channels);
+    ASSERT_MSG_NODE(f_out_channels == out_channels, "OpConv2d: tensor output channel mismatch %d != %d",
+                    f_out_channels, out_channels);
+    ASSERT_MSG_NODE(b_out_channels == out_channels, "OpConv2d: tensor output channel mismatch %d != %d",
+                    b_out_channels, out_channels);
+
+    int padding_top    = this->attribute->padding()[0];
+    int padding_bottom = this->attribute->padding()[1];
+    int padding_left   = this->attribute->padding()[2];
+    int padding_right  = this->attribute->padding()[3];
+    int stride_h       = this->attribute->stride()[0];
+    int stride_w       = this->attribute->stride()[1];
+    int dilation_h     = this->attribute->dilation()[0];
+    int dilation_w     = this->attribute->dilation()[1];
+
+    DEBUG_INFO(OP,
+               "perform OpConv2d, input.shape=[%d,%d,%d,%d], weight.shape=[%d,%d,%d,%d], output.shape=[%d,%d,%d,%d], "
+               "stride=[%d,%d], dilation=[%d,%d], padding=[%d,%d,%d,%d]",
+               in_batch, in_height, in_width, in_channels, f_height, f_width, f_in_channels, f_out_channels, out_batch,
+               out_height, out_width, out_channels, stride_h, stride_w, dilation_h, dilation_w, padding_top,
+               padding_bottom, padding_left, padding_right);
+
+    // GEMM-conv2d, left matrix is input, right matrix is weight
+    Eigen::array<Eigen::Index, 2> im2col_input_dims;
+    im2col_input_dims[0] = out_batch * out_height * out_width;
+    im2col_input_dims[1] = f_height * f_width * f_in_channels;
+
+    Eigen::array<Eigen::Index, 2> im2col_weight_dims;
+    im2col_weight_dims[0] = f_height * f_width * f_in_channels;
+    im2col_weight_dims[1] = f_out_channels;
+
+    Eigen::array<Eigen::Index, 2> bias_reshaped_dims;
+    bias_reshaped_dims[0] = 1;
+    bias_reshaped_dims[1] = b_out_channels;
+
+    Eigen::array<Eigen::Index, 3> weight_zp_bcast_dims;
+    weight_zp_bcast_dims[0] = f_height;
+    weight_zp_bcast_dims[1] = f_width;
+    weight_zp_bcast_dims[2] = f_in_channels;
+
+    Eigen::array<Eigen::Index, 2> bias_bcast_dims;
+    bias_bcast_dims[0] = out_batch * out_height * out_width;
+    bias_bcast_dims[1] = 1;
+
+    Eigen::array<Eigen::Index, 4> col2im_output_dims;
+    col2im_output_dims[0] = out_batch;
+    col2im_output_dims[1] = out_height;
+    col2im_output_dims[2] = out_width;
+    col2im_output_dims[3] = out_channels;
+
+    Eigen::array<Eigen::IndexPair<Eigen::Index>, 1> contract_dims = { Eigen::IndexPair<Eigen::Index>(1, 0) };
+
+    Eigen::array<std::pair<int32_t, int32_t>, 4> padding;
+    padding[0] = std::make_pair(0, 0);
+    padding[1] = std::make_pair(padding_top, padding_bottom);
+    padding[2] = std::make_pair(padding_left, padding_right);
+    padding[3] = std::make_pair(0, 0);
+
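+    // Note: the zero points are subtracted before padding, so padding with the
+    // literal value 0 below is equivalent to padding with input_zp in the
+    // original quantized domain.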
+    TIn     input_val  = this->input->getTensor();
+    TWeight weight_val = this->weight->getTensor();
+    if (this->qinfo)
+    {
+        input_val  = input_val - (InEigenType)this->qinfo->input_zp();
+        weight_val = weight_val - (WeightEigenType)this->qinfo->weight_zp();
+    }
+
+    ETensor4<InEigenType> input_padded = input_val.pad(padding);
+
+    // extract_image_patches() output [N, KH, KW, H * W, C]
+    // need to transpose to [N, H * W, KH, KW, C]
+    ETensor5<InEigenType> input_extract_patches =
+        input_padded
+            .extract_image_patches(f_height, f_width, stride_h, stride_w, dilation_h, dilation_w, Eigen::PADDING_VALID)
+            .shuffle(Eigen::array<Eigen::Index, 5>{ 0, 3, 1, 2, 4 });
+
+    // reshape input to [N * H * W, KH * KW * C]
+    ETensor2<InEigenType> im2col_input = input_extract_patches.reshape(im2col_input_dims);
+
+    // transpose and reshape weight from [OC, H, W, IC] to [H * W * IC, OC]
+    ETensor2<WeightEigenType> im2col_weight =
+        weight_val.shuffle(Eigen::array<Eigen::Index, 4>({ 1, 2, 3, 0 })).reshape(im2col_weight_dims);
+
+    // no need to apply bias_multiplier ( * bias_scale and >> bias_shift) since tflite already scales it;
+    // the bias is reshaped from [C] to [1, C], then broadcast to [N * H * W, C]
+    ETensor2<AccEigenType> bias_2d = this->bias->getTensor().reshape(bias_reshaped_dims).broadcast(bias_bcast_dims);
+
+    // output matrix is [N * H * W, C]
+    ETensor2<AccEigenType> contracted_result =
+        im2col_input.template cast<AccEigenType>().contract(im2col_weight.template cast<AccEigenType>(),
+                                                            contract_dims);
+
+    // adding bias
+    ETensor2<AccEigenType> biased_output = contracted_result + bias_2d.template cast<AccEigenType>();
+
+    // reshape back to [N, H, W, C]
+    this->output->getTensor() = biased_output.reshape(col2im_output_dims);
+
+    if (AccDtype == DType_INT48)
+    {
+        this->output->getTensor() = this->output->getTensor().cwiseMax((AccEigenType)AccQMin);
+        this->output->getTensor() = this->output->getTensor().cwiseMin((AccEigenType)AccQMax);
+    }
+
+    return GraphNode::eval();
+}
+
+template <DType InDtype, DType WeightDtype>
+OpDepthwiseConv2d<InDtype, WeightDtype>::OpDepthwiseConv2d(TosaAttributeBase* attribute_,
+                                                           TosaQuantInfoBase* qinfo_,
+                                                           uint64_t id_)
+    : GraphNode(Op_DEPTHWISE_CONV2D, id_)
+{
+    setRequiredOperands(3, 1);
+    setRequiredRank(4);
+
+    INIT_ATTRIBUTE(Conv2d);
+    INIT_QINFO(Conv);
+}
+
+template <DType InDtype, DType WeightDtype>
+OpDepthwiseConv2d<InDtype, WeightDtype>::~OpDepthwiseConv2d()
+{
+    if (attribute)
+        delete attribute;
+    if (qinfo)
+        delete qinfo;
+}
+
+template <DType InDtype, DType WeightDtype>
+int OpDepthwiseConv2d<InDtype, WeightDtype>::checkTensorAttributes()
+{
+    if (validateRequiredOperands())
+        return 1;
+
+    if (validateRequiredRank(inputs[0]) || validateRequiredRank(inputs[1]) || validateRequiredRank(outputs[0]))
+    {
+        return 1;
+    }
+
+    // 'bias' checked separately since it doesn't make sense to require a rank ranging from 1 to 4
+    if (inputs[2]->getRank() != 1)
+    {
+        printNodeValidationError("OpDepthwiseConv2d: bias tensor must be rank 1");
+    }
+
+    if (inputs[1]->getIsConst() == 0)
+    {
+        printNodeValidationError("OpDepthwiseConv2d: weight tensor is not const typed");
+    }
+
+    input  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    weight = dynamic_cast<TosaReference::TensorTemplate<TWeight>*>(inputs[1]);
+    bias   = dynamic_cast<TosaReference::TensorTemplate<TBias>*>(inputs[2]);
+    output = dynamic_cast<TosaReference::TensorTemplate<TAcc>*>(outputs[0]);
+
+    if (!input->hasFormat(Format_NHWC))
+    {
+        printNodeValidationError("OpDepthwiseConv2d: unsupported input tensor format");
+        return 1;
+    }
+
+    if (!weight->hasFormat(Format_HWIM))
+    {
+        printNodeValidationError("OpDepthwiseConv2d: unsupported weight tensor format");
+        return 1;
+    }
+
+    if (attribute->padding().size() != 4)
+    {
+        printNodeValidationError("OpDepthwiseConv2d: illegal size for attribute padding");
+        return 1;
+    }
+
+    if (attribute->stride().size() != 2)
+    {
+        printNodeValidationError("OpDepthwiseConv2d: illegal size for attribute stride");
+        return 1;
+    }
+
+    if (attribute->dilation().size() != 2)
+    {
+        printNodeValidationError("OpDepthwiseConv2d: illegal size for attribute dilation");
+        return 1;
+    }
+
+    return 0;
+}
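+
+// Depthwise output channels are laid out as ic * M + cm, where M is the
+// channel multiplier. Worked example (illustrative values only): with
+// in_channels=8 and M=2, out_channels=16, and input channel 3 feeds output
+// channels 6 and 7.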
+template <DType InDtype, DType WeightDtype>
+int OpDepthwiseConv2d<InDtype, WeightDtype>::eval()
+{
+    int in_batch    = this->input->getShape()[0];
+    int in_height   = this->input->getShape()[1];
+    int in_width    = this->input->getShape()[2];
+    int in_channels = this->input->getShape()[3];
+
+    int f_height      = this->weight->getShape()[0];
+    int f_width       = this->weight->getShape()[1];
+    int f_in_channels = this->weight->getShape()[2];
+    int f_multiplier  = this->weight->getShape()[3];
+
+    int b_out_channels = this->bias->getShape()[0];
+
+    int out_batch    = this->output->getShape()[0];
+    int out_height   = this->output->getShape()[1];
+    int out_width    = this->output->getShape()[2];
+    int out_channels = this->output->getShape()[3];
+
+    ASSERT_MSG_NODE(in_batch == out_batch, "OpDepthwiseConv2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+    ASSERT_MSG_NODE(f_in_channels == in_channels, "OpDepthwiseConv2d: tensor input channel mismatch %d != %d",
+                    f_in_channels, in_channels);
+    ASSERT_MSG_NODE(in_channels * f_multiplier == out_channels,
+                    "OpDepthwiseConv2d: tensor output channel mismatch %d != %d", in_channels * f_multiplier,
+                    out_channels);
+    ASSERT_MSG_NODE(b_out_channels == out_channels, "OpDepthwiseConv2d: tensor b_out_channels mismatch %d != %d",
+                    b_out_channels, out_channels);
+
+    int padding_top    = this->attribute->padding()[0];
+    int padding_bottom = this->attribute->padding()[1];
+    int padding_left   = this->attribute->padding()[2];
+    int padding_right  = this->attribute->padding()[3];
+    int stride_h       = this->attribute->stride()[0];
+    int stride_w       = this->attribute->stride()[1];
+    int dilation_h     = this->attribute->dilation()[0];
+    int dilation_w     = this->attribute->dilation()[1];
+
+    DEBUG_INFO(OP,
+               "perform OpDepthwiseConv2d, input.shape=[%d,%d,%d,%d], weight.shape=[%d,%d,%d,%d], "
+               "output.shape=[%d,%d,%d,%d], stride=[%d,%d], dilation=[%d,%d], padding=[%d,%d,%d,%d]",
+               in_batch, in_height, in_width, in_channels, f_height, f_width, f_in_channels, f_multiplier, out_batch,
+               out_height, out_width, out_channels, stride_h, stride_w, dilation_h, dilation_w, padding_top,
+               padding_bottom, padding_left, padding_right);
+
+    Eigen::array<std::pair<int32_t, int32_t>, 4> padding;
+    padding[0] = std::make_pair(0, 0);
+    padding[1] = std::make_pair(padding_top, padding_bottom);
+    padding[2] = std::make_pair(padding_left, padding_right);
+    padding[3] = std::make_pair(0, 0);
+
+    TIn     input_val  = this->input->getTensor();
+    TWeight weight_val = this->weight->getTensor();
+    if (this->qinfo)
+    {
+        input_val  = input_val - (InEigenType)this->qinfo->input_zp();
+        weight_val = weight_val - (WeightEigenType)this->qinfo->weight_zp();
+    }
+
+    ETensor4<InEigenType> input_padded = input_val.pad(padding);
+
+    // GEMM doesn't fit well with DepthwiseConv2d
+    // 1. use extract_image_patches() to handle stride/dilation/padding
+    // 2. perform direct convolution
+
+    // 1. extract_image_patches() output [N, KH, KW, OH * OW, IC]
+    ETensor5<InEigenType> input_extract_patches = input_padded.extract_image_patches(
+        f_height, f_width, stride_h, stride_w, dilation_h, dilation_w, Eigen::PADDING_VALID);
+
+    Eigen::array<Eigen::Index, 4> reshape_dim;
+    reshape_dim.fill(1);
+    reshape_dim[3] = b_out_channels;
+
+    Eigen::array<Eigen::Index, 4> bcast;
+    bcast[0] = out_batch;
+    bcast[1] = out_height;
+    bcast[2] = out_width;
+    bcast[3] = 1;
+
+    // initialize with bias
+    this->output->getTensor() = this->bias->getTensor().reshape(reshape_dim).broadcast(bcast);
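+
+    // Accumulation performed by the loop nest below, in index form:
+    //   out(n, oy, ox, ic * M + cm) +=
+    //       patches(n, fh, fw, patch_idx, ic) * weight(fh, fw, ic, cm)
+    // where the patch tensor already folds stride, dilation and padding into
+    // patch_idx.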
+
+    // 2. direct depthwise convolution
+    for (int ob = 0; ob < out_batch; ob++)
+    {
+        for (int oh = 0; oh < out_height; oh++)
+        {
+            for (int ow = 0; ow < out_width; ow++)
+            {
+                for (int ic = 0; ic < in_channels; ic++)
+                {
+                    for (int cm = 0; cm < f_multiplier; cm++)
+                    {
+                        for (int fh = 0; fh < f_height; fh++)
+                        {
+                            for (int fw = 0; fw < f_width; fw++)
+                            {
+                                this->output->getTensor()(ob, oh, ow, ic * f_multiplier + cm) +=
+                                    ((AccEigenType)input_extract_patches(ob, fh, fw, ow * out_height + oh, ic) *
+                                     (AccEigenType)weight_val(fh, fw, ic, cm));
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    if (AccDtype == DType_INT48)
+    {
+        this->output->getTensor() = this->output->getTensor().cwiseMax((AccEigenType)AccQMin);
+        this->output->getTensor() = this->output->getTensor().cwiseMin((AccEigenType)AccQMax);
+    }
+
+    return GraphNode::eval();
+}
+
+template <DType InDtype, DType WeightDtype>
+OpFullyConnected<InDtype, WeightDtype>::OpFullyConnected(TosaAttributeBase* attribute_,
+                                                         TosaQuantInfoBase* qinfo_,
+                                                         uint64_t id_)
+    : GraphNode(Op_FULLY_CONNECTED, id_)
+{
+    setRequiredOperands(3, 1);
+    setRequiredRank(2);
+
+    INIT_QINFO(Conv);
+}
+
+template <DType InDtype, DType WeightDtype>
+OpFullyConnected<InDtype, WeightDtype>::~OpFullyConnected()
+{
+    if (qinfo)
+        delete qinfo;
+}
+
+template <DType InDtype, DType WeightDtype>
+int OpFullyConnected<InDtype, WeightDtype>::checkTensorAttributes()
+{
+    if (validateRequiredOperands())
+        return 1;
+
+    if (validateRequiredRank(inputs[0]) || validateRequiredRank(inputs[1]) || validateRequiredRank(outputs[0]))
+    {
+        return 1;
+    }
+
+    input  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    weight = dynamic_cast<TosaReference::TensorTemplate<TWeight>*>(inputs[1]);
+    bias   = dynamic_cast<TosaReference::TensorTemplate<TBias>*>(inputs[2]);
+
+    if (input->getShape()[1] != weight->getShape()[1])
+    {
+        printNodeValidationError("OpFullyConnected operator input.shape[1] should match weight.shape[1]");
+        return 1;
+    }
+
+    if (weight->getShape()[0] != bias->getShape()[0])
+    {
+        printNodeValidationError("OpFullyConnected operator bias.shape[0] should match weight.shape[0]");
+        return 1;
+    }
+
+    output = dynamic_cast<TosaReference::TensorTemplate<TAcc>*>(outputs[0]);
+
+    return 0;
+}
+
+template <DType InDtype, DType WeightDtype>
+int OpFullyConnected<InDtype, WeightDtype>::eval()
+{
+    typedef Eigen::Tensor<int, 1>::DimensionPair DimPair;
+    Eigen::array<DimPair, 1> dims{ { DimPair(1, 0) } };
+
+    Eigen::array<Eigen::Index, 2> weight_shuffle{ 1, 0 };
+
+    Eigen::array<Eigen::Index, 2> bias_reshape;
+    bias_reshape[0] = 1;
+    bias_reshape[1] = this->bias->getShape()[0];
+
+    Eigen::array<Eigen::Index, 2> bias_bcast;
+    bias_bcast[0] = this->input->getShape()[0];
+    bias_bcast[1] = 1;
+
+    TIn     input_val  = this->input->getTensor();
+    TWeight weight_val = this->weight->getTensor().shuffle(weight_shuffle);
+    if (this->qinfo)
+    {
+        input_val  = input_val - (InEigenType)this->qinfo->input_zp();
+        weight_val = weight_val - (WeightEigenType)this->qinfo->weight_zp();
+    }
+
+    this->output->getTensor() =
+        input_val.template cast<AccEigenType>().contract(weight_val.template cast<AccEigenType>(), dims) +
+        this->bias->getTensor().reshape(bias_reshape).broadcast(bias_bcast);
+
+    if (AccDtype == DType_INT48)
+    {
+        this->output->getTensor() = this->output->getTensor().cwiseMax((AccEigenType)AccQMin);
+        this->output->getTensor() = this->output->getTensor().cwiseMin((AccEigenType)AccQMax);
+    }
+    return GraphNode::eval();
+}
+
+template <DType Dtype>
+OpMatMul<Dtype>::OpMatMul(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+    : GraphNode(Op_MATMUL, id_)
+{
+    setRequiredOperands(2, 1);
+    setRequiredRank(2);
+
+    INIT_QINFO(MatMul);
+}
+
+template <DType Dtype>
+OpMatMul<Dtype>::~OpMatMul()
+{
+    if (qinfo)
+        delete qinfo;
+}
+
+template <DType Dtype>
+int OpMatMul<Dtype>::checkTensorAttributes()
+{
+    if (validateRequiredOperands())
+        return 1;
+
+    if (validateRequiredRank(inputs[0]) || validateRequiredRank(inputs[1]) || validateRequiredRank(outputs[0]))
+    {
+        return 1;
+    }
+
+    a = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    b = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[1]);
+
+    if (a->getShape()[1] != b->getShape()[0])
+    {
+        printNodeValidationError("OpMatMul operator a.shape[1] should match b.shape[0]");
+        return 1;
+    }
+
+    c = dynamic_cast<TosaReference::TensorTemplate<TAcc>*>(outputs[0]);
+
+    return 0;
+}
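+
+// For the quantized variants, the zero points a_zp and b_zp are subtracted
+// first, so the contraction below runs on zero-centered values in the wider
+// accumulator type; rescaling back to a narrow type is left to a later op.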
+template <DType Dtype>
+int OpMatMul<Dtype>::eval()
+{
+    typedef Eigen::Tensor<int, 1>::DimensionPair DimPair;
+    Eigen::array<DimPair, 1> dims{ { DimPair(1, 0) } };
+
+    TIn a_val = this->a->getTensor();
+    TIn b_val = this->b->getTensor();
+    if (this->qinfo)
+    {
+        a_val = a_val - (InEigenType)this->qinfo->a_zp();
+        b_val = b_val - (InEigenType)this->qinfo->b_zp();
+    }
+
+    this->c->getTensor() = a_val.template cast<AccEigenType>().contract(b_val.template cast<AccEigenType>(), dims);
+
+    if (AccDtype == DType_INT48)
+    {
+        this->c->getTensor() = this->c->getTensor().cwiseMax((AccEigenType)AccQMin);
+        this->c->getTensor() = this->c->getTensor().cwiseMin((AccEigenType)AccQMax);
+    }
+
+    return GraphNode::eval();
+}
+
+template <DType Dtype>
+OpMaxPool2d<Dtype>::OpMaxPool2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+    : GraphNode(Op_MAX_POOL2D, id_)
+{
+    setRequiredOperands(1, 1);
+    setRequiredRank(4);
+
+    INIT_ATTRIBUTE(Pool2d);
+}
+
+template <DType Dtype>
+OpMaxPool2d<Dtype>::~OpMaxPool2d()
+{
+    if (attribute)
+        delete attribute;
+}
+
+template <DType Dtype>
+int OpMaxPool2d<Dtype>::checkTensorAttributes()
+{
+    if (validateRequiredOperands())
+        return 1;
+
+    if (validateRequiredRank(inputs[0]) || validateRequiredRank(outputs[0]))
+    {
+        return 1;
+    }
+
+    if (inputs[0]->matchType(*outputs[0]))
+    {
+        printNodeValidationError("OpMaxPool2d: input and output tensor type mismatch");
+        return 1;
+    }
+
+    in  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    out = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
+
+    if (!in->hasFormat(Format_NHWC))
+    {
+        printNodeValidationError("OpMaxPool2d: unsupported tensor format");
+        return 1;
+    }
+
+    if (attribute->padding().size() != 4)
+    {
+        printNodeValidationError("OpMaxPool2d: illegal size for attribute padding");
+        return 1;
+    }
+
+    if (attribute->kernel().size() != 2)
+    {
+        printNodeValidationError("OpMaxPool2d: illegal size for attribute kernel");
+        return 1;
+    }
+
+    if (attribute->stride().size() != 2)
+    {
+        printNodeValidationError("OpMaxPool2d: illegal size for attribute stride");
+        return 1;
+    }
+
+    return 0;
+}
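+
+// MAX_POOL2D reuses the im2col layout of AVG_POOL2D: each column of the patch
+// matrix holds one pooling window, and argmax along axis 0 picks the winner.
+// Padding uses std::numeric_limits<InEigenType>::lowest() so a pad element can
+// never beat a real input value.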
+template <DType Dtype>
+int OpMaxPool2d<Dtype>::eval()
+{
+    int in_batch    = this->in->getShape()[0];
+    int in_height   = this->in->getShape()[1];
+    int in_width    = this->in->getShape()[2];
+    int in_channels = this->in->getShape()[3];
+
+    int out_batch    = this->out->getShape()[0];
+    int out_height   = this->out->getShape()[1];
+    int out_width    = this->out->getShape()[2];
+    int out_channels = this->out->getShape()[3];
+
+    ASSERT_MSG_NODE(in_batch == out_batch, "OpMaxPool2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+
+    int padding_top    = this->attribute->padding()[0];
+    int padding_bottom = this->attribute->padding()[1];
+    int padding_left   = this->attribute->padding()[2];
+    int padding_right  = this->attribute->padding()[3];
+    int kernel_h       = this->attribute->kernel()[0];
+    int kernel_w       = this->attribute->kernel()[1];
+    int stride_h       = this->attribute->stride()[0];
+    int stride_w       = this->attribute->stride()[1];
+
+    DEBUG_INFO(OP,
+               "perform MaxPool2d, input.shape=[%d,%d,%d,%d], output.shape=[%d,%d,%d,%d], kernel=[%d,%d], "
+               "stride=[%d,%d], padding=[%d,%d,%d,%d]",
+               in_batch, in_height, in_width, in_channels, out_batch, out_height, out_width, out_channels, kernel_h,
+               kernel_w, stride_h, stride_w, padding_top, padding_bottom, padding_left, padding_right);
+
+    Eigen::array<Eigen::Index, 2> im2col_input_dims;
+    im2col_input_dims[0] = kernel_h * kernel_w;
+    im2col_input_dims[1] = out_batch * out_height * out_width * out_channels;
+
+    Eigen::array<Eigen::Index, 4> col2im_output_dims;
+    col2im_output_dims[0] = out_batch;
+    col2im_output_dims[1] = out_height;
+    col2im_output_dims[2] = out_width;
+    col2im_output_dims[3] = out_channels;
+
+    Eigen::array<std::pair<int32_t, int32_t>, 4> padding;
+    padding[0] = std::make_pair(0, 0);
+    padding[1] = std::make_pair(padding_top, padding_bottom);
+    padding[2] = std::make_pair(padding_left, padding_right);
+    padding[3] = std::make_pair(0, 0);
+
+    ETensor4<InEigenType> input_padded =
+        this->in->getTensor().pad(padding, std::numeric_limits<InEigenType>::lowest());
+
+    // extract_image_patches() output [N, KH, KW, H * W, C]
+    // transpose to [KH, KW, N, H * W, C]
+    // reshape to [KH * KW, N * H * W * C]
+    //
+    // Set the padding value to be the most negative value that can be
+    // represented by the datatype to ensure that any padding values will be equal
+    // to or smaller than the actual maximum in the KH x KW patch.
+    ETensor2<InEigenType> input_extract_patches =
+        input_padded
+            .extract_image_patches(kernel_h, kernel_w, stride_h, stride_w, 1, 1, Eigen::PADDING_VALID,
+                                   std::numeric_limits<InEigenType>::lowest())
+            .shuffle(Eigen::array<Eigen::Index, 5>{ 1, 2, 0, 3, 4 })
+            .reshape(im2col_input_dims);
+
+    // Get the maximum of the KH x KW patches along axis 0
+    Eigen::Tensor<DenseIndex, 1> tensor_argmax = input_extract_patches.argmax(0);
+
+    // 1D result with [N * H * W * C]
+    ETensor1<OutEigenType> out_1d(this->out->getElementCount());
+
+    // indexing input_patches with the argmax array gives the result
+    for (size_t i = 0; i < this->out->getElementCount(); i++)
+    {
+        out_1d(i) = (OutEigenType)input_extract_patches(tensor_argmax(i), i);
+    }
+
+    // reshape result to [N, H, W, C]
+    this->out->getTensor() = out_1d.reshape(col2im_output_dims);
+
+    return GraphNode::eval();
+}
+
+template <DType InDtype, DType WeightDtype>
+OpTransposeConv2d<InDtype, WeightDtype>::OpTransposeConv2d(TosaAttributeBase* attribute_,
+                                                           TosaQuantInfoBase* qinfo_,
+                                                           uint64_t id_)
+    : GraphNode(Op_TRANSPOSE_CONV2D, id_)
+{
+    setRequiredOperands(3, 1);
+    setRequiredRank(4);
+
+    INIT_ATTRIBUTE(TransposeConv2d);
+    INIT_QINFO(Conv);
+}
+
+template <DType InDtype, DType WeightDtype>
+OpTransposeConv2d<InDtype, WeightDtype>::~OpTransposeConv2d()
+{
+    if (attribute)
+        delete attribute;
+    if (qinfo)
+        delete qinfo;
+}
+
+template <DType InDtype, DType WeightDtype>
+int OpTransposeConv2d<InDtype, WeightDtype>::checkTensorAttributes()
+{
+    if (validateRequiredOperands())
+        return 1;
+
+    if (validateRequiredRank(inputs[0]) || validateRequiredRank(inputs[1]) || validateRequiredRank(outputs[0]))
+    {
+        return 1;
+    }
+
+    if (inputs[1]->getIsConst() == 0)
+    {
+        printNodeValidationError("OpTransposeConv2d: weight tensor is not const typed");
+    }
+
+    input  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    weight = dynamic_cast<TosaReference::TensorTemplate<TWeight>*>(inputs[1]);
+    bias   = dynamic_cast<TosaReference::TensorTemplate<TBias>*>(inputs[2]);
+    output = dynamic_cast<TosaReference::TensorTemplate<TAcc>*>(outputs[0]);
+
+    if (!input->hasFormat(Format_NHWC))
+    {
+        printNodeValidationError("OpTransposeConv2d: unsupported input tensor format");
+        return 1;
+    }
+
+    if (!weight->hasFormat(Format_OHWI))
+    {
+        printNodeValidationError("OpTransposeConv2d: unsupported weight tensor format");
+        return 1;
+    }
+
+    if (attribute->outpad().size() != 2)
+    {
+        printNodeValidationError("OpTransposeConv2d: illegal size for attribute outpad");
+        return 1;
+    }
+
+    if (attribute->stride().size() != 2)
+    {
+        printNodeValidationError("OpTransposeConv2d: illegal size for attribute stride");
+        return 1;
+    }
+
+    if (attribute->dilation().size() != 2)
+    {
+        printNodeValidationError("OpTransposeConv2d: illegal size for attribute dilation");
+        return 1;
+    }
+
+    if (attribute->output_shape().size() != 4)
+    {
+        printNodeValidationError("OpTransposeConv2d: illegal size for attribute output_shape");
+        return 1;
+    }
+
+    for (int d = 0; d < 4; d++)
+    {
+        if (attribute->output_shape()[d] != this->output->getShape()[d])
+        {
+            printNodeValidationError("OpTransposeConv2d: illegal size for attribute output_shape");
+            return 1;
+        }
+    }
+
+    return 0;
+}
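+
+// TRANSPOSE_CONV2D is evaluated as a scatter rather than a gather: every input
+// element is multiplied by the whole kernel and accumulated into the output
+// window it projects onto, which is the adjoint of the CONV2D gather pattern.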
+template <DType InDtype, DType WeightDtype>
+int OpTransposeConv2d<InDtype, WeightDtype>::eval()
+{
+    int in_batch    = this->input->getShape()[0];
+    int in_height   = this->input->getShape()[1];
+    int in_width    = this->input->getShape()[2];
+    int in_channels = this->input->getShape()[3];
+
+    int f_out_channels = this->weight->getShape()[0];
+    int f_height       = this->weight->getShape()[1];
+    int f_width        = this->weight->getShape()[2];
+    int f_in_channels  = this->weight->getShape()[3];
+
+    int b_out_channels = this->bias->getShape()[0];
+
+    int out_batch    = this->output->getShape()[0];
+    int out_height   = this->output->getShape()[1];
+    int out_width    = this->output->getShape()[2];
+    int out_channels = this->output->getShape()[3];
+
+    int padding_top  = this->attribute->outpad()[0];
+    int padding_left = this->attribute->outpad()[1];
+    int stride_h     = this->attribute->stride()[0];
+    int stride_w     = this->attribute->stride()[1];
+    int dilation_h   = this->attribute->dilation()[0];
+    int dilation_w   = this->attribute->dilation()[1];
+
+    ASSERT_MSG_NODE(in_batch == out_batch, "OpTransposeConv2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+    ASSERT_MSG_NODE(f_in_channels == in_channels, "OpTransposeConv2d: tensor input channel mismatch %d != %d",
+                    f_in_channels, in_channels);
+    ASSERT_MSG_NODE(f_out_channels == out_channels, "OpTransposeConv2d: tensor output channel mismatch %d != %d",
+                    f_out_channels, out_channels);
+    ASSERT_MSG_NODE(b_out_channels == out_channels, "OpTransposeConv2d: tensor b_out_channels mismatch %d != %d",
+                    b_out_channels, out_channels);
+
+    DEBUG_INFO(OP,
+               "perform OpTransposeConv2d, input.shape=[%d,%d,%d,%d], weight.shape=[%d,%d,%d,%d], "
+               "output.shape=[%d,%d,%d,%d], stride=[%d,%d], dilation=[%d,%d], padding=[%d,%d]",
+               in_batch, in_height, in_width, in_channels, f_height, f_width, f_out_channels, f_in_channels, out_batch,
+               out_height, out_width, out_channels, stride_h, stride_w, dilation_h, dilation_w, padding_top,
+               padding_left);
+
+    TIn     input_val  = this->input->getTensor();
+    TWeight weight_val = this->weight->getTensor();
+    if (this->qinfo)
+    {
+        input_val  = input_val - (InEigenType)this->qinfo->input_zp();
+        weight_val = weight_val - (WeightEigenType)this->qinfo->weight_zp();
+    }
+
+    Eigen::array<Eigen::Index, 4> reshape_dim;
+    reshape_dim.fill(1);
+    reshape_dim[3] = b_out_channels;
+
+    Eigen::array<Eigen::Index, 4> bcast;
+    bcast[0] = out_batch;
+    bcast[1] = out_height;
+    bcast[2] = out_width;
+    bcast[3] = 1;
+
+    // initialize with bias
+    this->output->getTensor() = this->bias->getTensor().reshape(reshape_dim).broadcast(bcast);
+
+    int out_x_origin, out_y_origin;
+    int out_x, out_y;
+
+    // reference implementation from: tensorflow/tensorflow/lite/kernels/internal/reference/reference_ops.h
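+    // Scatter index math: out_x = iw * stride_w - padding_left + fw * dilation_w,
+    // and likewise for out_y. Worked example (illustrative values only): with
+    // stride=2, dilation=1, outpad=0 and a 3x3 kernel, input column iw=1
+    // contributes to output columns 2, 3 and 4.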
+    for (int ob = 0; ob < out_batch; ob++)
+    {
+        for (int ih = 0; ih < in_height; ih++)
+        {
+            for (int iw = 0; iw < in_width; iw++)
+            {
+                out_x_origin = iw * stride_w - padding_left;
+                out_y_origin = ih * stride_h - padding_top;
+                for (int ic = 0; ic < in_channels; ic++)
+                {
+                    for (int fh = 0; fh < f_height; fh++)
+                    {
+                        for (int fw = 0; fw < f_width; fw++)
+                        {
+                            out_x = out_x_origin + fw * dilation_w;
+                            out_y = out_y_origin + fh * dilation_h;
+                            for (int oc = 0; oc < out_channels; oc++)
+                            {
+                                if ((out_x >= 0 && out_x < out_width) && (out_y >= 0 && out_y < out_height))
+                                {
+                                    this->output->getTensor()(ob, out_y, out_x, oc) +=
+                                        ((AccEigenType)input_val(ob, ih, iw, ic) *
+                                         (AccEigenType)weight_val(oc, fh, fw, ic));
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    if (AccDtype == DType_INT48)
+    {
+        this->output->getTensor() = this->output->getTensor().cwiseMax((AccEigenType)AccQMin);
+        this->output->getTensor() = this->output->getTensor().cwiseMin((AccEigenType)AccQMax);
+    }
+
+    return GraphNode::eval();
+}
+
+// template explicit instantiation
+DEF_INSTANTIATE_RANK1_6_ONE_RANK_ONE_TYPE(OpArgMax, FLOAT);
+DEF_INSTANTIATE_RANK1_6_ONE_RANK_ONE_TYPE(OpArgMax, AINT8);
+DEF_INSTANTIATE_RANK1_6_ONE_RANK_ONE_TYPE(OpArgMax, INT16);
+
+DEF_INSTANTIATE_ONE_TYPE(OpAvgPool2d, FLOAT)
+DEF_INSTANTIATE_ONE_TYPE(OpAvgPool2d, AINT8)
+DEF_INSTANTIATE_ONE_TYPE(OpAvgPool2d, INT16)
+
+DEF_INSTANTIATE_TWO_TYPE(OpConv2d, FLOAT, FLOAT);
+DEF_INSTANTIATE_TWO_TYPE(OpConv2d, AINT8, INT4);
+DEF_INSTANTIATE_TWO_TYPE(OpConv2d, AINT8, INT8);
+DEF_INSTANTIATE_TWO_TYPE(OpConv2d, AINT8, AINT8);
+DEF_INSTANTIATE_TWO_TYPE(OpConv2d, INT16, INT8);
+
+DEF_INSTANTIATE_TWO_TYPE(OpDepthwiseConv2d, FLOAT, FLOAT);
+DEF_INSTANTIATE_TWO_TYPE(OpDepthwiseConv2d, AINT8, INT4);
+DEF_INSTANTIATE_TWO_TYPE(OpDepthwiseConv2d, AINT8, INT8);
+DEF_INSTANTIATE_TWO_TYPE(OpDepthwiseConv2d, AINT8, AINT8);
+DEF_INSTANTIATE_TWO_TYPE(OpDepthwiseConv2d, INT16, INT8);
+
+DEF_INSTANTIATE_TWO_TYPE(OpFullyConnected, FLOAT, FLOAT);
+DEF_INSTANTIATE_TWO_TYPE(OpFullyConnected, AINT8, INT4);
+DEF_INSTANTIATE_TWO_TYPE(OpFullyConnected, AINT8, INT8);
+DEF_INSTANTIATE_TWO_TYPE(OpFullyConnected, AINT8, AINT8);
+DEF_INSTANTIATE_TWO_TYPE(OpFullyConnected, INT16, INT8);
+
+DEF_INSTANTIATE_ONE_TYPE(OpMatMul, AINT8);
+DEF_INSTANTIATE_ONE_TYPE(OpMatMul, INT16);
+DEF_INSTANTIATE_ONE_TYPE(OpMatMul, FLOAT);
+
+DEF_INSTANTIATE_ONE_TYPE(OpMaxPool2d, FLOAT);
+DEF_INSTANTIATE_ONE_TYPE(OpMaxPool2d, AINT8);
+DEF_INSTANTIATE_ONE_TYPE(OpMaxPool2d, INT16);
+
+DEF_INSTANTIATE_TWO_TYPE(OpTransposeConv2d, FLOAT, FLOAT);
+DEF_INSTANTIATE_TWO_TYPE(OpTransposeConv2d, AINT8, INT4);
+DEF_INSTANTIATE_TWO_TYPE(OpTransposeConv2d, AINT8, INT8);
+DEF_INSTANTIATE_TWO_TYPE(OpTransposeConv2d, AINT8, AINT8);
+DEF_INSTANTIATE_TWO_TYPE(OpTransposeConv2d, INT16, INT8);