From fe392ce8e714e616b5ab5b8a519d3eb84623273d Mon Sep 17 00:00:00 2001
From: Kevin Cheng <kevin.cheng@arm.com>
Date: Mon, 18 Oct 2021 21:51:55 +0000
Subject: Changes for 0.23.0 release

- update serialization_lib hash
- PAD:
  1. make padding an attribute instead of a tensor.
  2. add pad_const_int (for non-float types) / pad_const_fp (for float types)
- TRANSPOSE: make perm an attribute instead of a tensor
- TABLE: make table an attribute instead of a tensor
- update examples/ tests

Signed-off-by: Kevin Cheng <kevin.cheng@arm.com>
Change-Id: Iddc446db4b356ba2f36ea4a79b7220b9cfc2aa4e
---
 reference_model/src/ops/data_layout.cc | 57 ++++++++++++++++++++++++----------
 1 file changed, 40 insertions(+), 17 deletions(-)

(limited to 'reference_model/src/ops/data_layout.cc')

diff --git a/reference_model/src/ops/data_layout.cc b/reference_model/src/ops/data_layout.cc
index 674227b..05a11e0 100644
--- a/reference_model/src/ops/data_layout.cc
+++ b/reference_model/src/ops/data_layout.cc
@@ -128,10 +128,11 @@ OpPad<Rank, Dtype>::OpPad(SubgraphTraverser* sgt_,
                           uint64_t id_)
     : GraphNode(sgt_, Op_PAD, id_)
 {
-    setRequiredOperands(2, 1);
+    setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
 
     INIT_QINFO(Pad);
+    INIT_ATTRIBUTE(Pad);
 }
 
 template <int Rank, DType Dtype>
@@ -159,9 +160,22 @@ int OpPad<Rank, Dtype>::checkTensorAttributes()
         return 1;
     }
 
-    in       = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
-    out      = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
-    paddings = dynamic_cast<TosaReference::TensorTemplate<ETensor2<int32_t>>*>(inputs[1]);
+    in  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    out = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
+    ASSERT_MEM(in && out);
+
+    // padding in the spec is a 2D array of shape [Rank, 2].
+    // The reference model implements this as a 1D array of [Rank * 2], with ordering:
+    // [Rank0_front, Rank0_back, Rank1_front, Rank1_back, ..., Rank(N-1)_front, Rank(N-1)_back]
+    ERROR_IF(attribute->padding().size() != (Rank * 2), "OpPad: padding length needs to be (rank(input1) * 2)");
+
+    for (int i = 0; i < Rank; i++)
+    {
+        int32_t pad_front = attribute->padding()[2 * i];
+        int32_t pad_back  = attribute->padding()[2 * i + 1];
+        ERROR_IF((pad_front < 0) || (pad_back < 0), "OpPad: padding can't be smaller than 0");
+        paddings_array[i] = std::make_pair(pad_front, pad_back);
+    }
 
     if (this->qinfo && Dtype != DType_INT8)
     {
@@ -174,18 +188,24 @@ int OpPad<Rank, Dtype>::checkTensorAttributes()
 template <int Rank, DType Dtype>
 int OpPad<Rank, Dtype>::eval()
 {
-    // Move this to
-    for (int i = 0; i < Rank; i++)
+    InEigenType pad_value = 0;
+
+    switch (Dtype)
     {
-        ERROR_IF((paddings->getTensor()(i, 0) < 0) || (paddings->getTensor()(i, 1) < 0),
-                 "OpPad: padding can't be smaller than 0");
-        paddings_array[i] = std::make_pair(paddings->getTensor()(i, 0), paddings->getTensor()(i, 1));
+        case DType_BOOL:
+        case DType_INT8:
+        case DType_INT16:
+        case DType_INT32:
+            pad_value = (InEigenType)attribute->pad_const_int();
+            break;
+        case DType_FLOAT:
+            pad_value = (InEigenType)attribute->pad_const_fp();
+            break;
     }
 
-    InEigenType pad_value = 0;
-    if (this->qinfo)
+    if (this->qinfo && Dtype == DType_INT8)
     {
-        pad_value = (InEigenType)this->qinfo->input_zp();
+        pad_value += (InEigenType)this->qinfo->input_zp();
     }
 
     this->out->getTensor() = this->in->getTensor().pad(this->paddings_array, pad_value);
@@ -602,8 +622,10 @@ OpTranspose<Rank, Dtype>::OpTranspose(SubgraphTraverser* sgt_,
                                       uint64_t id_)
     : GraphNode(sgt_, Op_TRANSPOSE, id_)
 {
-    setRequiredOperands(2, 1);
+    setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
+
+    INIT_ATTRIBUTE(Transpose);
 }
 
 template <int Rank, DType Dtype>
@@ -634,9 +656,10 @@ int OpTranspose<Rank, Dtype>::checkTensorAttributes()
         return 1;
     }
 
-    in          = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
-    out         = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
-    perm_tensor = dynamic_cast<TosaReference::TensorTemplate<ETensor1<int32_t>>*>(inputs[1]);
+    in  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    out = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
+
+    ASSERT_MEM(in && out);
 
     return 0;
 }
@@ -646,7 +669,7 @@ int OpTranspose<Rank, Dtype>::eval()
 {
     for (int32_t d = 0; d < Rank; d++)
     {
-        perm_array[d] = this->perm_tensor->getTensor().data()[d];
+        perm_array[d] = attribute->perm()[d];
 
         ERROR_IF(perm_array[d] < 0 or perm_array[d] >= Rank, "OpTranspose: index out of boundary");
    }
-- 
cgit v1.2.1
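
For readers who want to sanity-check the new attribute-driven PAD behaviour outside the reference model, the following minimal C++ sketch reproduces the flattened-padding decoding and the Eigen pad() call from the hunks above. It assumes only Eigen's unsupported Tensor module; the rank, shape, padding values, and pad constant are invented for illustration, and the reference model's TensorTemplate/ERROR_IF/qinfo machinery is deliberately omitted.

// Standalone sketch of the new PAD semantics; not part of the patch.
// Assumes Eigen's unsupported Tensor module; all values are made up.
#include <cstdint>
#include <iostream>
#include <utility>
#include <unsupported/Eigen/CXX11/Tensor>

int main()
{
    constexpr int Rank = 2;

    // Flattened [Rank * 2] padding, as carried by the new attribute:
    // [dim0_front, dim0_back, dim1_front, dim1_back]
    const int32_t padding[Rank * 2] = { 1, 1, 0, 2 };
    const int32_t pad_const_int     = 42; // pad_const_fp would be used for DType_FLOAT

    // Rebuild the Eigen pair array the same way OpPad::checkTensorAttributes() does.
    Eigen::array<std::pair<ptrdiff_t, ptrdiff_t>, Rank> paddings_array;
    for (int i = 0; i < Rank; i++)
    {
        paddings_array[i] = std::make_pair(padding[2 * i], padding[2 * i + 1]);
    }

    Eigen::Tensor<int32_t, Rank> in(2, 3);
    in.setConstant(7);

    // Same call as in OpPad::eval(): pad with the constant taken from the attribute
    // (for DType_INT8 the reference model additionally adds qinfo->input_zp()).
    Eigen::Tensor<int32_t, Rank> out = in.pad(paddings_array, pad_const_int);

    // Prints "padded dims: 4 x 5": rows 2+1+1, columns 3+0+2.
    std::cout << "padded dims: " << out.dimension(0) << " x " << out.dimension(1) << "\n";
    return 0;
}

Moving padding, perm, and table from input tensors to attributes reflects that the TOSA spec treats them as graph-time constants of the operator, so the reference model can validate them once in checkTensorAttributes() instead of consuming and re-checking a second runtime input on every evaluation.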