From e9059775c0486de4a96d42b41104496f4aefe8e8 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Wed, 6 Dec 2023 18:52:30 +0000
Subject: Add support for int16_t inputs in eager interface

Implement support for int16_t inputs through int32_t underlying storage
buffers. Values are upcast/downcast as needed through an explicit copy.

Signed-off-by: Georgios Pinitas
Change-Id: I3752d98531c859002539bf1cb65413ceeff05e95
---
 reference_model/src/model_runner_impl.cc    |  12 +
 reference_model/src/tensor.cc               | 392 ++++++++++++++++++++++++++++
 reference_model/src/tensor.h                |  36 +++
 reference_model/test/model_runner_tests.cpp |  30 +++
 4 files changed, 470 insertions(+)

diff --git a/reference_model/src/model_runner_impl.cc b/reference_model/src/model_runner_impl.cc
index 447ee26..311db7c 100644
--- a/reference_model/src/model_runner_impl.cc
+++ b/reference_model/src/model_runner_impl.cc
@@ -197,6 +197,12 @@ int ModelRunnerImpl::setInput(std::string input_name, uint8_t* raw_ptr, size_t s
             status = setInput(input_name, ArrayProxy(elements, typed_ptr));
             break;
         }
+        case TOSA_REF_TYPE_INT16: {
+            auto typed_ptr     = reinterpret_cast<int16_t*>(raw_ptr);
+            const int elements = size / sizeof(int16_t);
+            status             = setInput(input_name, ArrayProxy(elements, typed_ptr));
+            break;
+        }
         case TOSA_REF_TYPE_INT32: {
             auto typed_ptr = reinterpret_cast(raw_ptr);
             const int elements = size / sizeof(int);
@@ -281,6 +287,12 @@ int ModelRunnerImpl::getOutput(std::string output_name, uint8_t* raw_ptr, size_t
             status = tensor->writeToVector(ArrayProxy(elements, typed_ptr));
             break;
         }
+        case TOSA_REF_TYPE_INT16: {
+            auto typed_ptr     = reinterpret_cast<int16_t*>(raw_ptr);
+            const int elements = size / sizeof(int16_t);
+            status             = tensor->writeToVector(ArrayProxy(elements, typed_ptr));
+            break;
+        }
         case TOSA_REF_TYPE_INT32: {
             auto typed_ptr = reinterpret_cast(raw_ptr);
             const int elements = size / sizeof(int);
diff --git a/reference_model/src/tensor.cc b/reference_model/src/tensor.cc
index 5fffa8a..645b55f 100644
--- a/reference_model/src/tensor.cc
+++ b/reference_model/src/tensor.cc
@@ -647,6 +647,31 @@ int TosaReference::Tensor::readfromVector(const ArrayProxy val
     return 0;
 }
 
+int TosaReference::Tensor::readfromVector(const ArrayProxy<int16_t> vals)
+{
+    uint32_t elements = getElementCount();
+    switch (getDtype())
+    {
+        case TOSA_REF_TYPE_INT16:
+        case TOSA_REF_TYPE_UINT16:
+            if (vals.size() != elements)
+            {
+                WARNING("The input size (%ld) doesn't match the number of elements (%d) assigned to the tensor.",
+                        vals.size(), elements);
+                return -1;
+            }
+
+            setTensorValueInt16(elements, vals.data());
+            break;
+        default:
+            WARNING("The input type doesn't match the data type assigned to the tensor (%s).",
+                    EnumNameTOSAREFTYPE(getDtype()));
+            return -2;
+    }
+    setIsValid();
+    return 0;
+}
+
 int TosaReference::Tensor::readfromVector(const ArrayProxy vals)
 {
     uint32_t elements = getElementCount();
@@ -822,6 +847,31 @@ int TosaReference::Tensor::writeToVector(ArrayProxy vals)
     return 0;
 }
 
+int TosaReference::Tensor::writeToVector(ArrayProxy<int16_t> vals)
+{
+    uint32_t elements = getElementCount();
+
+    switch (getDtype())
+    {
+        case TOSA_REF_TYPE_INT16:
+        case TOSA_REF_TYPE_UINT16:
+            if (vals.size() != elements)
+            {
+                WARNING("The output size (%ld) doesn't match the number of elements (%d) assigned to the tensor.",
+                        vals.size(), elements);
+                return -1;
+            }
+
+            getTensorValueInt16(elements, vals.data());
+            break;
+        default:
+            WARNING("The output type doesn't match the data type assigned to the tensor (%s).",
+                    EnumNameTOSAREFTYPE(getDtype()));
+            return -2;
+    }
+    return 0;
+}
+
 int TosaReference::Tensor::writeToVector(ArrayProxy vals)
 {
     uint32_t elements = getElementCount();
@@ -1204,6 +1254,158 @@ int TosaReference::Tensor6::setTensorValueFloat(const size_t bufLen, cons
     return 0;
 }
 
+template <class T>
+int TosaReference::TensorTemplate<T>::setTensorValueInt16(const size_t bufLen, const int16_t* vals)
+{
+    FATAL_ERROR("TensorTemplate::setTensorValueInt16 should not be called. "
+                "Implement template specialization version.");
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor0<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals)
+{
+    ASSERT_MSG(bufLen == getElementCount(), "Total elements must match");
+
+    (*tensor)(0) = static_cast<int32_t>(vals[0]);
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor1<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals)
+{
+    uint32_t idx = 0;
+
+    ASSERT_MSG(bufLen == getElementCount(), "Total elements must match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        (*tensor)(i0) = static_cast<int32_t>(vals[idx++]);
+    }
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor2<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals)
+{
+    uint32_t idx = 0;
+
+    ASSERT_MSG(bufLen == getElementCount(), "Total elements must match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        for (int i1 = 0; i1 < shape[1]; i1++)
+        {
+            (*tensor)(i0, i1) = static_cast<int32_t>(vals[idx++]);
+        }
+    }
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor3<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals)
+{
+    uint32_t idx = 0;
+
+    ASSERT_MSG(bufLen == getElementCount(), "Total elements must match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        for (int i1 = 0; i1 < shape[1]; i1++)
+        {
+            for (int i2 = 0; i2 < shape[2]; i2++)
+            {
+                (*tensor)(i0, i1, i2) = static_cast<int32_t>(vals[idx++]);
+            }
+        }
+    }
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor4<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals)
+{
+    uint32_t idx = 0;
+
+    ASSERT_MSG(bufLen == getElementCount(), "Total elements must match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        for (int i1 = 0; i1 < shape[1]; i1++)
+        {
+            for (int i2 = 0; i2 < shape[2]; i2++)
+            {
+                for (int i3 = 0; i3 < shape[3]; i3++)
+                {
+                    (*tensor)(i0, i1, i2, i3) = static_cast<int32_t>(vals[idx++]);
+                }
+            }
+        }
+    }
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor5<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals)
+{
+    uint32_t idx = 0;
+
+    ASSERT_MSG(bufLen == getElementCount(), "Total elements must match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        for (int i1 = 0; i1 < shape[1]; i1++)
+        {
+            for (int i2 = 0; i2 < shape[2]; i2++)
+            {
+                for (int i3 = 0; i3 < shape[3]; i3++)
+                {
+                    for (int i4 = 0; i4 < shape[4]; i4++)
+                    {
+                        (*tensor)(i0, i1, i2, i3, i4) = static_cast<int32_t>(vals[idx++]);
+                    }
+                }
+            }
+        }
+    }
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor6<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals)
+{
+    uint32_t idx = 0;
+
+    ASSERT_MSG(bufLen == getElementCount(), "Total elements must match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        for (int i1 = 0; i1 < shape[1]; i1++)
+        {
+            for (int i2 = 0; i2 < shape[2]; i2++)
+            {
+                for (int i3 = 0; i3 < shape[3]; i3++)
+                {
+                    for (int i4 = 0; i4 < shape[4]; i4++)
+                    {
+                        for (int i5 = 0; i5 < shape[5]; i5++)
+                        {
+                            (*tensor)(i0, i1, i2, i3, i4, i5) = static_cast<int32_t>(vals[idx++]);
+                        }
+                    }
+                }
+            }
+        }
+    }
+    return 0;
+}
+
 template <class T>
 int TosaReference::TensorTemplate<T>::setTensorValueInt32(const size_t bufLen, const int32_t* vals)
 {
@@ -2040,6 +2242,196 @@ int TosaReference::Tensor6::getTensorValueFloat(const size_t bufLen, floa
     return 0;
 }
 
+template <class T>
+int TosaReference::TensorTemplate<T>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const
+{
+    FATAL_ERROR("TensorTemplate::getTensorValueInt16 should not be called. "
+                "Implement template specialization version.");
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor0<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const
+{
+    int totalVals = 1;
+
+    ASSERT_MSG((size_t)totalVals == bufLen, "Output buffer and tensor size do not match");
+
+    vals[0] = (*tensor)(0);
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor1<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const
+{
+    uint32_t idx  = 0;
+    int totalVals = 1;
+
+    for (size_t i = 0; i < shape.size(); i++)
+    {
+        totalVals *= shape[i];
+    }
+
+    ASSERT_MSG((size_t)totalVals == bufLen, "Output buffer and tensor size do not match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        vals[idx++] = (*tensor)(i0);
+    }
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor2<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const
+{
+    uint32_t idx  = 0;
+    int totalVals = 1;
+
+    for (size_t i = 0; i < shape.size(); i++)
+    {
+        totalVals *= shape[i];
+    }
+
+    ASSERT_MSG((size_t)totalVals == bufLen, "Output buffer and tensor size do not match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        for (int i1 = 0; i1 < shape[1]; i1++)
+        {
+            vals[idx++] = (*tensor)(i0, i1);
+        }
+    }
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor3<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const
+{
+    uint32_t idx  = 0;
+    int totalVals = 1;
+
+    for (size_t i = 0; i < shape.size(); i++)
+    {
+        totalVals *= shape[i];
+    }
+
+    ASSERT_MSG((size_t)totalVals == bufLen, "Output buffer and tensor size do not match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        for (int i1 = 0; i1 < shape[1]; i1++)
+        {
+            for (int i2 = 0; i2 < shape[2]; i2++)
+            {
+                vals[idx++] = (*tensor)(i0, i1, i2);
+            }
+        }
+    }
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor4<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const
+{
+    uint32_t idx  = 0;
+    int totalVals = 1;
+
+    for (size_t i = 0; i < shape.size(); i++)
+    {
+        totalVals *= shape[i];
+    }
+
+    ASSERT_MSG((size_t)totalVals == bufLen, "Output buffer and tensor size do not match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        for (int i1 = 0; i1 < shape[1]; i1++)
+        {
+            for (int i2 = 0; i2 < shape[2]; i2++)
+            {
+                for (int i3 = 0; i3 < shape[3]; i3++)
+                {
+                    vals[idx++] = (*tensor)(i0, i1, i2, i3);
+                }
+            }
+        }
+    }
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor5<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const
+{
+    uint32_t idx  = 0;
+    int totalVals = 1;
+
+    for (size_t i = 0; i < shape.size(); i++)
+    {
+        totalVals *= shape[i];
+    }
+
+    ASSERT_MSG((size_t)totalVals == bufLen, "Output buffer and tensor size do not match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        for (int i1 = 0; i1 < shape[1]; i1++)
+        {
+            for (int i2 = 0; i2 < shape[2]; i2++)
+            {
+                for (int i3 = 0; i3 < shape[3]; i3++)
+                {
+                    for (int i4 = 0; i4 < shape[4]; i4++)
+                    {
+                        vals[idx++] = (*tensor)(i0, i1, i2, i3, i4);
+                    }
+                }
+            }
+        }
+    }
+
+    return 0;
+}
+
+template <>
+int TosaReference::Tensor6<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const
+{
+    uint32_t idx  = 0;
+    int totalVals = 1;
+
+    for (size_t i = 0; i < shape.size(); i++)
+    {
+        totalVals *= shape[i];
+    }
+
+    ASSERT_MSG((size_t)totalVals == bufLen, "Output buffer and tensor size do not match");
+
+    for (int i0 = 0; i0 < shape[0]; i0++)
+    {
+        for (int i1 = 0; i1 < shape[1]; i1++)
+        {
+            for (int i2 = 0; i2 < shape[2]; i2++)
+            {
+                for (int i3 = 0; i3 < shape[3]; i3++)
+                {
+                    for (int i4 = 0; i4 < shape[4]; i4++)
+                    {
+                        for (int i5 = 0; i5 < shape[5]; i5++)
+                        {
+                            vals[idx++] = (*tensor)(i0, i1, i2, i3, i4, i5);
+                        }
+                    }
+                }
+            }
+        }
+    }
+    return 0;
+}
+
 template <class T>
 int TosaReference::TensorTemplate<T>::getTensorValueInt32(const size_t bufLen, int32_t* vals) const
 {
diff --git a/reference_model/src/tensor.h b/reference_model/src/tensor.h
index 203cfec..5bcd1b2 100644
--- a/reference_model/src/tensor.h
+++ b/reference_model/src/tensor.h
@@ -241,11 +241,13 @@ public:
     virtual int setTensorValueDouble(const size_t bufLen, const double* vals) = 0;
     virtual int setTensorValueFloat(const size_t bufLen, const float* vals)   = 0;
+    virtual int setTensorValueInt16(const size_t bufLen, const int16_t* vals) = 0;
     virtual int setTensorValueInt32(const size_t bufLen, const int32_t* vals) = 0;
     virtual int setTensorValueInt64(const size_t bufLen, const int64_t* vals) = 0;
     virtual int setTensorValueBool(const size_t bufLen, const bool* vals)     = 0;
 
     virtual int getTensorValueDouble(const size_t bufLen, double* fbuf) const = 0;
     virtual int getTensorValueFloat(const size_t bufLen, float* fbuf) const   = 0;
+    virtual int getTensorValueInt16(const size_t bufLen, int16_t* ibuf) const = 0;
     virtual int getTensorValueInt32(const size_t bufLen, int32_t* ibuf) const = 0;
     virtual int getTensorValueInt64(const size_t bufLen, int64_t* ibuf) const = 0;
     virtual int getTensorValueBool(const size_t bufLen, bool* ibuf) const     = 0;
@@ -257,6 +259,7 @@ public:
     virtual int readfromVector(const ArrayProxy vals);
     virtual int readfromVector(const ArrayProxy vals);
    virtual int readfromVector(const ArrayProxy vals);
+    virtual int readfromVector(const ArrayProxy<int16_t> vals);
     virtual int readfromVector(const ArrayProxy vals);
     virtual int readfromVector(const ArrayProxy vals);
     virtual int readfromVector(const ArrayProxy vals);
@@ -264,6 +267,7 @@ public:
     virtual int writeToVector(ArrayProxy vals);
     virtual int writeToVector(ArrayProxy vals);
     virtual int writeToVector(ArrayProxy vals);
+    virtual int writeToVector(ArrayProxy<int16_t> vals);
     virtual int writeToVector(ArrayProxy vals);
     virtual int writeToVector(ArrayProxy vals);
     virtual int writeToVector(ArrayProxy vals);
@@ -357,12 +361,14 @@ public:
     virtual int setTensorValueDouble(const size_t bufLen, const double* vals);
     virtual int setTensorValueFloat(const size_t bufLen, const float* vals);
+    virtual int setTensorValueInt16(const size_t bufLen, const int16_t* vals);
     virtual int setTensorValueInt32(const size_t bufLen, const int32_t* vals);
     virtual int setTensorValueInt64(const size_t bufLen, const int64_t* vals);
     virtual int setTensorValueBool(const size_t bufLen, const bool* vals);
 
     virtual int getTensorValueDouble(const size_t bufLen, double* fbuf) const;
     virtual int getTensorValueFloat(const size_t bufLen, float* fbuf) const;
+    virtual int getTensorValueInt16(const size_t bufLen, int16_t* ibuf) const;
     virtual int getTensorValueInt32(const size_t bufLen, int32_t* ibuf) const;
     virtual int getTensorValueInt64(const size_t bufLen, int64_t* ibuf) const;
     virtual int getTensorValueBool(const size_t bufLen, bool* bbuf) const;
@@ -525,6 +531,21 @@ int Tensor5::copyValueFrom(Tensor* src);
 template <>
 int Tensor6::copyValueFrom(Tensor* src);
 
+template <>
+int Tensor0<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals);
+template <>
+int Tensor1<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals);
+template <>
+int Tensor2<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals);
+template <>
+int Tensor3<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals);
+template <>
+int Tensor4<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals);
+template <>
+int Tensor5<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals);
+template <>
+int Tensor6<int32_t>::setTensorValueInt16(const size_t bufLen, const int16_t* vals);
+
 template <>
 int Tensor0::setTensorValueInt32(const size_t bufLen, const int32_t* vals);
 template <>
@@ -540,6 +561,21 @@ int Tensor5::setTensorValueInt32(const size_t bufLen, const int32_t* va
 template <>
 int Tensor6::setTensorValueInt32(const size_t bufLen, const int32_t* vals);
 
+template <>
+int Tensor0<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const;
+template <>
+int Tensor1<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const;
+template <>
+int Tensor2<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const;
+template <>
+int Tensor3<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const;
+template <>
+int Tensor4<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const;
+template <>
+int Tensor5<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const;
+template <>
+int Tensor6<int32_t>::getTensorValueInt16(const size_t bufLen, int16_t* vals) const;
+
 template <>
 int Tensor0::getTensorValueInt32(const size_t bufLen, int32_t* vals) const;
 template <>
diff --git a/reference_model/test/model_runner_tests.cpp b/reference_model/test/model_runner_tests.cpp
index 6580774..35e3aa2 100644
--- a/reference_model/test/model_runner_tests.cpp
+++ b/reference_model/test/model_runner_tests.cpp
@@ -122,6 +122,36 @@ TEST_SUITE("model_runner")
         compareOutput(dstData, expectedData, expectedData.size());
     }
 
+    TEST_CASE("op_entry_cast")
+    {
+        // Inputs/Outputs
+        std::vector<int32_t> shape   = { 1, 2, 2, 1 };
+        std::vector<int16_t> srcData = { 15, 13, 5, -51 };
+        std::vector<float> dstData(4, 0.f);
+
+        tosa_tensor_t input;
+        input.shape     = shape.data();
+        input.num_dims  = shape.size();
+        input.data_type = tosa_datatype_int16_t;
+        input.data      = reinterpret_cast<uint8_t*>(srcData.data());
+        input.size      = srcData.size() * sizeof(int16_t);
+
+        tosa_tensor_t output;
+        output.shape     = shape.data();
+        output.num_dims  = shape.size();
+        output.data_type = tosa_datatype_fp32_t;
+        output.data      = reinterpret_cast<uint8_t*>(dstData.data());
+        output.size      = dstData.size() * sizeof(float);
+
+        // Execution
+        auto status = tosa_run_cast(input, output, {});
+        CHECK((status == tosa_status_valid));
+
+        // Compare results
+        std::vector<float> expectedData = { 15.f, 13.f, 5.f, -51.f };
+        compareOutput(dstData, expectedData, expectedData.size());
+    }
+
     TEST_CASE("op_entry_conv2d")
     {
         // Conv parameters
-- 
cgit v1.2.1
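
Editor's note: the following is not part of the patch. It is a minimal, hedged sketch of how a client might exercise the new int16_t path through the eager interface, mirroring the op_entry_cast test added above. The tosa_tensor_t fields, tosa_datatype_int16_t, tosa_run_cast and tosa_status_valid come from the patch itself; the include path and the main() scaffolding are assumptions.

// Sketch only: feeds an int16_t buffer into the reference model's CAST op
// entry point and reads back fp32. Internally the values are upcast into the
// int32_t storage tensor on input and downcast on output, per the commit message.
#include "operators.h" // eager-interface entry points; exact header name assumed

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // A 1x2x2x1 int16_t input that the reference model will cast to fp32.
    std::vector<int32_t> shape   = { 1, 2, 2, 1 };
    std::vector<int16_t> srcData = { 15, 13, 5, -51 };
    std::vector<float> dstData(srcData.size(), 0.f);

    tosa_tensor_t input;
    input.shape     = shape.data();
    input.num_dims  = shape.size();
    input.data_type = tosa_datatype_int16_t; // newly supported input type
    input.data      = reinterpret_cast<uint8_t*>(srcData.data());
    input.size      = srcData.size() * sizeof(int16_t);

    tosa_tensor_t output;
    output.shape     = shape.data();
    output.num_dims  = shape.size();
    output.data_type = tosa_datatype_fp32_t;
    output.data      = reinterpret_cast<uint8_t*>(dstData.data());
    output.size      = dstData.size() * sizeof(float);

    // Third argument is the default (empty) config, exactly as in the test.
    if (tosa_run_cast(input, output, {}) != tosa_status_valid)
    {
        std::fprintf(stderr, "CAST failed\n");
        return 1;
    }

    for (float v : dstData)
        std::printf("%g\n", v); // expected: 15 13 5 -51
    return 0;
}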