aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2023-10-30 22:29:58 +0000
committerTeresaARM <teresa.charlinreyes@arm.com>2023-11-02 11:42:53 +0000
commit5306dc893bbd6a203744e11c242b9811c8cf1d0c (patch)
tree74607a893e0573025feb242df2da6fd02cf23b14
parentb8aeec291e8bc1ef1f41d71ab47f4a8350bd8825 (diff)
downloadarmnn-5306dc893bbd6a203744e11c242b9811c8cf1d0c.tar.gz
IVGCVSW-7889 Add support for Signed64 data type in Cast layer
* Remove mention of "isDepthwise" variable name when not needed, and therefore the IgnoreUnused call
* Improve error messages and change them to throws in Encoder and Decoder

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I8ce30b5075e1e47d54abc12390265ba8e9ee1405
-rw-r--r--src/backends/reference/RefLayerSupport.cpp5
-rw-r--r--src/backends/reference/test/RefPerAxisIteratorTests.cpp2
-rw-r--r--src/backends/reference/workloads/ArgMinMax.cpp1
-rw-r--r--src/backends/reference/workloads/BaseIterator.hpp111
-rw-r--r--src/backends/reference/workloads/Decoders.hpp26
-rw-r--r--src/backends/reference/workloads/DetectionPostProcess.cpp1
-rw-r--r--src/backends/reference/workloads/Encoders.hpp26
7 files changed, 110 insertions, 62 deletions
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 167639a733..2be227ae8f 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -847,7 +847,7 @@ bool RefLayerSupport::IsCastSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- std::array<DataType, 9> supportedInputTypes =
+ std::array<DataType, 10> supportedInputTypes =
{
DataType::Float32,
DataType::Float16,
@@ -855,7 +855,8 @@ bool RefLayerSupport::IsCastSupported(const TensorInfo& input,
DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16,
- DataType::Signed32
+ DataType::Signed32,
+ DataType::Signed64
};
bool supported = true;
diff --git a/src/backends/reference/test/RefPerAxisIteratorTests.cpp b/src/backends/reference/test/RefPerAxisIteratorTests.cpp
index 92b828e067..d437f4da77 100644
--- a/src/backends/reference/test/RefPerAxisIteratorTests.cpp
+++ b/src/backends/reference/test/RefPerAxisIteratorTests.cpp
@@ -5,6 +5,8 @@
#include <reference/workloads/Decoders.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <fmt/format.h>
#include <doctest/doctest.h>
diff --git a/src/backends/reference/workloads/ArgMinMax.cpp b/src/backends/reference/workloads/ArgMinMax.cpp
index 3bf2853a20..abc0512021 100644
--- a/src/backends/reference/workloads/ArgMinMax.cpp
+++ b/src/backends/reference/workloads/ArgMinMax.cpp
@@ -7,6 +7,7 @@
#include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
namespace armnn
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 1665c1ff46..694c22913c 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -7,7 +7,6 @@
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>
@@ -45,9 +44,7 @@ public:
virtual IType Get() const = 0;
- virtual std::vector<float>
- DecodeTensor(const TensorShape &tensorShape,
- bool isDepthwise = false) = 0;
+ virtual std::vector<float> DecodeTensor(const TensorShape &tensorShape, bool isDepthwise = false) = 0;
};
template<typename IType>
@@ -125,11 +122,8 @@ public:
{
return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
}
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
-
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
@@ -162,11 +156,8 @@ public:
{
return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
}
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
-
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
@@ -199,11 +190,8 @@ public:
{
return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
}
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
-
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
@@ -236,11 +224,8 @@ public:
{
return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
}
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
-
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
@@ -275,11 +260,8 @@ public:
armnnUtils::FloatingPointConverter::ConvertFloat16To32(m_Iterator, 1, &val);
return val;
}
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool ) override
{
- IgnoreUnused(isDepthwise);
-
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
@@ -311,10 +293,8 @@ public:
{
return *m_Iterator;
}
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
@@ -338,11 +318,8 @@ public:
{
return static_cast<float>(*m_Iterator) * m_Scale;
}
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
-
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
@@ -374,11 +351,8 @@ public:
{
return static_cast<float>(*m_Iterator);
}
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
-
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
@@ -406,11 +380,37 @@ public:
{
return *m_Iterator;
}
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
+ const unsigned int size = tensorShape.GetNumElements();
+ std::vector<float> decodedTensor;
+ decodedTensor.reserve(size);
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ this->operator[](i);
+ decodedTensor.emplace_back(static_cast<float>(*m_Iterator));
+ }
+
+ return decodedTensor;
+ }
+};
+
+class Int64Decoder : public TypedIterator<const int64_t, Decoder<double_t>>
+{
+public:
+ Int64Decoder(const int64_t* data)
+ : TypedIterator(data) {}
+
+ Int64Decoder()
+ : Int64Decoder(nullptr) {}
+
+ double_t Get() const override
+ {
+ return static_cast<double_t>(*m_Iterator);
+ }
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
+ {
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
@@ -438,11 +438,8 @@ public:
{
return *m_Iterator;
}
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor (const TensorShape& tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
-
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
@@ -471,11 +468,8 @@ public:
return *m_Iterator;
}
- std::vector<float> DecodeTensor(const TensorShape& tensorShape,
- const bool isDepthwise) override
+ std::vector<float> DecodeTensor(const TensorShape& tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
-
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
@@ -668,6 +662,26 @@ public:
}
};
+class Int64Encoder : public TypedIterator<int64_t, Encoder<double>>
+{
+public:
+ Int64Encoder(int64_t* data)
+ : TypedIterator(data) {}
+
+ Int64Encoder()
+ : Int64Encoder(nullptr) {}
+
+ void Set(double right) override
+ {
+ *m_Iterator = static_cast<int64_t>(right);
+ }
+
+ double_t Get() const override
+ {
+ return static_cast<double>(*m_Iterator);
+ }
+};
+
class BooleanEncoder : public TypedIterator<uint8_t, Encoder<bool>>
{
public:
@@ -797,11 +811,8 @@ public:
return m_Scales[m_AxisIndex];
}
- std::vector<float> DecodeTensor(const TensorShape &tensorShape,
- bool isDepthwise) override
+ std::vector<float> DecodeTensor(const TensorShape &tensorShape, const bool) override
{
- IgnoreUnused(isDepthwise);
-
const unsigned int size = tensorShape.GetNumElements();
std::vector<float> decodedTensor;
decodedTensor.reserve(size);
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 54e7008d50..3bf3db7967 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -10,8 +10,6 @@
#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>
-#include <armnn/utility/Assert.hpp>
-
namespace armnn
{
@@ -121,7 +119,25 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
+ throw InvalidArgumentException("Unsupported target Data Type!");
+ break;
+ }
+ }
+ return nullptr;
+}
+
+template<>
+inline std::unique_ptr<Decoder<double_t>> MakeDecoder(const TensorInfo& info, const void* data)
+{
+ switch(info.GetDataType())
+ {
+ case DataType::Signed64:
+ {
+ return std::make_unique<Int64Decoder>(static_cast<const int64_t*>(data));
+ }
+ default:
+ {
+ throw InvalidArgumentException("Cannot decode to double. Unsupported origin Data Type!");
break;
}
}
@@ -139,7 +155,7 @@ inline std::unique_ptr<Decoder<bool>> MakeDecoder(const TensorInfo& info, const
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
+ throw InvalidArgumentException("Cannot decode to bool. Unsupported origin Data Type!");
break;
}
}
@@ -157,7 +173,7 @@ inline std::unique_ptr<Decoder<int32_t>> MakeDecoder(const TensorInfo& info, con
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
+ throw InvalidArgumentException("Cannot decode to int32. Unsupported origin Data Type!");
break;
}
}
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 2108efe8f3..c5ab327f90 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -6,6 +6,7 @@
#include "DetectionPostProcess.hpp"
#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <algorithm>
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index 8a702377b2..5de361590a 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -9,8 +9,6 @@
#include <armnnUtils/TensorUtils.hpp>
-#include <armnn/utility/Assert.hpp>
-
namespace armnn
{
@@ -89,7 +87,25 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unsupported target Data Type!");
+ throw InvalidArgumentException("Unsupported target Data Type!");
+ break;
+ }
+ }
+ return nullptr;
+}
+
+template<>
+inline std::unique_ptr<Encoder<double_t>> MakeEncoder(const TensorInfo& info, void* data)
+{
+ switch(info.GetDataType())
+ {
+ case armnn::DataType::Signed64:
+ {
+ return std::make_unique<Int64Encoder>(static_cast<int64_t*>(data));
+ }
+ default:
+ {
+ throw InvalidArgumentException("Cannot encode from double. Unsupported target Data Type!");
break;
}
}
@@ -107,7 +123,7 @@ inline std::unique_ptr<Encoder<bool>> MakeEncoder(const TensorInfo& info, void*
}
default:
{
- ARMNN_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
+ throw InvalidArgumentException("Cannot encode from boolean. Unsupported target Data Type!");
break;
}
}
@@ -125,7 +141,7 @@ inline std::unique_ptr<Encoder<int32_t>> MakeEncoder(const TensorInfo& info, voi
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
+ throw InvalidArgumentException("Cannot encode from int32. Unsupported Data Type!");
break;
}
}