author    Derek Lamberti <derek.lamberti@arm.com>  2019-04-15 18:37:35 +0100
committer derek.lamberti <derek.lamberti@arm.com>  2019-04-16 13:50:11 +0000
commit    0790dcea1056298d63f97dec904c8ade5d21f439 (patch)
tree      d75967e2eabe39ec08dc928fa77a7d4a51d85c5d /include
parent    b98bbcfa2a809c4ad025883c059ae49c82b37cbd (diff)
download  armnn-0790dcea1056298d63f97dec904c8ade5d21f439.tar.gz
IVGCVSW-2957 MergerLayer subtensor optimization now backend agnostic
+ Update clframework pin
+ Cl and Neon Merger workloads updated to use MemoryLayout agnostic API
+ Workloads only use sub-tensor optimization if ALL input tensors are sub-tensors (see the sketch below)
+ Refactor LayerSupportCommon code to be a bit more succinct

Change-Id: Ib61ad4ccbd767e924dff07e61022e0cda4069828
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
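As a rough illustration of the "ALL input tensors are sub-tensors" rule above — a minimal sketch only, not the actual workload code; the helper name UseSubTensorOptimization, the ITensorHandleView type, and the GetParent-based test are assumptions made for illustration:

#include <algorithm>
#include <vector>

// Hypothetical handle type, not part of the armnn API shown here: a
// sub-tensor is assumed to report a non-null parent, a top-level tensor
// reports nullptr.
struct ITensorHandleView
{
    virtual ~ITensorHandleView() = default;
    virtual ITensorHandleView* GetParent() const = 0;
};

// Enable the sub-tensor optimization only when EVERY input is a sub-tensor.
bool UseSubTensorOptimization(const std::vector<ITensorHandleView*>& inputs)
{
    return !inputs.empty() &&
           std::all_of(inputs.begin(), inputs.end(),
                       [](const ITensorHandleView* handle)
                       { return handle != nullptr && handle->GetParent() != nullptr; });
}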
Diffstat (limited to 'include')
 include/armnn/Tensor.hpp | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/armnn/Tensor.hpp b/include/armnn/Tensor.hpp
index 503c161570..160ccca79c 100644
--- a/include/armnn/Tensor.hpp
+++ b/include/armnn/Tensor.hpp
@@ -80,7 +80,11 @@ public:
int32_t GetQuantizationOffset() const { return m_Quantization.m_Offset; }
void SetQuantizationScale(float scale) { m_Quantization.m_Scale = scale; }
void SetQuantizationOffset(int32_t offset) { m_Quantization.m_Offset = offset; }
- bool IsQuantized() const { return m_DataType == DataType::QuantisedAsymm8; }
+ bool IsQuantized() const { return m_DataType == DataType::QuantisedAsymm8 ||
+ m_DataType == DataType::QuantisedSymm16; }
+
+ /// Check that the types are the same and, if quantized, that the quantization parameters are the same.
+ bool IsTypeSpaceMatch(const TensorInfo& other) const;
unsigned int GetNumBytes() const;
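The diff adds only the declaration of IsTypeSpaceMatch; the definition lands outside 'include'. A plausible implementation consistent with the doc comment — a sketch under that assumption, not the verbatim Tensor.cpp change — would be:

bool TensorInfo::IsTypeSpaceMatch(const TensorInfo& other) const
{
    // The data types must match exactly.
    bool match = m_DataType == other.m_DataType;

    // For quantized types, the quantization space (scale and offset)
    // must also match for the two tensors to be interchangeable.
    if (IsQuantized())
    {
        match &= GetQuantizationScale()  == other.GetQuantizationScale() &&
                 GetQuantizationOffset() == other.GetQuantizationOffset();
    }
    return match;
}

This is the kind of check a backend-agnostic sub-tensor optimization needs: two tensors that agree in type and quantization space can safely share underlying memory.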