Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/NetworkQuantizerUtils.cpp  |  4
-rw-r--r--  src/armnn/NetworkQuantizerUtils.hpp  |  6
-rw-r--r--  src/armnn/QuantizerVisitor.cpp       |  4
-rw-r--r--  src/armnn/test/QuantizerTest.cpp     |  6
4 files changed, 10 insertions, 10 deletions
diff --git a/src/armnn/NetworkQuantizerUtils.cpp b/src/armnn/NetworkQuantizerUtils.cpp
index 1bec63b58c..551760f362 100644
--- a/src/armnn/NetworkQuantizerUtils.cpp
+++ b/src/armnn/NetworkQuantizerUtils.cpp
@@ -12,7 +12,7 @@
namespace armnn
{
-std::pair<int, float> ComputeQAsymmParams(int numBits, double min, double max)
+std::pair<float, int> ComputeQAsymmParams(int numBits, double min, double max)
{
BOOST_ASSERT_MSG(min < max, "min >= max will result in invalid quantization.");
double highest = (1 << numBits) - 1;
@@ -27,7 +27,7 @@ std::pair<int, float> ComputeQAsymmParams(int numBits, double min, double max)
// Clamp offset [0-highest]
offset = std::max(0.0, std::min(highest, offset));
- return std::make_pair(static_cast<int>(std::round(offset)), static_cast<float>(scale));
+ return std::make_pair(static_cast<float>(scale), static_cast<int>(std::round(offset)));
}
ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>& backing)
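Read together, the two hunks above swap the return pair to (scale, offset). A minimal sketch of the whole function after the change, with the elided middle lines filled in under the standard asymmetric-quantization formula (an assumption; only the signature, the assert, the `highest` line, the clamp, and the return are confirmed by the hunks):

    // Sketch: numBits-bit asymmetric quantization parameters.
    // The scale/offset formulas are assumed from the conventional scheme,
    // not copied from the elided source lines.
    std::pair<float, int> ComputeQAsymmParams(int numBits, double min, double max)
    {
        BOOST_ASSERT_MSG(min < max, "min >= max will result in invalid quantization.");
        double highest = (1 << numBits) - 1;

        double scale = (max - min) / highest; // real-valued step per quantized unit
        double offset = -min / scale;         // zero point in quantized space

        // Clamp offset [0-highest]
        offset = std::max(0.0, std::min(highest, offset));

        // New ordering: scale first, offset second.
        return std::make_pair(static_cast<float>(scale), static_cast<int>(std::round(offset)));
    }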
diff --git a/src/armnn/NetworkQuantizerUtils.hpp b/src/armnn/NetworkQuantizerUtils.hpp
index 458d21a974..c23517e385 100644
--- a/src/armnn/NetworkQuantizerUtils.hpp
+++ b/src/armnn/NetworkQuantizerUtils.hpp
@@ -17,7 +17,7 @@
namespace armnn
{
-std::pair<int, float> ComputeQAsymmParams(int numBits, double min, double max);
+std::pair<float, int> ComputeQAsymmParams(int numBits, double min, double max);
template<typename srcType>
void Quantize(const srcType* src, uint8_t* dst, size_t numElements, float& scale, int& offset)
@@ -34,8 +34,8 @@ void Quantize(const srcType* src, uint8_t* dst, size_t numElements, float& scale
}
auto qParams = ComputeQAsymmParams(8, min, max);
- offset = qParams.first;
- scale = qParams.second;
+ scale = qParams.first;
+ offset = qParams.second;
for (size_t i = 0; i < numElements; ++i)
{
dst[i] = armnn::Quantize<uint8_t>(src[i], scale, offset);
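After the reorder, the template assigns the pair out as scale-then-offset, matching the declaration order of its reference parameters. A hypothetical caller (names, values, and the include path below are illustrative, not from the source):

    #include <cstdint>
    #include <vector>
    #include "NetworkQuantizerUtils.hpp" // hypothetical include path

    void QuantizeExample()
    {
        std::vector<float> src = {-1.0f, 0.0f, 0.5f, 1.0f};
        std::vector<uint8_t> dst(src.size());
        float scale = 0.0f;
        int offset = 0;
        // scale/offset come back through the reference parameters, assigned
        // from qParams.first/qParams.second in the new (scale, offset) order.
        armnn::Quantize(src.data(), dst.data(), src.size(), scale, offset);
    }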
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 437d7b90d6..110594c1ab 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -50,8 +50,8 @@ void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* src
// Set the quantization params
TensorInfo info(newOutputSlot.GetTensorInfo());
info.SetDataType(DataType::QuantisedAsymm8);
- info.SetQuantizationOffset(qParams.first);
- info.SetQuantizationScale(qParams.second);
+ info.SetQuantizationOffset(qParams.second);
+ info.SetQuantizationScale(qParams.first);
newOutputSlot.SetTensorInfo(info);
}
}
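Call sites like this one depend on remembering which pair element is which. Under a C++17 toolchain (an assumption; this change keeps the .first/.second style), structured bindings would name the elements at the point of use:

    // Sketch, assuming C++17: name the elements once instead of spelling
    // .first/.second at every call site.
    auto [scale, offset] = ComputeQAsymmParams(8, min, max);
    info.SetQuantizationScale(scale);
    info.SetQuantizationOffset(offset);

A small named struct (e.g. { float scale; int offset; }) would go further, since member names survive a reordering that positional access does not.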
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index fcce208c59..f7723bd0c0 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -997,15 +997,15 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger)
const OriginsDescriptor& mergerDescriptor,
const char* name = nullptr)
{
- std::pair<int, float> expectedValues = ComputeQAsymmParams(8, m_Min, m_Max);
+ std::pair<float, int> expectedValues = ComputeQAsymmParams(8, m_Min, m_Max);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
- BOOST_TEST((info.GetQuantizationOffset() == expectedValues.first));
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), expectedValues.second, 0.000001f);
+ BOOST_TEST((info.GetQuantizationOffset() == expectedValues.second));
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), expectedValues.first, 0.000001f);
}
private:
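As a worked example of what the reordered pair carries: for 8 bits with min = -1.0 and max = 1.0 (hypothetical values; the fixture's m_Min/m_Max are not shown in this hunk), the conventional formula gives scale = 2.0 / 255 ≈ 0.007843 and offset = round(1.0 / 0.007843) = round(127.5) = 128, which lies inside the [0, 255] clamp. A sketch of an equivalent check:

    // Hypothetical check, assuming min = -1.0, max = 1.0 and the
    // conventional asymmetric formula; tolerance loosened to allow for
    // double-to-float rounding of the scale.
    std::pair<float, int> qParams = ComputeQAsymmParams(8, -1.0, 1.0);
    BOOST_CHECK_CLOSE(qParams.first, 2.0f / 255.0f, 0.001f); // scale
    BOOST_TEST(qParams.second == 128);                       // offset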