Diffstat (limited to 'include')
-rw-r--r--  include/armnnTestUtils/MemCopyTestImpl.hpp            115
-rw-r--r--  include/armnnTestUtils/MockWorkloadFactoryHelper.hpp   41
-rw-r--r--  include/armnnTestUtils/TensorHelpers.hpp               235
-rw-r--r--  include/armnnUtils/QuantizeHelper.hpp                  132
4 files changed, 523 insertions, 0 deletions
diff --git a/include/armnnTestUtils/MemCopyTestImpl.hpp b/include/armnnTestUtils/MemCopyTestImpl.hpp
new file mode 100644
index 0000000000..1856dcb056
--- /dev/null
+++ b/include/armnnTestUtils/MemCopyTestImpl.hpp
@@ -0,0 +1,115 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerTestResult.hpp"
+#include "TensorCopyUtils.hpp"
+#include "TensorHelpers.hpp"
+#include "WorkloadTestUtils.hpp"
+#include <ResolveType.hpp>
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnnTestUtils/MockBackend.hpp>
+
+namespace
+{
+
+template<armnn::DataType dataType, typename T = armnn::ResolveType<dataType>>
+LayerTestResult<T, 4> MemCopyTest(armnn::IWorkloadFactory& srcWorkloadFactory,
+ armnn::IWorkloadFactory& dstWorkloadFactory,
+ bool withSubtensors)
+{
+ const std::array<unsigned int, 4> shapeData = { { 1u, 1u, 6u, 5u } };
+ const armnn::TensorShape tensorShape(4, shapeData.data());
+ const armnn::TensorInfo tensorInfo(tensorShape, dataType);
+ std::vector<T> inputData =
+ {
+ 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30,
+ };
+
+ LayerTestResult<T, 4> ret(tensorInfo);
+ ret.m_ExpectedData = inputData;
+
+ std::vector<T> actualOutput(tensorInfo.GetNumElements());
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ auto inputTensorHandle = srcWorkloadFactory.CreateTensorHandle(tensorInfo);
+ auto outputTensorHandle = dstWorkloadFactory.CreateTensorHandle(tensorInfo);
+ ARMNN_NO_DEPRECATE_WARN_END
+
+ AllocateAndCopyDataToITensorHandle(inputTensorHandle.get(), inputData.data());
+ outputTensorHandle->Allocate();
+
+ armnn::MemCopyQueueDescriptor memCopyQueueDesc;
+ armnn::WorkloadInfo workloadInfo;
+
+ const unsigned int origin[4] = {};
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ auto workloadInput = (withSubtensors && srcWorkloadFactory.SupportsSubTensors())
+ ? srcWorkloadFactory.CreateSubTensorHandle(*inputTensorHandle, tensorShape, origin)
+ : std::move(inputTensorHandle);
+ auto workloadOutput = (withSubtensors && dstWorkloadFactory.SupportsSubTensors())
+ ? dstWorkloadFactory.CreateSubTensorHandle(*outputTensorHandle, tensorShape, origin)
+ : std::move(outputTensorHandle);
+ ARMNN_NO_DEPRECATE_WARN_END
+
+ AddInputToWorkload(memCopyQueueDesc, workloadInfo, tensorInfo, workloadInput.get());
+ AddOutputToWorkload(memCopyQueueDesc, workloadInfo, tensorInfo, workloadOutput.get());
+
+ dstWorkloadFactory.CreateWorkload(armnn::LayerType::MemCopy, memCopyQueueDesc, workloadInfo)->Execute();
+
+ CopyDataFromITensorHandle(actualOutput.data(), workloadOutput.get());
+ ret.m_ActualData = actualOutput;
+
+ return ret;
+}
+
+template <typename WorkloadFactoryType>
+struct MemCopyTestHelper
+{};
+template <>
+struct MemCopyTestHelper<armnn::MockWorkloadFactory>
+{
+ static armnn::IBackendInternal::IMemoryManagerSharedPtr GetMemoryManager()
+ {
+ armnn::MockBackend backend;
+ return backend.CreateMemoryManager();
+ }
+
+ static armnn::MockWorkloadFactory
+ GetFactory(const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
+ {
+ IgnoreUnused(memoryManager);
+ return armnn::MockWorkloadFactory();
+ }
+};
+
+using MockMemCopyTestHelper = MemCopyTestHelper<armnn::MockWorkloadFactory>;
+
+template <typename SrcWorkloadFactory,
+ typename DstWorkloadFactory,
+ armnn::DataType dataType,
+ typename T = armnn::ResolveType<dataType>>
+LayerTestResult<T, 4> MemCopyTest(bool withSubtensors)
+{
+
+ armnn::IBackendInternal::IMemoryManagerSharedPtr srcMemoryManager =
+ MemCopyTestHelper<SrcWorkloadFactory>::GetMemoryManager();
+
+ armnn::IBackendInternal::IMemoryManagerSharedPtr dstMemoryManager =
+ MemCopyTestHelper<DstWorkloadFactory>::GetMemoryManager();
+
+ SrcWorkloadFactory srcWorkloadFactory = MemCopyTestHelper<SrcWorkloadFactory>::GetFactory(srcMemoryManager);
+ DstWorkloadFactory dstWorkloadFactory = MemCopyTestHelper<DstWorkloadFactory>::GetFactory(dstMemoryManager);
+
+ return MemCopyTest<dataType>(srcWorkloadFactory, dstWorkloadFactory, withSubtensors);
+}
+
+} // anonymous namespace
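For context, a minimal doctest sketch of how the factory-pair overload above might be driven (illustrative only, not part of this change; it assumes the mock backend can service plain MemCopy workloads, and it uses CompareTensors from the TensorHelpers.hpp header added further down):

// Illustrative sketch only -- not part of the committed change.
TEST_CASE("MemCopyBetweenMockFactories")
{
    using namespace armnn;

    // Both factory types resolve to the MockWorkloadFactory specialisation of
    // MemCopyTestHelper defined above.
    LayerTestResult<float, 4> result =
        MemCopyTest<MockWorkloadFactory, MockWorkloadFactory, DataType::Float32>(false);

    // CompareTensors and the LayerTestResult members mirror how the existing
    // backend tests consume these helpers.
    auto predicate = CompareTensors(result.m_ActualData,
                                    result.m_ExpectedData,
                                    result.m_ActualShape,
                                    result.m_ExpectedShape);
    CHECK_MESSAGE(predicate.m_Result, predicate.m_Message.str());
}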
diff --git a/include/armnnTestUtils/MockWorkloadFactoryHelper.hpp b/include/armnnTestUtils/MockWorkloadFactoryHelper.hpp
new file mode 100644
index 0000000000..e9f2b2f6cf
--- /dev/null
+++ b/include/armnnTestUtils/MockWorkloadFactoryHelper.hpp
@@ -0,0 +1,41 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "MockBackend.hpp"
+#include "MockTensorHandleFactory.hpp"
+#include <backendsCommon/test/WorkloadFactoryHelper.hpp>
+
+namespace
+{
+
+template <>
+struct WorkloadFactoryHelper<armnn::MockWorkloadFactory>
+{
+ static armnn::IBackendInternal::IMemoryManagerSharedPtr GetMemoryManager()
+ {
+ armnn::MockBackend backend;
+ return backend.CreateMemoryManager();
+ }
+
+ static armnn::MockWorkloadFactory
+ GetFactory(const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
+ {
+ IgnoreUnused(memoryManager);
+ return armnn::MockWorkloadFactory();
+ }
+
+ static armnn::MockTensorHandleFactory
+ GetTensorHandleFactory(const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
+ {
+
+ return armnn::MockTensorHandleFactory(std::static_pointer_cast<armnn::MockMemoryManager>(memoryManager));
+ }
+};
+
+using MockWorkloadFactoryHelper = WorkloadFactoryHelper<armnn::MockWorkloadFactory>;
+
+} // anonymous namespace
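For reference, a sketch of the consumer pattern this specialisation targets (an assumption about how the shared backend tests use WorkloadFactoryHelper; RunOnFactory is a hypothetical name, not part of this change):

// Illustrative sketch only -- the call sequence follows the pattern of the
// shared backend tests; RunOnFactory itself is hypothetical.
template <typename FactoryType>
void RunOnFactory()
{
    using Helper = WorkloadFactoryHelper<FactoryType>;

    auto memoryManager       = Helper::GetMemoryManager();
    FactoryType factory      = Helper::GetFactory(memoryManager);
    auto tensorHandleFactory = Helper::GetTensorHandleFactory(memoryManager);

    // A real test would now create tensor handles via tensorHandleFactory and
    // build workloads via factory.
    armnn::IgnoreUnused(factory, tensorHandleFactory);
}

// e.g. RunOnFactory<armnn::MockWorkloadFactory>();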
diff --git a/include/armnnTestUtils/TensorHelpers.hpp b/include/armnnTestUtils/TensorHelpers.hpp
new file mode 100644
index 0000000000..ca17e621c3
--- /dev/null
+++ b/include/armnnTestUtils/TensorHelpers.hpp
@@ -0,0 +1,235 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnnTestUtils/PredicateResult.hpp>
+
+#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnnUtils/FloatingPointComparison.hpp>
+
+#include <armnnUtils/QuantizeHelper.hpp>
+
+#include <doctest/doctest.h>
+
+#include <array>
+#include <cmath>
+#include <random>
+#include <vector>
+
+constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f;
+
+template<typename T, bool isQuantized = true>
+struct SelectiveComparer
+{
+ static bool Compare(T a, T b)
+ {
+ return (std::max(a, b) - std::min(a, b)) <= 1;
+ }
+
+};
+
+template<typename T>
+struct SelectiveComparer<T, false>
+{
+ static bool Compare(T a, T b)
+ {
+ // If a or b is zero, percent_tolerance does an exact match, so compare to a small, constant tolerance instead.
+ if (a == 0.0f || b == 0.0f)
+ {
+ return std::abs(a - b) <= g_FloatCloseToZeroTolerance;
+ }
+
+ if (std::isinf(a) && a == b)
+ {
+ return true;
+ }
+
+ if (std::isnan(a) && std::isnan(b))
+ {
+ return true;
+ }
+
+ // For unquantized floats we use a tolerance of 1%.
+ return armnnUtils::within_percentage_tolerance(a, b);
+ }
+};
+
+template<typename T>
+bool SelectiveCompare(T a, T b)
+{
+ return SelectiveComparer<T, armnn::IsQuantizedType<T>()>::Compare(a, b);
+};
+
+template<typename T>
+bool SelectiveCompareBoolean(T a, T b)
+{
+ return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));
+};
+
+template <typename T>
+armnn::PredicateResult CompareTensors(const std::vector<T>& actualData,
+ const std::vector<T>& expectedData,
+ const armnn::TensorShape& actualShape,
+ const armnn::TensorShape& expectedShape,
+ bool compareBoolean = false,
+ bool isDynamic = false)
+{
+ if (actualData.size() != expectedData.size())
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different data size ["
+ << actualData.size()
+ << "!="
+ << expectedData.size()
+ << "]";
+ return res;
+ }
+
+ if (actualShape.GetNumDimensions() != expectedShape.GetNumDimensions())
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different number of dimensions ["
+ << actualShape.GetNumDimensions()
+ << "!="
+ << expectedShape.GetNumDimensions()
+ << "]";
+ return res;
+ }
+
+ if (actualShape.GetNumElements() != expectedShape.GetNumElements())
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different number of elements ["
+ << actualShape.GetNumElements()
+ << "!="
+ << expectedShape.GetNumElements()
+ << "]";
+ return res;
+ }
+
+ unsigned int numberOfDimensions = actualShape.GetNumDimensions();
+
+ if (!isDynamic)
+ {
+ // Check that the shapes match.
+ for (unsigned int i = 0; i < numberOfDimensions; ++i)
+ {
+ if (actualShape[i] != expectedShape[i])
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different shapes ["
+ << actualShape[i]
+ << "!="
+ << expectedShape[i]
+ << "]";
+ return res;
+ }
+ }
+ }
+
+ // Fun iteration over n dimensions.
+ std::vector<unsigned int> indices;
+ for (unsigned int i = 0; i < numberOfDimensions; i++)
+ {
+ indices.emplace_back(0);
+ }
+
+ std::stringstream errorString;
+ int numFailedElements = 0;
+ constexpr int maxReportedDifferences = 3;
+ unsigned int index = 0;
+
+ // Compare data element by element.
+ while (true)
+ {
+ bool comparison;
+ // Because any non-zero uint8_t value (1-255) is treated as true, Booleans need a dedicated comparison.
+ if(compareBoolean)
+ {
+ comparison = SelectiveCompareBoolean(actualData[index], expectedData[index]);
+ }
+ else
+ {
+ comparison = SelectiveCompare(actualData[index], expectedData[index]);
+ }
+
+ if (!comparison)
+ {
+ ++numFailedElements;
+
+ if (numFailedElements <= maxReportedDifferences)
+ {
+ if (numFailedElements >= 2)
+ {
+ errorString << ", ";
+ }
+ errorString << "[";
+ for (unsigned int i = 0; i < numberOfDimensions; ++i)
+ {
+ errorString << indices[i];
+ if (i != numberOfDimensions - 1)
+ {
+ errorString << ",";
+ }
+ }
+ errorString << "]";
+
+ errorString << " (" << +actualData[index] << " != " << +expectedData[index] << ")";
+ }
+ }
+
+ ++indices[numberOfDimensions - 1];
+ for (unsigned int i=numberOfDimensions-1; i>0; i--)
+ {
+ if (indices[i] == actualShape[i])
+ {
+ indices[i] = 0;
+ ++indices[i - 1];
+ }
+ }
+ if (indices[0] == actualShape[0])
+ {
+ break;
+ }
+
+ index++;
+ }
+
+ armnn::PredicateResult comparisonResult(true);
+ if (numFailedElements > 0)
+ {
+ comparisonResult.SetResult(false);
+ comparisonResult.Message() << numFailedElements << " different values at: ";
+ if (numFailedElements > maxReportedDifferences)
+ {
+ errorString << ", ... (and " << (numFailedElements - maxReportedDifferences) << " other differences)";
+ }
+ comparisonResult.Message() << errorString.str();
+ }
+
+ return comparisonResult;
+}
+
+template <typename T>
+std::vector<T> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
+ unsigned int seed,
+ float min = -10.0f,
+ float max = 10.0f)
+{
+ std::mt19937 gen(seed);
+ std::uniform_real_distribution<float> dist(min, max);
+
+ std::vector<float> init(tensorInfo.GetNumElements());
+ for (unsigned int i = 0; i < init.size(); i++)
+ {
+ init[i] = dist(gen);
+ }
+
+ const float qScale = tensorInfo.GetQuantizationScale();
+ const int32_t qOffset = tensorInfo.GetQuantizationOffset();
+
+ return armnnUtils::QuantizedVector<T>(init, qScale, qOffset);
+}
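A short sketch of how MakeRandomTensor and CompareTensors compose (illustrative only, not part of this change; the tensor shape and quantization parameters are arbitrary):

// Illustrative sketch only -- not part of the committed change.
TEST_CASE("RandomQuantizedTensorComparesEqualToItself")
{
    // Arbitrary shape and quantization parameters chosen for the example.
    armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::QAsymmU8, 0.25f, 10);

    std::vector<uint8_t> data = MakeRandomTensor<uint8_t>(info, /*seed=*/42);

    // For quantized types SelectiveCompare allows an off-by-one difference,
    // so a tensor always compares equal to itself.
    auto result = CompareTensors(data, data, info.GetShape(), info.GetShape());
    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
}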
diff --git a/include/armnnUtils/QuantizeHelper.hpp b/include/armnnUtils/QuantizeHelper.hpp
new file mode 100644
index 0000000000..231b8411cb
--- /dev/null
+++ b/include/armnnUtils/QuantizeHelper.hpp
@@ -0,0 +1,132 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <BFloat16.hpp>
+#include <Half.hpp>
+
+#include <initializer_list>
+#include <iterator>
+#include <vector>
+
+namespace armnnUtils
+{
+
+template<typename T, bool DoQuantize=true>
+struct SelectiveQuantizer
+{
+ static T Quantize(float value, float scale, int32_t offset)
+ {
+ return armnn::Quantize<T>(value, scale, offset);
+ }
+
+ static float Dequantize(T value, float scale, int32_t offset)
+ {
+ return armnn::Dequantize(value, scale, offset);
+ }
+};
+
+template<typename T>
+struct SelectiveQuantizer<T, false>
+{
+ static T Quantize(float value, float scale, int32_t offset)
+ {
+ armnn::IgnoreUnused(scale, offset);
+ return value;
+ }
+
+ static float Dequantize(T value, float scale, int32_t offset)
+ {
+ armnn::IgnoreUnused(scale, offset);
+ return value;
+ }
+};
+
+template<>
+struct SelectiveQuantizer<armnn::Half, false>
+{
+ static armnn::Half Quantize(float value, float scale, int32_t offset)
+ {
+ armnn::IgnoreUnused(scale, offset);
+ return armnn::Half(value);
+ }
+
+ static float Dequantize(armnn::Half value, float scale, int32_t offset)
+ {
+ armnn::IgnoreUnused(scale, offset);
+ return value;
+ }
+};
+
+template<>
+struct SelectiveQuantizer<armnn::BFloat16, false>
+{
+ static armnn::BFloat16 Quantize(float value, float scale, int32_t offset)
+ {
+ armnn::IgnoreUnused(scale, offset);
+ return armnn::BFloat16(value);
+ }
+
+ static float Dequantize(armnn::BFloat16 value, float scale, int32_t offset)
+ {
+ armnn::IgnoreUnused(scale, offset);
+ return value;
+ }
+};
+
+template<typename T>
+T SelectiveQuantize(float value, float scale, int32_t offset)
+{
+ return SelectiveQuantizer<T, armnn::IsQuantizedType<T>()>::Quantize(value, scale, offset);
+};
+
+template<typename T>
+float SelectiveDequantize(T value, float scale, int32_t offset)
+{
+ return SelectiveQuantizer<T, armnn::IsQuantizedType<T>()>::Dequantize(value, scale, offset);
+};
+
+template<typename ItType>
+struct IsFloatingPointIterator
+{
+ static constexpr bool value=std::is_floating_point<typename std::iterator_traits<ItType>::value_type>::value;
+};
+
+template <typename T, typename FloatIt,
+typename std::enable_if<IsFloatingPointIterator<FloatIt>::value, int>::type=0 // Makes sure fp iterator is valid.
+>
+std::vector<T> QuantizedVector(FloatIt first, FloatIt last, float qScale, int32_t qOffset)
+{
+ std::vector<T> quantized;
+ quantized.reserve(armnn::numeric_cast<size_t>(std::distance(first, last)));
+
+ for (auto it = first; it != last; ++it)
+ {
+ auto f = *it;
+ T q = SelectiveQuantize<T>(f, qScale, qOffset);
+ quantized.push_back(q);
+ }
+
+ return quantized;
+}
+
+template<typename T>
+std::vector<T> QuantizedVector(const std::vector<float>& array, float qScale = 1.f, int32_t qOffset = 0)
+{
+ return QuantizedVector<T>(array.begin(), array.end(), qScale, qOffset);
+}
+
+template<typename T>
+std::vector<T> QuantizedVector(std::initializer_list<float> array, float qScale = 1.f, int32_t qOffset = 0)
+{
+ return QuantizedVector<T>(array.begin(), array.end(), qScale, qOffset);
+}
+
+} // namespace armnnUtils
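Finally, a standalone sketch of QuantizedVector's behaviour for quantized versus floating-point element types (illustrative only, not part of this change):

// Illustrative sketch only -- not part of the committed change.
#include <armnnUtils/QuantizeHelper.hpp>

#include <cassert>
#include <vector>

int main()
{
    // QAsymmU8: each value is quantized as round(value / scale) + offset,
    // e.g. 1.5f with scale 0.5 and offset 2 becomes 5.
    std::vector<uint8_t> q = armnnUtils::QuantizedVector<uint8_t>({ 1.5f, 2.0f }, 0.5f, 2);
    assert(q[0] == 5 && q[1] == 6);

    // Float32: SelectiveQuantizer<float, false> ignores scale/offset and the
    // values pass through unchanged.
    std::vector<float> f = armnnUtils::QuantizedVector<float>({ 1.5f, 2.0f }, 0.5f, 2);
    assert(f[0] == 1.5f && f[1] == 2.0f);

    return 0;
}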