Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp')
-rw-r--r--  src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp | 244
1 file changed, 191 insertions(+), 53 deletions(-)
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index b52dcd5303..153afd9cd7 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -44,16 +44,18 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
- LayerTestResult<float,4> ret(outputTensorInfo);
-
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
+ std::vector<float> input =
+ {
// Batch #0
1.0f, 2.0f,
3.0f, 4.0f,
// Batch #1
5.0f, 6.0f,
7.0f, 8.0f
- }));
+ };
+
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
float alpha = 1.f;
float beta = 1.f;
@@ -75,7 +77,7 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
data.m_Parameters.m_K = kappa;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
- armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
armnn::NormalizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
@@ -85,11 +87,11 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
switch (normMethod)
{
@@ -104,23 +106,34 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
// pow((kappa + (accumulatedScale * alpha)), beta)
// ...where accumulatedScale is the sum of every element squared.
float divisor[inputNum];
- for(int i = 0; i < armnn::numeric_cast<int>(inputNum); i++)
+
+ float accumulatedScale1 = 0.0f;
+ for (size_t i = 0; i < input.size()/2; ++i)
+ {
+ accumulatedScale1 += input[i]*input[i];
+ }
+
+ float accumulatedScale2 = 0.0f;
+ for (size_t i = input.size()/2; i < input.size(); ++i)
{
- float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
- input[i][0][0][1]*input[i][0][0][1] +
- input[i][0][1][0]*input[i][0][1][0] +
- input[i][0][1][1]*input[i][0][1][1];
- divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
+ accumulatedScale2 += input[i]*input[i];
}
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
- std::vector<float>({input[0][0][0][0]/divisor[0],
- input[0][0][0][1]/divisor[0],
- input[0][0][1][0]/divisor[0],
- input[0][0][1][1]/divisor[0],
- input[1][0][0][0]/divisor[1],
- input[1][0][0][1]/divisor[1],
- input[1][0][1][0]/divisor[1],
- input[1][0][1][1]/divisor[1]}));
+
+ divisor[0] = powf((kappa + accumulatedScale1 * alpha), beta);
+ divisor[1] = powf((kappa + accumulatedScale2 * alpha), beta);
+
+ std::vector<float> output;
+ unsigned int divisorIndex = 0;
+ for (size_t i = 0; i < input.size(); ++i)
+ {
+ if (i == input.size()/2)
+ {
+ divisorIndex++;
+ }
+ output.emplace_back(input[i]/divisor[divisorIndex]);
+ }
+
+ expectedOutput = output;
break;
}
case armnn::NormalizationAlgorithmChannel::Across:
@@ -131,19 +144,14 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
// ...where adjacent channels means within half the normSize for the channel
// The test data has only one channel, so this is simplified below.
std::vector<float> outputVector;
- for (int n = 0; n < armnn::numeric_cast<int>(inputNum); ++n)
+
+ for (unsigned int i = 0; i < input.size(); ++i)
{
- for (int h = 0; h < armnn::numeric_cast<int>(inputHeight); ++h)
- {
- for (int w = 0; w < armnn::numeric_cast<int>(inputWidth); ++w)
- {
- float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
- float scale = powf((kappa + accumulatedScale * alpha), -beta);
- outputVector.push_back(input[n][0][h][w] * scale);
- }
- }
+ float accumulatedScale = input[i]*input[i];
+ float scale = powf((kappa + accumulatedScale * alpha), -beta);
+ outputVector.push_back(input[i] * scale);
}
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
+ expectedOutput = outputVector;
break;
}
default:
@@ -162,7 +170,10 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
}
}
- return ret;
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
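
For reference, the rewritten Within-channel branch computes one divisor per batch, pow(kappa + alpha * sum(x^2), beta) over that batch's four elements, and the Across branch (with a single channel) degenerates to a per-element scale of pow(kappa + alpha * x^2, -beta). A minimal standalone sketch of both reference computations follows; the helper names are illustrative and not part of Arm NN.

// Standalone sketch (not part of the patch) of the reference math the
// rewritten loops implement. Helper names are illustrative only.
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> WithinChannelReference(const std::vector<float>& in,
                                          float alpha, float beta, float kappa)
{
    // One divisor per batch: pow(kappa + alpha * sum(x^2), beta),
    // where the sum runs over every element of that batch.
    const std::size_t half = in.size() / 2;
    float scale0 = 0.0f;
    float scale1 = 0.0f;
    for (std::size_t i = 0; i < half; ++i)         { scale0 += in[i] * in[i]; }
    for (std::size_t i = half; i < in.size(); ++i) { scale1 += in[i] * in[i]; }

    const float divisor[2] = { std::pow(kappa + alpha * scale0, beta),
                               std::pow(kappa + alpha * scale1, beta) };

    std::vector<float> out;
    for (std::size_t i = 0; i < in.size(); ++i)
    {
        out.push_back(in[i] / divisor[i < half ? 0 : 1]);
    }
    return out;
}

std::vector<float> AcrossChannelReference(const std::vector<float>& in,
                                          float alpha, float beta, float kappa)
{
    // With a single channel the "adjacent channels" window contains only
    // the element itself: scale = pow(kappa + alpha * x^2, -beta).
    std::vector<float> out;
    for (float x : in)
    {
        out.push_back(x * std::pow(kappa + alpha * x * x, -beta));
    }
    return out;
}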
LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
@@ -188,16 +199,18 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
- LayerTestResult<float,4> ret(outputTensorInfo);
-
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
+ std::vector<float> input =
+ {
// Batch #0
1.0f, 2.0f,
3.0f, 4.0f,
// Batch #1
5.0f, 6.0f,
7.0f, 8.0f
- }));
+ };
+
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
float alpha = 1.f;
float beta = 1.f;
@@ -219,7 +232,7 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
data.m_Parameters.m_K = kappa;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
- armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
armnn::NormalizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
@@ -229,11 +242,11 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
switch (normMethod)
{
@@ -243,9 +256,8 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
{
case armnn::NormalizationAlgorithmChannel::Across:
{
- std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
- 0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
+ expectedOutput = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
+ 0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
break;
}
default:
@@ -264,7 +276,10 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
}
}
- return ret;
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
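
The hard-coded NHWC expectations above follow from the same LocalBrightness formula with alpha = beta = kappa = 1 and a single channel, which reduces to x / (1 + x^2) per element: 1/2 = 0.5, 2/5 = 0.4, 3/10 = 0.3, 4/17 ≈ 0.235294, and so on up to 8/65 ≈ 0.123077. A small standalone check, as a sketch:

// Sketch verifying the hard-coded NHWC expectations: with
// alpha = beta = kappa = 1 and one channel, LocalBrightness Across
// reduces to x / (1 + x * x) per element.
#include <cassert>
#include <cmath>

int main()
{
    const float inputs[]   = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
    const float expected[] = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
                               0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
    for (int i = 0; i < 8; ++i)
    {
        assert(std::fabs(inputs[i] / (1.f + inputs[i] * inputs[i]) - expected[i]) < 1e-6f);
    }
    return 0;
}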
LayerTestResult<float,4> CompareNormalizationTestImpl(
@@ -297,7 +312,10 @@ LayerTestResult<float,4> CompareNormalizationTestImpl(
LayerTestResult<float,4> ret(outputTensorInfo);
- auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);
+ auto input = MakeRandomTensor<float>(inputTensorInfo, 111234);
+
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
constexpr float alpha = 1.f;
constexpr float beta = 1.f;
@@ -330,9 +348,9 @@ LayerTestResult<float,4> CompareNormalizationTestImpl(
armnn::BackendId backend = workloadFactory.GetBackendId();
const size_t reasonIfUnsupportedMaxLen = 255;
char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
- ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
- reasonIfUnsupported, reasonIfUnsupportedMaxLen);
- if (!ret.supported)
+ ret.m_Supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
+ reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+ if (!ret.m_Supported)
{
return ret;
}
@@ -346,19 +364,125 @@ LayerTestResult<float,4> CompareNormalizationTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
- CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(inputHandleRef.get(), input.data());
ExecuteWorkload(*workload, memoryManager);
workloadRef->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
+ ret.m_ActualData = actualOutput;
+ ret.m_ExpectedData = expectedOutput;
return ret;
}
+LayerTestResult<float,4> AcrossChannelNormalizationTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ armnn::NormalizationAlgorithmChannel normChannel,
+ armnn::NormalizationAlgorithmMethod normMethod)
+{
+ const unsigned int inputHeight = 1;
+ const unsigned int inputWidth = 2;
+ const unsigned int inputChannels = 3;
+ const unsigned int inputNum = 2;
+
+ unsigned int outputHeight = inputHeight;
+ unsigned int outputWidth = inputWidth;
+ unsigned int outputChannels = inputChannels;
+ unsigned int outputNum = inputNum;
+
+ unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
+ unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
+
+ auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+ auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+ std::vector<float> input =
+ {
+ // Batch #0
+ -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
+ // Batch #1
+ -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
+ };
+
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
+
+ float alpha = 4.f;
+ float beta = 0.5f;
+ float kappa = 9.f;
+ uint32_t normSize = 5;
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::NormalizationQueueDescriptor data;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+ data.m_Parameters.m_NormChannelType = normChannel;
+ data.m_Parameters.m_NormMethodType = normMethod;
+ data.m_Parameters.m_NormSize = normSize;
+ data.m_Parameters.m_Alpha = alpha;
+ data.m_Parameters.m_Beta = beta;
+ data.m_Parameters.m_K = kappa;
+ data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
+
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
+ armnn::NormalizationQueueDescriptor refData = data;
+ armnn::WorkloadInfo refInfo = info;
+ SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ switch (normMethod)
+ {
+ case armnn::NormalizationAlgorithmMethod::LocalBrightness:
+ {
+ switch (normChannel)
+ {
+ case armnn::NormalizationAlgorithmChannel::Across:
+ {
+ expectedOutput = { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f,
+ -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, };
+ break;
+ }
+ default:
+ {
+ throw armnn::UnimplementedException("Unsupported normalisation channel type, "
+ "only Across and Within are supported");
+ }
+ }
+ break;
+ }
+ case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
+ default:
+ {
+ throw armnn::UnimplementedException("Unsupported normalisation method type, "
+ "only LocalBrightness is supported");
+ }
+ }
+
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
+}
+
} // anonymous namespace
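
In the new AcrossChannelNormalizationTestImpl, normSize = 5 gives a half-window of two channels on either side, so with three channels every window spans the whole channel axis and each NHWC pixel is scaled by pow(kappa + alpha * sum_c(x_c^2), -beta). For the first pixel of batch #0: sum = (-2.1)^2 + 2.6^2 + 1.7^2 = 14.06, scale = (9 + 4 * 14.06)^-0.5 ≈ 0.123806, and -2.1 * 0.123806 ≈ -0.259993, the first expected value. A standalone sketch of that reference computation (the helper name is illustrative):

// Sketch of the Across-channel reference math behind the expected values:
// with normSize = 5 the window spans all three channels, so each pixel's
// channels share one accumulated scale. Standalone; not part of the patch.
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> AcrossAllChannelsReference(const std::vector<float>& nhwc,
                                              std::size_t channels,
                                              float alpha, float beta, float kappa)
{
    std::vector<float> out(nhwc.size());
    for (std::size_t pixel = 0; pixel < nhwc.size() / channels; ++pixel)
    {
        float accumulatedScale = 0.0f;
        for (std::size_t c = 0; c < channels; ++c)
        {
            const float x = nhwc[pixel * channels + c];
            accumulatedScale += x * x;
        }
        const float scale = std::pow(kappa + alpha * accumulatedScale, -beta);
        for (std::size_t c = 0; c < channels; ++c)
        {
            out[pixel * channels + c] = nhwc[pixel * channels + c] * scale;
        }
    }
    return out;
}

// AcrossAllChannelsReference({-2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f}, 3, 4.f, 0.5f, 9.f)
// yields { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f }.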
LayerTestResult<float,4> SimpleNormalizationAcrossTest(
@@ -405,3 +529,17 @@ LayerTestResult<float,4> CompareNormalizationTest(
workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
normChannel, normMethod);
}
+
+LayerTestResult<float,4> AcrossChannelNormalizationTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+ auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
+ return AcrossChannelNormalizationTestImpl(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ normChannel,
+ normMethod);
+}