aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp
diff options
context:
space:
mode:
author    Tracy Narine <tracy.narine@arm.com>    2023-09-20 14:19:07 +0100
committer Tracy Narine <tracy.narine@arm.com>    2023-09-28 14:25:16 +0100
commit   6440ce89abb06e090d2b3cf91bafc14277072475 (patch)
tree     c55682891a0f01f3edbf5dad58720ded7af3fc64 /src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp
parent   9a418d850333119e219fb05addc57b56cdc60a7e (diff)
download armnn-6440ce89abb06e090d2b3cf91bafc14277072475.tar.gz
IVGCVSW-7504 Create a backend specific optimization to fuse ADD+MUL+Add+(Activation) in CpuAcc
* Adding CpuAcc backend optimization to fuse add+mul+add into one layer
* Tests added/enhanced
* Also added optional extended parameter to Graph::Print() and throw macros that could be used in place of assert

Signed-off-by: Tracy Narine <tracy.narine@arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I5f8d094b969a130d8c2c7b4da07426313a9fea76
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp')
-rw-r--r--  src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp  18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp
index 9dece9be3b..39d2219954 100644
--- a/src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp
@@ -39,18 +39,18 @@ std::vector<LayerTestResult<T,4>> AddMulAddTest(armnn::IWorkloadFactory& workloa
if (IsQuantizedType<T>())
{
input0TensorInfo.SetQuantizationScale(0.25f);
- input0TensorInfo.SetQuantizationOffset(128);
+ input0TensorInfo.SetQuantizationOffset(10);
input1TensorInfo.SetQuantizationScale(0.25f);
- input1TensorInfo.SetQuantizationOffset(128);
+ input1TensorInfo.SetQuantizationOffset(11);
mulInput1TensorInfo.SetQuantizationScale(0.25f);
- mulInput1TensorInfo.SetQuantizationOffset(128);
+ mulInput1TensorInfo.SetQuantizationOffset(12);
addInput1TensorInfo.SetQuantizationScale(0.25f);
- addInput1TensorInfo.SetQuantizationOffset(128);
+ addInput1TensorInfo.SetQuantizationOffset(13);
output0TensorInfo.SetQuantizationScale(0.5f);
- output0TensorInfo.SetQuantizationOffset(120);
+ output0TensorInfo.SetQuantizationOffset(14);
output1TensorInfo.SetQuantizationScale(0.5f);
- output1TensorInfo.SetQuantizationOffset(120);
+ output1TensorInfo.SetQuantizationOffset(15);
}
std::vector<float> input0Data
@@ -140,6 +140,12 @@ std::vector<LayerTestResult<T,4>> AddMulAddTest(armnn::IWorkloadFactory& workloa
}
AddOutputToWorkload(fusedQueueDescriptor, info, output1TensorInfo, output1Handle.get());
+ if (addOutput)
+ {
+ AddOutputToWorkload(fusedQueueDescriptor, info, output0TensorInfo, output0Handle.get());
+ }
+ AddOutputToWorkload(fusedQueueDescriptor, info, output1TensorInfo, output1Handle.get());
+
std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Fused,
fusedQueueDescriptor,
info);