path: root/delegate/src/test/ConvolutionTestHelper.hpp
author     Jan Eilers <jan.eilers@arm.com>    2021-04-06 17:29:03 +0100
committer  Jan Eilers <jan.eilers@arm.com>    2021-04-21 11:18:29 +0000
commit     7612bd6cc385dfbf54f831a6349f3a9363c6d0a2 (patch)
tree       be63c7085e8802285473d10da8a7258a2600a378 /delegate/src/test/ConvolutionTestHelper.hpp
parent     4af561666b0ce5c12164447a5f7eb9722abb85f8 (diff)
IVGCVSW-5842 Remove cross-wiring in depthwise
* Reading tensor infos won't allow a permutation vector anymore. The
  permutation only changed the quantization dimension, not the shape,
  and was therefore misleading.
* The permutation of the full tensor info is now performed in
  armnnUtils::Permuted.
* Changed the TfLite Parser depthwise parsing function.
* Added unit tests to the TfLite Parser with more random data.
* Changed the TfLite Delegate depthwise parsing function.
* Added a unit test to the delegate with per-channel quantization.

!android-nn-driver:5412

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: I1f985ee69547bcaf16a72201e00a6b6fe1ef9a97
Diffstat (limited to 'delegate/src/test/ConvolutionTestHelper.hpp')
-rw-r--r--  delegate/src/test/ConvolutionTestHelper.hpp | 50
1 file changed, 35 insertions(+), 15 deletions(-)
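The central point of the change is that per-channel (per-axis) quantization attaches one scale per index along a designated quantization dimension, so a permutation has to move that dimension index along with the scales rather than the filter data itself. As a rough, self-contained illustration of per-axis dequantization (a sketch of the concept, not ArmNN's or TfLite's implementation; the row-major layout and zero offsets are assumptions):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Dequantize a per-channel (per-axis) quantized tensor stored in row-major order.
// For a depthwise filter of shape [1, H, W, O] with quantizationDim == 3, every
// element whose index along dimension 3 is c uses scales[c]; offsets are taken
// to be zero, as is usual for signed per-channel weights.
std::vector<float> DequantizePerChannel(const std::vector<int8_t>& data,
                                        const std::vector<uint32_t>& shape,
                                        const std::vector<float>& scales,
                                        uint32_t quantizationDim)
{
    assert(quantizationDim < shape.size());
    assert(scales.size() == shape[quantizationDim]);

    // Number of elements covered by one step along quantizationDim (row-major layout).
    size_t innerStride = 1;
    for (size_t d = quantizationDim + 1; d < shape.size(); ++d)
    {
        innerStride *= shape[d];
    }

    std::vector<float> dequantized(data.size());
    for (size_t i = 0; i < data.size(); ++i)
    {
        // The index of this element along the quantization axis selects the scale.
        size_t channel = (i / innerStride) % shape[quantizationDim];
        dequantized[i] = static_cast<float>(data[i]) * scales[channel];
    }
    return dequantized;
}

Permuting the full tensor info then means permuting the shape and remapping the quantization dimension consistently, which (per the commit message) now happens in armnnUtils::Permuted instead of only touching the quantization dimension when reading tensor infos.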
diff --git a/delegate/src/test/ConvolutionTestHelper.hpp b/delegate/src/test/ConvolutionTestHelper.hpp
index b2a3c889e6..1b33c1d74d 100644
--- a/delegate/src/test/ConvolutionTestHelper.hpp
+++ b/delegate/src/test/ConvolutionTestHelper.hpp
@@ -34,13 +34,16 @@ std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOpe
const std::vector <int32_t>& outputTensorShape,
const std::vector <T>& filterData,
const std::vector <B>& biasData,
- float filterScale = 1.0f,
- int filterOffset = 0,
+ const std::vector<float> biasScales = {1.0f},
+ const std::vector<int64_t> biasOffsets = {0},
+ const std::vector<float> filterScales = {1.0f},
+ const std::vector<int64_t> filterOffsets = {0},
float outputQuantScale = 2.0f,
int outputQuantOffset = 0,
float quantScale = 1.0f,
int quantOffset = 0,
- int32_t depth_multiplier = 1)
+ int32_t depth_multiplier = 1,
+ int32_t filterQuantizationDim = 0)
{
using namespace tflite;
flatbuffers::FlatBufferBuilder flatBufferBuilder;
@@ -67,12 +70,23 @@ std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOpe
0,
flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
+
auto filterQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ filterScale }),
- flatBufferBuilder.CreateVector<int64_t>({ filterOffset }));
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>(filterScales),
+ flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
+ tflite::QuantizationDetails_NONE,
+ 0,
+ filterQuantizationDim);
+
+ auto biasQuantizationParameters =
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>(biasScales),
+ flatBufferBuilder.CreateVector<int64_t>(biasOffsets));
std::array<flatbuffers::Offset<Tensor>, 4> tensors;
tensors[0] = CreateTensor(flatBufferBuilder,
@@ -100,7 +114,7 @@ std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOpe
biasTensorType,
2,
flatBufferBuilder.CreateString("bias"),
- quantizationParameters);
+ biasQuantizationParameters);
tensors[3] = CreateTensor(flatBufferBuilder,
flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
outputTensorShape.size()),
@@ -192,13 +206,16 @@ void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode,
std::vector<T>& expectedOutputValues,
const std::vector<int32_t>& biasShape = {},
const std::vector<B>& biasValues = {},
- float filterScale = 1.0f,
- int filterOffset = 0,
+ const std::vector<float> biasScales = {1.0f},
+ const std::vector<int64_t> biasOffsets = {0},
+ const std::vector<float> filterScales = {1.0f},
+ const std::vector<int64_t> filterOffsets = {0},
float outputQuantScale = 2.0f,
int outputQuantOffset = 0,
float quantScale = 1.0f,
int quantOffset = 0,
- int32_t depth_multiplier = 1)
+ int32_t depth_multiplier = 1,
+ int32_t filterQuantizationDim = 3)
{
using namespace tflite;
@@ -218,13 +235,16 @@ void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode,
outputShape,
filterValues,
biasValues,
- filterScale,
- filterOffset,
+ biasScales,
+ biasOffsets,
+ filterScales,
+ filterOffsets,
outputQuantScale,
outputQuantOffset,
quantScale,
quantOffset,
- depth_multiplier);
+ depth_multiplier,
+ filterQuantizationDim);
const Model* tfLiteModel = GetModel(modelBuffer.data());
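For a test with per-channel quantization like the one this patch adds to the delegate, each output channel of the filter carries its own symmetric scale with a zero offset, and the bias is conventionally quantized per channel with scale inputScale * filterScale[c]. A minimal sketch of deriving such filter scales from float data, assuming a [1, H, W, outputChannels] layout with quantization dimension 3 (the helper name and the layout are illustrative, not taken from the patch):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Compute one symmetric int8 scale per output channel of a depthwise filter laid
// out as [1, H, W, outputChannels]: scale[c] = maxAbs(channel c) / 127, offset 0.
std::vector<float> ComputePerChannelScales(const std::vector<float>& filterData,
                                           uint32_t outputChannels)
{
    std::vector<float> maxAbs(outputChannels, 0.0f);
    for (size_t i = 0; i < filterData.size(); ++i)
    {
        // The output-channel dimension is innermost, so it cycles fastest.
        uint32_t channel = static_cast<uint32_t>(i % outputChannels);
        maxAbs[channel] = std::max(maxAbs[channel], std::fabs(filterData[i]));
    }

    std::vector<float> scales(outputChannels);
    for (uint32_t c = 0; c < outputChannels; ++c)
    {
        scales[c] = maxAbs[c] > 0.0f ? maxAbs[c] / 127.0f : 1.0f;
    }
    return scales;
}

The resulting vector is the kind of value a caller would pass as filterScales (with filterOffsets all zero and filterQuantizationDim = 3) to the updated helpers above.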