Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/fixtures/DepthConcatenateLayerFixture.h | 45
-rw-r--r--  tests/validation/fixtures/WidthConcatenateLayerFixture.h | 44
-rw-r--r--  tests/validation/reference/DepthConcatenateLayer.cpp     | 30
-rw-r--r--  tests/validation/reference/DepthConcatenateLayer.h       |  4
-rw-r--r--  tests/validation/reference/WidthConcatenateLayer.cpp     | 31
-rw-r--r--  tests/validation/reference/WidthConcatenateLayer.h       |  4
6 files changed, 101 insertions(+), 57 deletions(-)
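
Note on the change: the concatenate validation fixtures below now exercise per-tensor quantization. Each fixture builds a vector of num_tensors + 1 QuantizationInfo objects, where the first num_tensors entries describe the input tensors and the last entry describes the output; every entry uses a scale of 1/255 and a random offset in [0, 20]. A minimal standalone sketch of that layout, using a simplified stand-in for the library's QuantizationInfo class (the struct and helper names below are illustrative only, not the library API):

#include <random>
#include <vector>

// Simplified stand-in for arm_compute's QuantizationInfo, for illustration only.
struct QuantizationInfo
{
    float scale{ 1.f };
    int   offset{ 0 };
};

// Builds the qinfo vector the way the fixtures do: entries [0, num_tensors) are the
// input quantization infos, entry [num_tensors] is the output quantization info.
std::vector<QuantizationInfo> make_concat_qinfo(int num_tensors, std::mt19937 &gen)
{
    std::uniform_int_distribution<> offset_dis(0, 20);
    std::vector<QuantizationInfo>   qinfo(num_tensors + 1);
    for(auto &qi : qinfo)
    {
        qi = QuantizationInfo{ 1.f / 255.f, offset_dis(gen) };
    }
    return qinfo;
}
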
diff --git a/tests/validation/fixtures/DepthConcatenateLayerFixture.h b/tests/validation/fixtures/DepthConcatenateLayerFixture.h
index 5fdfacbb76..edeefa228a 100644
--- a/tests/validation/fixtures/DepthConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/DepthConcatenateLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,9 +53,22 @@ public:
// Create input shapes
std::mt19937 gen(library->seed());
std::uniform_int_distribution<> num_dis(2, 4);
- const int num_tensors = num_dis(gen);
+ std::uniform_int_distribution<> offset_dis(0, 20);
+
+ const int num_tensors = num_dis(gen);
+
+ std::vector<TensorShape> shapes(num_tensors, shape);
+
+ // vector holding the quantization info:
+ // the last element is the output quantization info
+ // all other elements are the quantization info for the input tensors
+ std::vector<QuantizationInfo> qinfo(num_tensors + 1, QuantizationInfo());
+
+ for(auto &qi : qinfo)
+ {
+ qi = QuantizationInfo(1.f / 255.f, offset_dis(gen));
+ }
- std::vector<TensorShape> shapes(num_tensors, shape);
std::uniform_int_distribution<> depth_dis(1, 3);
std::bernoulli_distribution mutate_dis(0.5f);
std::uniform_real_distribution<> change_dis(-0.25f, 0.f);
@@ -82,8 +95,8 @@ public:
}
}
- _target = compute_target(shapes, data_type);
- _reference = compute_reference(shapes, data_type);
+ _target = compute_target(shapes, qinfo, data_type);
+ _reference = compute_reference(shapes, qinfo, data_type);
}
protected:
@@ -93,7 +106,7 @@ protected:
library->fill_tensor_uniform(tensor, i);
}
- TensorType compute_target(std::vector<TensorShape> shapes, DataType data_type)
+ TensorType compute_target(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
{
std::vector<TensorType> srcs;
std::vector<ITensorType *> src_ptrs;
@@ -101,14 +114,14 @@ protected:
// Create tensors
srcs.reserve(shapes.size());
- for(const auto &shape : shapes)
+ for(size_t j = 0; j < shapes.size(); ++j)
{
- srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1));
+ srcs.emplace_back(create_tensor<TensorType>(shapes[j], data_type, 1, qinfo[j]));
src_ptrs.emplace_back(&srcs.back());
}
TensorShape dst_shape = misc::shape_calculator::calculate_depth_concatenate_shape(src_ptrs);
- TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1);
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, qinfo[shapes.size()]);
// Create and configure function
FunctionType depth_concat;
@@ -144,19 +157,21 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, DataType data_type)
+ SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
{
std::vector<SimpleTensor<T>> srcs;
// Create and fill tensors
- int i = 0;
- for(const auto &shape : shapes)
+ for(size_t j = 0; j < shapes.size(); ++j)
{
- srcs.emplace_back(shape, data_type, 1);
- fill(srcs.back(), i++);
+ srcs.emplace_back(shapes[j], data_type, 1, qinfo[j]);
+ fill(srcs.back(), j);
}
- return reference::depthconcatenate_layer<T>(srcs);
+ const TensorShape dst_shape = calculate_depth_concatenate_shape(shapes);
+ SimpleTensor<T> dst{ dst_shape, data_type, 1, qinfo[shapes.size()] };
+
+ return reference::depthconcatenate_layer<T>(srcs, dst);
}
TensorType _target{};
diff --git a/tests/validation/fixtures/WidthConcatenateLayerFixture.h b/tests/validation/fixtures/WidthConcatenateLayerFixture.h
index 1f79210350..47a03ed865 100644
--- a/tests/validation/fixtures/WidthConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/WidthConcatenateLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,9 +53,20 @@ public:
// Create input shapes
std::mt19937 gen(library->seed());
std::uniform_int_distribution<> num_dis(2, 8);
- const int num_tensors = num_dis(gen);
+ std::uniform_int_distribution<> offset_dis(0, 20);
- std::vector<TensorShape> shapes(num_tensors, shape);
+ const int num_tensors = num_dis(gen);
+
+ std::vector<TensorShape> shapes(num_tensors, shape);
+
+ // vector holding the quantization info:
+ // the last element is the output quantization info
+ // all other elements are the quantization info for the input tensors
+ std::vector<QuantizationInfo> qinfo(num_tensors + 1, QuantizationInfo());
+ for(auto &qi : qinfo)
+ {
+ qi = QuantizationInfo(1.f / 255.f, offset_dis(gen));
+ }
std::bernoulli_distribution mutate_dis(0.5f);
std::uniform_real_distribution<> change_dis(-0.25f, 0.f);
@@ -71,8 +82,8 @@ public:
}
}
- _target = compute_target(shapes, data_type);
- _reference = compute_reference(shapes, data_type);
+ _target = compute_target(shapes, qinfo, data_type);
+ _reference = compute_reference(shapes, qinfo, data_type);
}
protected:
@@ -82,7 +93,7 @@ protected:
library->fill_tensor_uniform(tensor, i);
}
- TensorType compute_target(std::vector<TensorShape> shapes, DataType data_type)
+ TensorType compute_target(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
{
std::vector<TensorType> srcs;
std::vector<ITensorType *> src_ptrs;
@@ -90,14 +101,15 @@ protected:
// Create tensors
srcs.reserve(shapes.size());
- for(const auto &shape : shapes)
+ for(size_t j = 0; j < shapes.size(); ++j)
{
- srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1));
+ srcs.emplace_back(create_tensor<TensorType>(shapes[j], data_type, 1, qinfo[j]));
src_ptrs.emplace_back(&srcs.back());
}
TensorShape dst_shape = misc::shape_calculator::calculate_width_concatenate_shape(src_ptrs);
- TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1);
+
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, qinfo[shapes.size()]);
// Create and configure function
FunctionType width_concat;
@@ -133,19 +145,21 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, DataType data_type)
+ SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
{
std::vector<SimpleTensor<T>> srcs;
// Create and fill tensors
- int i = 0;
- for(const auto &shape : shapes)
+ for(size_t j = 0; j < shapes.size(); ++j)
{
- srcs.emplace_back(shape, data_type, 1);
- fill(srcs.back(), i++);
+ srcs.emplace_back(shapes[j], data_type, 1, qinfo[j]);
+ fill(srcs.back(), j);
}
- return reference::widthconcatenate_layer<T>(srcs);
+ const TensorShape dst_shape = calculate_width_concatenate_shape(shapes);
+ SimpleTensor<T> dst{ dst_shape, data_type, 1, qinfo[shapes.size()] };
+
+ return reference::widthconcatenate_layer<T>(srcs, dst);
}
TensorType _target{};
diff --git a/tests/validation/reference/DepthConcatenateLayer.cpp b/tests/validation/reference/DepthConcatenateLayer.cpp
index 90fbd915b1..6551f0c79e 100644
--- a/tests/validation/reference/DepthConcatenateLayer.cpp
+++ b/tests/validation/reference/DepthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,7 +34,7 @@ namespace validation
namespace reference
{
template <typename T>
-SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
+SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs, SimpleTensor<T> &dst)
{
// Create reference
std::vector<TensorShape> shapes;
@@ -44,10 +44,6 @@ SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
shapes.emplace_back(src.shape());
}
- DataType dst_type = srcs.empty() ? DataType::UNKNOWN : srcs[0].data_type();
- TensorShape dst_shape = calculate_depth_concatenate_shape(shapes);
- SimpleTensor<T> dst(dst_shape, dst_type);
-
// Compute reference
int depth_offset = 0;
const int width_out = dst.shape().x();
@@ -80,8 +76,20 @@ SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
{
for(int r = 0; r < height; ++r)
{
- std::copy(src_ptr, src_ptr + width, dst.data() + offset_to_first_element + d * out_stride_z + r * width_out);
- src_ptr += width;
+ if(src.data_type() == DataType::QASYMM8 && src.quantization_info() != dst.quantization_info())
+ {
+ std::transform(src_ptr, src_ptr + width, dst.data() + offset_to_first_element + d * out_stride_z + r * width_out, [src, dst](T t)
+ {
+ const float dequantized_input = src.quantization_info().dequantize(t);
+ return dst.quantization_info().quantize(dequantized_input, RoundingPolicy::TO_NEAREST_UP);
+ });
+ src_ptr += width;
+ }
+ else
+ {
+ std::copy(src_ptr, src_ptr + width, dst.data() + offset_to_first_element + d * out_stride_z + r * width_out);
+ src_ptr += width;
+ }
}
}
}
@@ -92,9 +100,9 @@ SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
return dst;
}
-template SimpleTensor<uint8_t> depthconcatenate_layer(const std::vector<SimpleTensor<uint8_t>> &srcs);
-template SimpleTensor<float> depthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
-template SimpleTensor<half> depthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
+template SimpleTensor<uint8_t> depthconcatenate_layer(const std::vector<SimpleTensor<uint8_t>> &srcs, SimpleTensor<uint8_t> &dst);
+template SimpleTensor<float> depthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs, SimpleTensor<float> &dst);
+template SimpleTensor<half> depthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs, SimpleTensor<half> &dst);
} // namespace reference
} // namespace validation
} // namespace test
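
Note on the copy loop above: when a QASYMM8 source carries a different QuantizationInfo than the destination, each element is dequantized with the source parameters and re-quantized with the destination parameters using RoundingPolicy::TO_NEAREST_UP; otherwise the raw bytes are copied unchanged. A standalone sketch of that per-element step, assuming the usual asymmetric quantization formulas (real = scale * (q - offset) and q = real / scale + offset); the helper name and explicit clamping are illustrative, not the library API:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Re-expresses a QASYMM8 value given with one (scale, offset) pair in terms of
// another, mirroring the dequantize -> quantize round trip done per element above.
uint8_t requantize_qasymm8(uint8_t in, float in_scale, int in_offset,
                           float out_scale, int out_offset)
{
    // Assumed asymmetric dequantization: real = scale * (q - offset).
    const float dequantized = in_scale * (static_cast<float>(in) - static_cast<float>(in_offset));
    // Round to nearest with halves up, as in RoundingPolicy::TO_NEAREST_UP.
    const int requantized = static_cast<int>(std::floor(dequantized / out_scale + static_cast<float>(out_offset) + 0.5f));
    // Clamp to the QASYMM8 range.
    return static_cast<uint8_t>(std::min(255, std::max(0, requantized)));
}

The same per-element requantization is applied in the WidthConcatenateLayer reference below.
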
diff --git a/tests/validation/reference/DepthConcatenateLayer.h b/tests/validation/reference/DepthConcatenateLayer.h
index 3c486a8015..8a78441651 100644
--- a/tests/validation/reference/DepthConcatenateLayer.h
+++ b/tests/validation/reference/DepthConcatenateLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,7 @@ namespace validation
namespace reference
{
template <typename T>
-SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs);
+SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs, SimpleTensor<T> &dst);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/WidthConcatenateLayer.cpp b/tests/validation/reference/WidthConcatenateLayer.cpp
index 6be171b64d..38543393ce 100644
--- a/tests/validation/reference/WidthConcatenateLayer.cpp
+++ b/tests/validation/reference/WidthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,7 +34,7 @@ namespace validation
namespace reference
{
template <typename T>
-SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
+SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs, SimpleTensor<T> &dst)
{
// Create reference
std::vector<TensorShape> shapes;
@@ -44,10 +44,6 @@ SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
shapes.emplace_back(src.shape());
}
- DataType dst_type = srcs.empty() ? DataType::UNKNOWN : srcs[0].data_type();
- TensorShape dst_shape = calculate_width_concatenate_shape(shapes);
- SimpleTensor<T> dst(dst_shape, dst_type);
-
// Compute reference
int width_offset = 0;
const int width_out = dst.shape().x();
@@ -74,21 +70,32 @@ SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
for(int r = 0; r < height; ++r)
{
const int offset = u * height * depth + d * height + r;
- std::copy(src_ptr, src_ptr + width, dst_ptr + width_offset + offset * width_out);
- src_ptr += width;
+ if(src.data_type() == DataType::QASYMM8 && src.quantization_info() != dst.quantization_info())
+ {
+ std::transform(src_ptr, src_ptr + width, dst_ptr + width_offset + offset * width_out, [src, dst](T t)
+ {
+ const float dequantized_input = src.quantization_info().dequantize(t);
+ return dst.quantization_info().quantize(dequantized_input, RoundingPolicy::TO_NEAREST_UP);
+ });
+ src_ptr += width;
+ }
+ else
+ {
+ std::copy(src_ptr, src_ptr + width, dst_ptr + width_offset + offset * width_out);
+ src_ptr += width;
+ }
}
}
}
-
width_offset += width;
}
return dst;
}
-template SimpleTensor<float> widthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
-template SimpleTensor<half> widthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
-template SimpleTensor<uint8_t> widthconcatenate_layer(const std::vector<SimpleTensor<uint8_t>> &srcs);
+template SimpleTensor<float> widthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs, SimpleTensor<float> &dst);
+template SimpleTensor<half> widthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs, SimpleTensor<half> &dst);
+template SimpleTensor<uint8_t> widthconcatenate_layer(const std::vector<SimpleTensor<uint8_t>> &srcs, SimpleTensor<uint8_t> &dst);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/WidthConcatenateLayer.h b/tests/validation/reference/WidthConcatenateLayer.h
index 237e72b947..0f1f428f10 100644
--- a/tests/validation/reference/WidthConcatenateLayer.h
+++ b/tests/validation/reference/WidthConcatenateLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,7 @@ namespace validation
namespace reference
{
template <typename T>
-SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs);
+SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs, SimpleTensor<T> &dst);
} // namespace reference
} // namespace validation
} // namespace test