Diffstat (limited to 'tests/validation/TensorOperations.h')
-rw-r--r--  tests/validation/TensorOperations.h  202
1 file changed, 0 insertions(+), 202 deletions(-)
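
The hunk below deletes the two reference normalization_layer implementations used by the validation tests. For orientation, the floating-point CROSS_MAP path computes, per element, out = in / (kappa + coeff * sum of in^2 over the channel window)^beta. A minimal standalone sketch of that computation follows; the function name and the flat W x H x C layout with channel stride W * H are assumptions for illustration, not part of the library:

#include <cmath>
#include <vector>

// Sketch of the cross-map normalization computed by the removed reference
// code, assuming a single W x H x C volume whose channel (depth) stride is
// W * H. Illustrative only, not the library API.
std::vector<float> cross_map_normalize(const std::vector<float> &in, int depth,
                                       float kappa, float coeff, float beta, int norm_size)
{
    const int radius = norm_size / 2;
    const int plane  = static_cast<int>(in.size()) / depth; // elements per channel plane
    std::vector<float> out(in.size());

    for(int p = 0; p < plane; ++p)
    {
        for(int l = 0; l < depth; ++l)
        {
            // Sum of squares over the window of channels centred on l
            float accumulated_scale = 0.f;
            for(int j = -radius; j <= radius; ++j)
            {
                const int z = l + j;
                if(z >= 0 && z < depth)
                {
                    const float v = in[p + z * plane];
                    accumulated_scale += v * v;
                }
            }
            // out = in / (kappa + coeff * sum)^beta, as in the removed reference
            out[p + l * plane] = in[p + l * plane] / std::pow(kappa + coeff * accumulated_scale, beta);
        }
    }
    return out;
}

The removed fixed-point overload follows the same structure, but performs the arithmetic with the fixed_point_arithmetic helpers instead of float operations.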
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 5018bfdb2b..a5039a4641 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -1207,208 +1207,6 @@ void fully_connected_layer(const Tensor<T> &in, const Tensor<T> &weights, const
}
}
-// Normalization Layer for floating point type
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
-void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
-{
-    const uint32_t norm_size = norm_info.norm_size();
-    NormType type = norm_info.type();
-    float beta = norm_info.beta();
-    uint32_t kappa = norm_info.kappa();
-
-    const int cols = static_cast<int>(in.shape()[0]);
-    const int rows = static_cast<int>(in.shape()[1]);
-    const int depth = static_cast<int>(in.shape()[2]);
-    int upper_dims = in.shape().total_size() / (cols * rows);
-
-    float coeff = norm_info.scale_coeff();
-    int radius_cols = norm_size / 2;
-    // IN_MAP_1D and CROSS_MAP normalize over a single axis only
-    int radius_rows = (NormType::IN_MAP_2D == type) ? norm_size / 2 : 0;
-
-    if(type == NormType::CROSS_MAP)
-    {
-        // Remove also depth from upper dimensions since it is the axes we want
-        // to use for normalization
-        upper_dims /= depth;
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int i = 0; i < rows; ++i)
-            {
-                for(int k = 0; k < cols; ++k)
-                {
-                    for(int l = 0; l < depth; ++l)
-                    {
-                        float accumulated_scale = 0.f;
-                        for(int j = -radius_cols; j <= radius_cols; ++j)
-                        {
-                            const int z = l + j;
-                            if(z >= 0 && z < depth)
-                            {
-                                const T value = in[k + i * cols + z * rows * cols + r * cols * rows * depth];
-                                accumulated_scale += value * value;
-                            }
-                        }
-                        out[k + i * cols + l * rows * cols + r * cols * rows * depth] = kappa + accumulated_scale * coeff;
-                    }
-                }
-            }
-        }
-    }
-    else
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int i = 0; i < rows; ++i)
-            {
-                for(int k = 0; k < cols; ++k)
-                {
-                    float accumulated_scale = 0.f;
-                    for(int j = -radius_rows; j <= radius_rows; ++j)
-                    {
-                        const int y = i + j;
-                        for(int l = -radius_cols; l <= radius_cols; ++l)
-                        {
-                            const int x = k + l;
-                            if((x >= 0 && y >= 0) && (x < cols && y < rows))
-                            {
-                                const T value = in[x + y * cols + r * cols * rows];
-                                accumulated_scale += value * value;
-                            }
-                        }
-                    }
-                    out[k + i * cols + r * cols * rows] = kappa + accumulated_scale * coeff;
-                }
-            }
-        }
-    }
-
-    if(beta == 1.f)
-    {
-        for(int i = 0; i < out.num_elements(); ++i)
-        {
-            out[i] = in[i] / out[i];
-        }
-    }
-    else if(beta == 0.5f)
-    {
-        for(int i = 0; i < out.num_elements(); ++i)
-        {
-            out[i] = in[i] / std::sqrt(out[i]);
-        }
-    }
-    else
-    {
-        for(int i = 0; i < out.num_elements(); ++i)
-        {
-            out[i] = in[i] * std::exp(std::log(out[i]) * -beta);
-        }
-    }
-}
-// Normalization Layer for fixed-point types
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
-void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
-{
-    using namespace fixed_point_arithmetic;
-
-    const int fixed_point_position = in.fixed_point_position();
-
-    const uint32_t norm_size = norm_info.norm_size();
-    NormType type = norm_info.type();
-    fixed_point<T> beta(norm_info.beta(), fixed_point_position);
-    fixed_point<T> kappa(norm_info.kappa(), fixed_point_position);
-
-    const int cols = static_cast<int>(in.shape()[0]);
-    const int rows = static_cast<int>(in.shape()[1]);
-    const int depth = static_cast<int>(in.shape()[2]);
-    int upper_dims = in.shape().total_size() / (cols * rows);
-
-    fixed_point<T> coeff(norm_info.scale_coeff(), fixed_point_position);
-    int radius_cols = norm_size / 2;
-    // IN_MAP_1D and CROSS_MAP normalize over a single axis only
-    int radius_rows = (NormType::IN_MAP_2D == type) ? norm_size / 2 : 0;
-
-    if(type == NormType::CROSS_MAP)
-    {
-        // Remove also depth from upper dimensions since it is the axes we want
-        // to use for normalization
-        upper_dims /= depth;
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int i = 0; i < rows; ++i)
-            {
-                for(int k = 0; k < cols; ++k)
-                {
-                    for(int l = 0; l < depth; ++l)
-                    {
-                        fixed_point<T> accumulated_scale(0.f, fixed_point_position);
-                        for(int j = -radius_cols; j <= radius_cols; ++j)
-                        {
-                            const int z = l + j;
-                            if(z >= 0 && z < depth)
-                            {
-                                const T value = in[k + i * cols + z * rows * cols + r * cols * rows * depth];
-                                const fixed_point<T> fp_value(value, fixed_point_position, true);
-                                accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value));
-                            }
-                        }
-                        accumulated_scale = add(kappa, mul(accumulated_scale, coeff));
-                        out[k + i * cols + l * rows * cols + r * cols * rows * depth] = accumulated_scale.raw();
-                    }
-                }
-            }
-        }
-    }
-    else
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int i = 0; i < rows; ++i)
-            {
-                for(int k = 0; k < cols; ++k)
-                {
-                    fixed_point<T> accumulated_scale(0.f, fixed_point_position);
-                    for(int j = -radius_rows; j <= radius_rows; ++j)
-                    {
-                        const int y = i + j;
-                        for(int l = -radius_cols; l <= radius_cols; ++l)
-                        {
-                            const int x = k + l;
-                            if((x >= 0 && y >= 0) && (x < cols && y < rows))
-                            {
-                                const T value = in[x + y * cols + r * cols * rows];
-                                const fixed_point<T> fp_value(value, fixed_point_position, true);
-                                accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value));
-                            }
-                        }
-                    }
-                    accumulated_scale = add(kappa, mul(accumulated_scale, coeff));
-                    out[k + i * cols + r * cols * rows] = accumulated_scale.raw();
-                }
-            }
-        }
-    }
-
-    if(norm_info.beta() == 1.f)
-    {
-        for(int i = 0; i < out.num_elements(); ++i)
-        {
-            fixed_point<T> res = div(fixed_point<T>(in[i], fixed_point_position, true), fixed_point<T>(out[i], fixed_point_position, true));
-            out[i] = res.raw();
-        }
-    }
-    else
-    {
-        const fixed_point<T> beta(norm_info.beta(), fixed_point_position);
-        for(int i = 0; i < out.num_elements(); ++i)
-        {
-            fixed_point<T> res = pow(fixed_point<T>(out[i], fixed_point_position, true), beta);
-            res = div(fixed_point<T>(in[i], fixed_point_position, true), res);
-            out[i] = res.raw();
-        }
-    }
-}
-
// Pooling layer
template <typename T>
void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info, int fixed_point_position)