author     Georgios Pinitas <georgios.pinitas@arm.com>   2018-01-10 15:33:28 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:43:42 +0000
commit     652bde553f506caac4c563988dc9baf746f9584d (patch)
tree       931d17bdfa70e9968cd434cfa53db8919bb534ea /src/graph
parent     f72f9367d1eddee91f15a64952b99ee6b80b821d (diff)
download   ComputeLibrary-652bde553f506caac4c563988dc9baf746f9584d.tar.gz
COMPMID-674 - Create Google InceptionV3 example
Change-Id: I389e0d4104b7dde60b7cdd612a83f3328517e44c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/115804
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/graph')
-rw-r--r--  src/graph/SubTensor.cpp               20
-rw-r--r--  src/graph/nodes/BranchLayer.cpp       65
-rw-r--r--  src/graph/nodes/ConvolutionLayer.cpp  22
3 files changed, 31 insertions, 76 deletions
diff --git a/src/graph/SubTensor.cpp b/src/graph/SubTensor.cpp
index 2edeb3b1d4..2e640dd93c 100644
--- a/src/graph/SubTensor.cpp
+++ b/src/graph/SubTensor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,21 +37,21 @@ using namespace arm_compute::graph;
namespace
{
template <typename SubTensorType, typename ParentTensorType>
-std::unique_ptr<arm_compute::ITensor> initialise_subtensor(arm_compute::ITensor *parent, TensorShape shape, Coordinates coords)
+std::unique_ptr<arm_compute::ITensor> initialise_subtensor(arm_compute::ITensor *parent, TensorShape shape, Coordinates coords, bool extend_parent)
{
auto ptensor = dynamic_cast<ParentTensorType *>(parent);
- auto subtensor = arm_compute::support::cpp14::make_unique<SubTensorType>(ptensor, shape, coords);
+ auto subtensor = arm_compute::support::cpp14::make_unique<SubTensorType>(ptensor, shape, coords, extend_parent);
return std::move(subtensor);
}
} // namespace
SubTensor::SubTensor()
- : _target(TargetHint::DONT_CARE), _tensor_shape(), _coords(), _parent(nullptr), _subtensor(nullptr)
+ : _target(TargetHint::DONT_CARE), _tensor_shape(), _coords(), _parent(nullptr), _subtensor(nullptr), _extend_parent(false)
{
}
-SubTensor::SubTensor(Tensor &parent, TensorShape tensor_shape, Coordinates coords)
- : _target(TargetHint::DONT_CARE), _tensor_shape(tensor_shape), _coords(coords), _parent(nullptr), _subtensor(nullptr)
+SubTensor::SubTensor(Tensor &parent, TensorShape tensor_shape, Coordinates coords, bool extend_parent)
+ : _target(TargetHint::DONT_CARE), _tensor_shape(tensor_shape), _coords(coords), _parent(nullptr), _subtensor(nullptr), _extend_parent(extend_parent)
{
ARM_COMPUTE_ERROR_ON(parent.tensor() == nullptr);
_parent = parent.tensor();
@@ -60,8 +60,8 @@ SubTensor::SubTensor(Tensor &parent, TensorShape tensor_shape, Coordinates coord
instantiate_subtensor();
}
-SubTensor::SubTensor(arm_compute::ITensor *parent, TensorShape tensor_shape, Coordinates coords, TargetHint target)
- : _target(target), _tensor_shape(tensor_shape), _coords(coords), _parent(parent), _subtensor(nullptr)
+SubTensor::SubTensor(arm_compute::ITensor *parent, TensorShape tensor_shape, Coordinates coords, TargetHint target, bool extend_parent)
+ : _target(target), _tensor_shape(tensor_shape), _coords(coords), _parent(parent), _subtensor(nullptr), _extend_parent(extend_parent)
{
ARM_COMPUTE_ERROR_ON(parent == nullptr);
instantiate_subtensor();
@@ -108,10 +108,10 @@ void SubTensor::instantiate_subtensor()
switch(_target)
{
case TargetHint::OPENCL:
- _subtensor = initialise_subtensor<arm_compute::CLSubTensor, arm_compute::ICLTensor>(_parent, _tensor_shape, _coords);
+ _subtensor = initialise_subtensor<arm_compute::CLSubTensor, arm_compute::ICLTensor>(_parent, _tensor_shape, _coords, _extend_parent);
break;
case TargetHint::NEON:
- _subtensor = initialise_subtensor<arm_compute::SubTensor, arm_compute::ITensor>(_parent, _tensor_shape, _coords);
+ _subtensor = initialise_subtensor<arm_compute::SubTensor, arm_compute::ITensor>(_parent, _tensor_shape, _coords, _extend_parent);
break;
default:
ARM_COMPUTE_ERROR("Invalid TargetHint");
diff --git a/src/graph/nodes/BranchLayer.cpp b/src/graph/nodes/BranchLayer.cpp
index 6352bfc1e3..7a20a565b8 100644
--- a/src/graph/nodes/BranchLayer.cpp
+++ b/src/graph/nodes/BranchLayer.cpp
@@ -37,46 +37,6 @@
using namespace arm_compute::graph;
-namespace
-{
-void depth_concatenate_output_info(ITensorInfo *info, ITensorInfo *sub_tensor_info)
-{
- ARM_COMPUTE_ERROR_ON(info == nullptr);
- ARM_COMPUTE_ERROR_ON(sub_tensor_info == nullptr);
-
- TensorShape info_shape = info->tensor_shape();
- const TensorShape &sub_tensor_info_shape = sub_tensor_info->tensor_shape();
-
- // Update parent info and valid region
- if(info_shape.total_size() == 0)
- {
- arm_compute::auto_init_if_empty(*info,
- sub_tensor_info->tensor_shape(),
- sub_tensor_info->num_channels(),
- sub_tensor_info->data_type(), sub_tensor_info->fixed_point_position(), sub_tensor_info->quantization_info());
- info->set_valid_region(sub_tensor_info->valid_region());
- }
- else
- {
- ARM_COMPUTE_ERROR_ON(info->num_channels() != sub_tensor_info->num_channels());
- ARM_COMPUTE_ERROR_ON(info->data_type() != sub_tensor_info->data_type());
- ARM_COMPUTE_ERROR_ON(info->fixed_point_position() != sub_tensor_info->fixed_point_position());
-
- // Concatenate depth
- ARM_COMPUTE_ERROR_ON(info_shape.x() != sub_tensor_info_shape.x());
- ARM_COMPUTE_ERROR_ON(info_shape.y() != sub_tensor_info_shape.y());
- info_shape.set(2, info_shape.z() + sub_tensor_info_shape.z());
- info->set_tensor_shape(info_shape);
-
- // Update valid region
- arm_compute::ValidRegion info_valid_region = info->valid_region();
- info_valid_region.shape.set(2, info_shape.z());
- arm_compute::ValidRegion updated_region = arm_compute::intersect_valid_regions(info_valid_region, sub_tensor_info->valid_region());
- info->set_valid_region(updated_region);
- }
-}
-} // namespace
-
/** Branch function */
class BranchFunction final : public arm_compute::IFunction
{
@@ -117,9 +77,8 @@ std::unique_ptr<arm_compute::IFunction> BranchLayer::instantiate_node(GraphConte
// Create branch function
auto func = arm_compute::support::cpp14::make_unique<BranchFunction>();
- // Track output SubTensorInfo and depth
- TensorInfo out_info;
- int depth = 0;
+ // Track output depth
+ int depth = 0;
// Constuct all sub-graphs given the input/output
for(auto &sg : _sub_graphs)
@@ -143,10 +102,13 @@ std::unique_ptr<arm_compute::IFunction> BranchLayer::instantiate_node(GraphConte
// Create output sub-tensor
if(!sg->has_output())
{
- ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(output) == nullptr);
- out = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(output),
- output->tensor()->info()->tensor_shape(),
- Coordinates(0, 0, depth));
+ ARM_COMPUTE_ERROR_ON((dynamic_cast<Tensor *>(output) == nullptr) && (dynamic_cast<SubTensor *>(output) == nullptr));
+
+ out = arm_compute::support::cpp14::make_unique<SubTensor>(output->tensor(),
+ TensorShape(),
+ Coordinates(0, 0, depth),
+ output->target(),
+ true);
out_sub_tensor = dynamic_cast<SubTensor *>(out.get());
}
@@ -161,17 +123,8 @@ std::unique_ptr<arm_compute::IFunction> BranchLayer::instantiate_node(GraphConte
{
ARM_COMPUTE_ERROR_ON(out_sub_tensor->tensor() == nullptr);
depth += out_sub_tensor->tensor()->info()->tensor_shape()[2];
- depth_concatenate_output_info(&out_info, out_sub_tensor->tensor()->info());
}
}
- // Auto-init output
- arm_compute::auto_init_if_empty(*output->tensor()->info(),
- out_info.tensor_shape(),
- out_info.num_channels(),
- out_info.data_type(),
- out_info.fixed_point_position(),
- out_info.quantization_info());
-
return std::move(func);
}
\ No newline at end of file
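Taken together, the hunks above replace the manual out_info bookkeeping with auto-extending sub-tensors: each branch writes into an empty-shaped SubTensor placed at the running depth offset, and the parent output grows as branches are concatenated. A condensed view of the new per-branch slot creation (names mirror the diff; this is a paraphrase, not additional library code):

// Output slot for one branch: empty shape, placed at the current depth offset,
// extend_parent = true so the parent output's shape grows to cover it.
out = arm_compute::support::cpp14::make_unique<SubTensor>(output->tensor(),
                                                          TensorShape(),
                                                          Coordinates(0, 0, depth),
                                                          output->target(),
                                                          true);
// After the branch sub-graph is built, advance the depth offset for the next branch.
depth += out_sub_tensor->tensor()->info()->tensor_shape()[2];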
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
index 53d06ea75f..f292b893ed 100644
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ b/src/graph/nodes/ConvolutionLayer.cpp
@@ -106,13 +106,16 @@ std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_comp
const WeightsInfo &weights_info,
ConvolutionMethodHint conv_method)
{
- if(conv_method == ConvolutionMethodHint::GEMM)
+ if((conv_method == ConvolutionMethodHint::DIRECT)
+ && arm_compute::CLDirectConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
{
- return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDirectConvolutionLayer");
+ return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
}
else
{
- return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLConvolutionLayer");
+ return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
}
}
@@ -122,13 +125,16 @@ std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_comput
const WeightsInfo &weights_info,
ConvolutionMethodHint conv_method)
{
- if(conv_method == ConvolutionMethodHint::GEMM)
+ if((conv_method == ConvolutionMethodHint::DIRECT)
+ && arm_compute::NEDirectConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
{
- return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info, weights_info);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDirectConvolutionLayer");
+ return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
}
else
{
- return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEConvolutionLayer");
+ return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info, weights_info);
}
}
} // namespace
@@ -258,12 +264,10 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolutio
std::unique_ptr<arm_compute::IFunction> func;
if(_target_hint == TargetHint::OPENCL)
{
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLConvolutionLayer");
func = instantiate<TargetHint::OPENCL>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
}
else
{
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEConvolutionLayer");
func = instantiate<TargetHint::NEON>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
}
return func;
@@ -325,12 +329,10 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_co
// Instantiate convolution function
if(_target_hint == TargetHint::OPENCL)
{
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLConvolutionLayer");
func = instantiate<TargetHint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
}
else
{
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEConvolutionLayer");
func = instantiate<TargetHint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
}