10 #include <arm_compute/core/Types.h> 11 #include <arm_compute/runtime/NEON/functions/NEPadLayer.h> 17 using namespace armcomputetensorutils;
// NOTE(review): this span is a garbled extraction of the NeonPadWorkload
// constructor body — original-file line numbers (27, 31, 33, 37, ...) are
// fused into the text and several statements were dropped by the extraction
// (the head of the std::reverse_copy call that fills reversed_PadList, and
// the definitions of `input`, `output` and `pixelValue` used below).
// TODO: restore the missing statements from version control; comments below
// record the visible intent only.
//
// Build a copy of the descriptor's pad list with element order reversed
// (the destination of a std::reverse_copy whose head is missing here);
// presumably because the Compute Library expects the opposite dimension
// order — confirm against NeonPadWorkloadValidate, which does the same.
27 std::vector<std::pair<unsigned int, unsigned int>> reversed_PadList(descriptor.
m_Parameters.
m_PadList.size());
// Destination iterator of the (missing) std::reverse_copy call.
31 std::begin(reversed_PadList));
// Convert the reversed (before, after) pair list to the Compute Library's
// arm_compute::PaddingList representation.
33 arm_compute::PaddingList padList =
static_cast<arm_compute::PaddingList
>(reversed_PadList);
// Configure the NEON pad function and transfer ownership into m_Layer.
37 auto layer = std::make_unique<arm_compute::NEPadLayer>();
38 layer->configure(&input, &output, padList, pixelValue);
39 m_Layer.reset(layer.release());
52 const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
53 const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
55 std::vector<std::pair<unsigned int, unsigned int>> reversed_PadList(descriptor.
m_PadList.size());
57 std::reverse_copy(std::begin(descriptor.
m_PadList),
59 std::begin(reversed_PadList));
61 arm_compute::PaddingList padList =
static_cast<arm_compute::PaddingList
>(reversed_PadList);
63 return arm_compute::NEPadLayer::validate(&aclInputInfo, &aclOutputInfo, padList);
void Execute() const override
#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name)
LayerDescriptor m_Parameters
A PadDescriptor for the PadLayer.
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
NeonPadWorkload(const PadQueueDescriptor &descriptor, const WorkloadInfo &info)
const PadQueueDescriptor m_Data
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
float m_PadValue
Optional value to use for padding, defaults to 0.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension. First is the number of values to add before the tensor in the dimension, second is the number of values to add after.
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs