12 #include <boost/cast.hpp> 13 #include <boost/format.hpp> 26 if (prevSlot !=
nullptr)
29 prevSlot->Disconnect(*
this);
37 const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
48 ValidateConnectionIndex(index);
49 return m_Connections[index];
54 ValidateConnectionIndex(index);
55 return m_Connections[index];
60 GetOutputHandler().SetTensorInfo(tensorInfo);
65 return GetOutputHandler().GetTensorInfo();
74 return GetOutputHandler().IsTensorInfoSet();
79 ARMNN_ASSERT_MSG(IsTensorInfoSet(),
"TensorInfo must be set in order to validate the shape.");
80 return shape == m_OutputHandler.GetTensorInfo().GetShape();
86 m_Connections.push_back(&destination);
94 auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);
96 if (it == m_Connections.end())
101 auto idx = std::distance(m_Connections.begin(), it);
102 m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
104 m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
109 while (GetNumConnections() > 0)
112 Disconnect(connection);
118 while (GetNumConnections() > 0)
121 "Cannot move connections once memory strategies have be established.");
124 Disconnect(connection);
125 destination.
Connect(connection);
151 for (
unsigned int i = 0; i < GetNumConnections(); i++)
158 void OutputSlot::ValidateConnectionIndex(
unsigned int index)
const 160 if (boost::numeric_cast<std::size_t>(index) >= m_Connections.size())
163 boost::str(boost::format(
"GetConnection: Invalid index %1% provided") % index));
174 m_TensorHandleFactoryId = id;
179 return m_TensorHandleFactoryId;
184 m_EdgeStrategies[connectionIndex] = strategy;
189 return m_EdgeStrategies[connectionIdx];
193 unsigned int numOutputSlots,
197 : m_OutputHandlers(numOutputSlots)
199 , m_LayerName(name ? name :
"")
206 m_InputSlots.reserve(numInputSlots);
207 for (
unsigned int i = 0; i < numInputSlots; ++i)
209 m_InputSlots.emplace_back(*
this, i);
212 m_OutputSlots.reserve(numOutputSlots);
213 for (
unsigned int i = 0; i < numOutputSlots; ++i)
220 unsigned int numOutputSlots,
233 const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
234 dataCollector.
Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
242 outputHandler.CollectWorkloadOutputs(dataCollector);
248 const bool IsMemoryManaged)
275 handle.reset(
nullptr);
296 constexpr
LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
297 constexpr
LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
301 m_Priority = inputPrio;
305 m_Priority = outputPrio;
307 else if (m_Priority == 0)
316 const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
333 if (parentPrio >= outputPrio)
338 m_Priority = parentPrio + 1U;
348 for (
unsigned int i=0; i<expectedConnections; ++i)
355 "Input connection #%1% must be connected " 356 "for %2% layer %3% %4%")
380 "Default implementation for InferOutputShapes can only be used for " 381 "layers with the same number of input and output slots. This doesn't " 382 "hold for %1% layer %2% (#inputs=%3% #outputs=%4%) %5%")
395 const std::string& layerName,
396 const unsigned int outputSlotIndex)
400 ConditionalThrowIfNotEqual<LayerValidationException>(
401 layerName +
": TensorShape set on OutputSlot[0] does not match the inferred shape.",
413 std::stringstream ss;
414 ss << layerName <<
": TensorShape set on OutputSlot[" << outputSlotIndex <<
415 "] does not match the inferred shape at dimension index [";
416 ss << i <<
"] " << outputShape <<
" != " << inferredShape;
426 info.GetQuantizationScale(),
427 info.GetQuantizationOffset());
436 ConditionalThrow<LayerValidationException>(
438 "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
440 ConditionalThrow<LayerValidationException>(
442 "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
449 std::string backendId = std::string(m_BackendId);
450 if(!(m_LayerName.compare(
"") == 0) && !m_LayerName.empty())
452 fn(
"LayerName",m_LayerName);
454 if(!(layerType.compare(
"") == 0) && !layerType.empty())
456 fn(
"LayerType",layerType);
458 if(!(backendId.compare(
"") == 0) && !backendId.empty())
460 fn(
"BackendID",backendId);
virtual void ReleaseConstantData()
bool ValidateTensorShape(const TensorShape &shape) const
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
std::string AsString() const
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
bool AreAllDimensionsSpecified() const
Checks that every dimension is specified; returns false if at least one dimension is not specified.
LayerGuid GetOwningLayerGuid() const override
void OperateOnConstantTensors(Op op)
Dimensionality GetDimensionality() const
Function that returns the shape's dimensionality (i.e. whether the rank/dimensions are specified).
Layer & GetOwningLayer() const
int Connect(InputSlot &destination)
unsigned int LayerPriority
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
const std::vector< InputSlot > & GetInputSlots() const
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void Disconnect(InputSlot &slot)
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Creates tensor handles used by the intermediate tensors.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
unsigned int GetNumConnections() const override
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
void ResetPriority() const
#define ARMNN_ASSERT_MSG(COND, MSG)
char const * GetLayerTypeAsCString(LayerType type)
DataType GetDataType() const
void Push(ITensorHandle *handle, const TensorInfo &info)
Validate all output shapes.
const std::string & GetNameStr() const
LayerPriority GetPriority() const
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
#define ARMNN_ASSERT(COND)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
virtual void ValidateTensorShapesFromInputs()=0
std::vector< OutputHandler > m_OutputHandlers
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional.
bool operator==(const OutputSlot &other) const
const OutputHandler & GetOutputHandler(unsigned int i=0) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
void SetTensorInfo(const TensorInfo &tensorInfo) override
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
DataType GetDataType() const
LayerType GetType() const
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Infer missing output shapes and validate all output shapes.
virtual const TensorInfo & GetTensorInfo() const =0
const OutputHandler & GetOutputHandler() const
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
bool IsTensorInfoSet() const override
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry ®istry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
const TensorInfo & GetTensorInfo() const override
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
static const FactoryId LegacyFactoryId
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
const InputSlot * GetConnection(unsigned int index) const override
LayerGuid GetGuid() const final
Returns the unique id of the layer.
unsigned int CalculateIndexOnOwner() const override