This class allows creating and manipulating comprehensive artificial neural networks. More...
#include <opencv2/dnn/dnn.hpp>
Public Types | |
typedef DictValue | LayerId |
Container for strings and integers. | |
Public Member Functions | |
Net () | |
Default constructor. | |
~Net () | |
Destructor frees the net only if there aren't references to the net anymore. | |
int | addLayer (const String &name, const String &type, const int &dtype, LayerParams &params) |
Adds new layer to the net. | |
int | addLayer (const String &name, const String &type, LayerParams &params) |
int | addLayerToPrev (const String &name, const String &type, const int &dtype, LayerParams &params) |
Adds new layer and connects its first input to the first output of previously added layer. | |
int | addLayerToPrev (const String &name, const String &type, LayerParams &params) |
void | connect (int outLayerId, int outNum, int inpLayerId, int inpNum) |
Connects #outNum output of the first layer to #inpNum input of the second layer. | |
void | connect (String outPin, String inpPin) |
Connects output of the first layer to input of the second layer. | |
String | dump () |
Dump net to String. | |
void | dumpToFile (const String &path) |
Dump net structure, hyperparameters, backend, target and fusion to dot file. | |
bool | empty () const |
void | enableFusion (bool fusion) |
Enables or disables layer fusion in the network. | |
void | enableWinograd (bool useWinograd) |
Enables or disables the Winograd compute branch. The Winograd compute branch can speed up 3x3 Convolution at a small loss of accuracy. | |
Mat | forward (const String &outputName=String()) |
Runs forward pass to compute output of layer with name outputName . | |
void | forward (OutputArrayOfArrays outputBlobs, const std::vector< String > &outBlobNames) |
Runs forward pass to compute outputs of layers listed in outBlobNames . | |
void | forward (OutputArrayOfArrays outputBlobs, const String &outputName=String()) |
Runs forward pass to compute output of layer with name outputName . | |
void | forward (std::vector< std::vector< Mat > > &outputBlobs, const std::vector< String > &outBlobNames) |
Runs forward pass to compute outputs of layers listed in outBlobNames . | |
AsyncArray | forwardAsync (const String &outputName=String()) |
Runs forward pass to compute output of layer with name outputName . | |
int64 | getFLOPS (const int layerId, const MatShape &netInputShape) const |
int64 | getFLOPS (const int layerId, const std::vector< MatShape > &netInputShapes) const |
int64 | getFLOPS (const MatShape &netInputShape) const |
int64 | getFLOPS (const std::vector< MatShape > &netInputShapes) const |
Computes FLOP for whole loaded model with specified input shapes. | |
Impl * | getImpl () const |
Impl & | getImplRef () const |
void | getInputDetails (std::vector< float > &scales, std::vector< int > &zeropoints) const |
Returns input scale and zeropoint for a quantized Net. | |
Ptr< Layer > | getLayer (const LayerId &layerId) const |
Ptr< Layer > | getLayer (const String &layerName) const |
Ptr< Layer > | getLayer (int layerId) const |
Returns pointer to layer with specified id or name which the network use. | |
int | getLayerId (const String &layer) const |
Converts string name of the layer to the integer identifier. | |
std::vector< Ptr< Layer > > | getLayerInputs (int layerId) const |
Returns pointers to input layers of specific layer. | |
std::vector< String > | getLayerNames () const |
int | getLayersCount (const String &layerType) const |
Returns count of layers of specified type. | |
void | getLayerShapes (const MatShape &netInputShape, const int layerId, std::vector< MatShape > &inLayerShapes, std::vector< MatShape > &outLayerShapes) const |
Returns input and output shapes for layer with specified id in loaded model; preliminary inferencing isn't necessary. | |
void | getLayerShapes (const std::vector< MatShape > &netInputShapes, const int layerId, std::vector< MatShape > &inLayerShapes, std::vector< MatShape > &outLayerShapes) const |
void | getLayersShapes (const MatShape &netInputShape, std::vector< int > &layersIds, std::vector< std::vector< MatShape > > &inLayersShapes, std::vector< std::vector< MatShape > > &outLayersShapes) const |
void | getLayersShapes (const std::vector< MatShape > &netInputShapes, std::vector< int > &layersIds, std::vector< std::vector< MatShape > > &inLayersShapes, std::vector< std::vector< MatShape > > &outLayersShapes) const |
Returns input and output shapes for all layers in loaded model; preliminary inferencing isn't necessary. | |
void | getLayerTypes (std::vector< String > &layersTypes) const |
Returns list of types for layer used in model. | |
void | getMemoryConsumption (const int layerId, const MatShape &netInputShape, size_t &weights, size_t &blobs) const |
void | getMemoryConsumption (const int layerId, const std::vector< MatShape > &netInputShapes, size_t &weights, size_t &blobs) const |
void | getMemoryConsumption (const MatShape &netInputShape, size_t &weights, size_t &blobs) const |
void | getMemoryConsumption (const MatShape &netInputShape, std::vector< int > &layerIds, std::vector< size_t > &weights, std::vector< size_t > &blobs) const |
void | getMemoryConsumption (const std::vector< MatShape > &netInputShapes, size_t &weights, size_t &blobs) const |
Computes bytes number which are required to store all weights and intermediate blobs for model. | |
void | getMemoryConsumption (const std::vector< MatShape > &netInputShapes, std::vector< int > &layerIds, std::vector< size_t > &weights, std::vector< size_t > &blobs) const |
Computes bytes number which are required to store all weights and intermediate blobs for each layer. | |
void | getOutputDetails (std::vector< float > &scales, std::vector< int > &zeropoints) const |
Returns output scale and zeropoint for a quantized Net. | |
Mat | getParam (const String &layerName, int numParam=0) const |
Mat | getParam (int layer, int numParam=0) const |
Returns parameter blob of the layer. | |
int64 | getPerfProfile (std::vector< double > &timings) |
Returns overall time for inference and timings (in ticks) for layers. | |
std::vector< int > | getUnconnectedOutLayers () const |
Returns indexes of layers with unconnected outputs. | |
std::vector< String > | getUnconnectedOutLayersNames () const |
Returns names of layers with unconnected outputs. | |
Net | quantize (InputArrayOfArrays calibData, int inputsDtype, int outputsDtype, bool perChannel=true) |
Returns a quantized Net from a floating-point Net. | |
int | registerOutput (const std::string &outputName, int layerId, int outputPort) |
Registers network output with name. | |
void | setHalideScheduler (const String &scheduler) |
Compile Halide layers. | |
void | setInput (InputArray blob, const String &name="", double scalefactor=1.0, const Scalar &mean=Scalar()) |
Sets the new input value for the network. | |
void | setInputShape (const String &inputName, const MatShape &shape) |
Specify shape of network input. | |
void | setInputsNames (const std::vector< String > &inputBlobNames) |
Sets outputs names of the network input pseudo layer. | |
void | setParam (const String &layerName, int numParam, const Mat &blob) |
void | setParam (int layer, int numParam, const Mat &blob) |
Sets the new value for the learned param of the layer. | |
void | setPreferableBackend (int backendId) |
Ask network to use specific computation backend where it supported. | |
void | setPreferableTarget (int targetId) |
Ask network to make computations on specific target device. | |
Static Public Member Functions | |
static Net | readFromModelOptimizer (const std::vector< uchar > &bufferModelConfig, const std::vector< uchar > &bufferWeights) |
Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR). | |
static Net | readFromModelOptimizer (const String &xml, const String &bin) |
Create a network from Intel's Model Optimizer intermediate representation (IR). | |
static Net | readFromModelOptimizer (const uchar *bufferModelConfigPtr, size_t bufferModelConfigSize, const uchar *bufferWeightsPtr, size_t bufferWeightsSize) |
Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR). | |
Protected Attributes | |
Ptr< Impl > | impl |
Friends | |
class | accessor::DnnNetAccessor |
Detailed Description
This class allows creating and manipulating comprehensive artificial neural networks.
Neural network is presented as directed acyclic graph (DAG), where vertices are Layer instances, and edges specify relationships between layers inputs and outputs.
Each network layer has unique integer id and unique string name inside its network. LayerId can store either layer name or layer id.
This class supports reference counting of its instances, i. e. copies point to the same instance.
- Examples
- samples/dnn/colorization.cpp, and samples/dnn/openpose.cpp.
Member Typedef Documentation
◆ LayerId
typedef DictValue cv::dnn::Net::LayerId |
Container for strings and integers.
- Deprecated:
- Use getLayerId() with int result.
Constructor & Destructor Documentation
◆ Net()
cv::dnn::Net::Net | ( | ) |
Default constructor.
◆ ~Net()
cv::dnn::Net::~Net | ( | ) |
Destructor frees the net only if there aren't references to the net anymore.
Member Function Documentation
◆ addLayer() [1/2]
int cv::dnn::Net::addLayer | ( | const String & | name, |
const String & | type, | ||
const int & | dtype, | ||
LayerParams & | params | ||
) |
Adds new layer to the net.
- Parameters
-
name unique name of the adding layer. type typename of the adding layer (type must be registered in LayerRegister). dtype datatype of output blobs. params parameters which will be used to initialize the creating layer.
- Returns
- unique identifier of created layer, or -1 if a failure will happen.
◆ addLayer() [2/2]
int cv::dnn::Net::addLayer | ( | const String & | name, |
const String & | type, | ||
LayerParams & | params | ||
) |
◆ addLayerToPrev() [1/2]
int cv::dnn::Net::addLayerToPrev | ( | const String & | name, |
const String & | type, | ||
const int & | dtype, | ||
LayerParams & | params | ||
) |
Adds new layer and connects its first input to the first output of previously added layer.
- See also
- addLayer()
◆ addLayerToPrev() [2/2]
int cv::dnn::Net::addLayerToPrev | ( | const String & | name, |
const String & | type, | ||
LayerParams & | params | ||
) |
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
◆ connect() [1/2]
void cv::dnn::Net::connect | ( | int | outLayerId, |
int | outNum, | ||
int | inpLayerId, | ||
int | inpNum | ||
) |
Connects #outNum
output of the first layer to #inpNum
input of the second layer.
- Parameters
-
outLayerId identifier of the first layer outNum number of the first layer output inpLayerId identifier of the second layer inpNum number of the second layer input
◆ connect() [2/2]
Connects output of the first layer to input of the second layer.
- Parameters
-
outPin descriptor of the first layer output. inpPin descriptor of the second layer input.
Descriptors have the following template <layer_name>[.input_number]
:
- the first part of the template
layer_name
is string name of the added layer. If this part is empty then the network input pseudo layer will be used; the second optional part of the template
input_number
is either the number of the layer input, or its label. If this part is omitted then the first layer input will be used.- See also
- setNetInputs(), Layer::inputNameToIndex(), Layer::outputNameToIndex()
◆ dump()
String cv::dnn::Net::dump | ( | ) |
Dump net to String.
- Returns
- String with structure, hyperparameters, backend, target and fusion. Call this method after setInput(). To see the correct backend, target and fusion, run after forward().
◆ dumpToFile()
void cv::dnn::Net::dumpToFile | ( | const String & | path | ) |
Dump net structure, hyperparameters, backend, target and fusion to dot file.
- Parameters
-
path path to output file with .dot extension
- See also
- dump()
◆ empty()
bool cv::dnn::Net::empty | ( | ) | const |
Returns true if there are no layers in the network.
◆ enableFusion()
void cv::dnn::Net::enableFusion | ( | bool | fusion | ) |
Enables or disables layer fusion in the network.
- Parameters
-
fusion true to enable the fusion, false to disable. The fusion is enabled by default.
◆ enableWinograd()
void cv::dnn::Net::enableWinograd | ( | bool | useWinograd | ) |
Enables or disables the Winograd compute branch. The Winograd compute branch can speed up 3x3 Convolution at a small loss of accuracy.
- Parameters
-
useWinograd true to enable the Winograd compute branch. The default is true.
◆ forward() [1/4]
Runs forward pass to compute output of layer with name outputName
.
- Parameters
-
outputName name for layer which output is needed to get
- Returns
- blob for first output of specified layer.
By default runs forward pass for the whole network.
- Examples
- samples/dnn/colorization.cpp, and samples/dnn/openpose.cpp.
◆ forward() [2/4]
void cv::dnn::Net::forward | ( | OutputArrayOfArrays | outputBlobs, |
const std::vector< String > & | outBlobNames | ||
) |
Runs forward pass to compute outputs of layers listed in outBlobNames
.
- Parameters
-
outputBlobs contains blobs for first outputs of specified layers. outBlobNames names for layers which outputs are needed to get
◆ forward() [3/4]
void cv::dnn::Net::forward | ( | OutputArrayOfArrays | outputBlobs, |
const String & | outputName = String() |
||
) |
Runs forward pass to compute output of layer with name outputName
.
- Parameters
-
outputBlobs contains all output blobs for specified layer. outputName name for layer which output is needed to get
If outputName
is empty, runs forward pass for the whole network.
◆ forward() [4/4]
void cv::dnn::Net::forward | ( | std::vector< std::vector< Mat > > & | outputBlobs, |
const std::vector< String > & | outBlobNames | ||
) |
Runs forward pass to compute outputs of layers listed in outBlobNames
.
- Parameters
-
outputBlobs contains all output blobs for each layer specified in outBlobNames.
outBlobNames names for layers which outputs are needed to get
◆ forwardAsync()
AsyncArray cv::dnn::Net::forwardAsync | ( | const String & | outputName = String() | ) |
Runs forward pass to compute output of layer with name outputName
.
- Parameters
-
outputName name for layer which output is needed to get
By default runs forward pass for the whole network.
This is an asynchronous version of forward(const String&). dnn::DNN_BACKEND_INFERENCE_ENGINE backend is required.
◆ getFLOPS() [1/4]
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
◆ getFLOPS() [2/4]
int64 cv::dnn::Net::getFLOPS | ( | const int | layerId, |
const std::vector< MatShape > & | netInputShapes | ||
) | const |
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
◆ getFLOPS() [3/4]
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
◆ getFLOPS() [4/4]
Computes FLOP for whole loaded model with specified input shapes.
- Parameters
-
netInputShapes vector of shapes for all net inputs.
- Returns
- computed FLOP.
◆ getImpl()
|
inline |
◆ getImplRef()
|
inline |
◆ getInputDetails()
void cv::dnn::Net::getInputDetails | ( | std::vector< float > & | scales, |
std::vector< int > & | zeropoints | ||
) | const |
Returns input scale and zeropoint for a quantized Net.
- Parameters
-
scales output parameter for returning input scales. zeropoints output parameter for returning input zeropoints.
◆ getLayer() [1/3]
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
- Deprecated:
- to be removed
◆ getLayer() [2/3]
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
- Deprecated:
- Use int getLayerId(const String &layer)
◆ getLayer() [3/3]
Returns pointer to layer with specified id or name which the network use.
- Examples
- samples/dnn/colorization.cpp.
◆ getLayerId()
int cv::dnn::Net::getLayerId | ( | const String & | layer | ) | const |
Converts string name of the layer to the integer identifier.
- Returns
- id of the layer, or -1 if the layer wasn't found.
◆ getLayerInputs()
Returns pointers to input layers of specific layer.
◆ getLayerNames()
std::vector< String > cv::dnn::Net::getLayerNames | ( | ) | const |
◆ getLayersCount()
int cv::dnn::Net::getLayersCount | ( | const String & | layerType | ) | const |
Returns count of layers of specified type.
- Parameters
-
layerType type.
- Returns
- count of layers
◆ getLayerShapes() [1/2]
void cv::dnn::Net::getLayerShapes | ( | const MatShape & | netInputShape, |
const int | layerId, | ||
std::vector< MatShape > & | inLayerShapes, | ||
std::vector< MatShape > & | outLayerShapes | ||
) | const |
Returns input and output shapes for layer with specified id in loaded model; preliminary inferencing isn't necessary.
- Parameters
-
netInputShape shape input blob in net input layer. layerId id for layer. inLayerShapes output parameter for input layers shapes; order is the same as in layersIds outLayerShapes output parameter for output layers shapes; order is the same as in layersIds
◆ getLayerShapes() [2/2]
void cv::dnn::Net::getLayerShapes | ( | const std::vector< MatShape > & | netInputShapes, |
const int | layerId, | ||
std::vector< MatShape > & | inLayerShapes, | ||
std::vector< MatShape > & | outLayerShapes | ||
) | const |
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
◆ getLayersShapes() [1/2]
void cv::dnn::Net::getLayersShapes | ( | const MatShape & | netInputShape, |
std::vector< int > & | layersIds, | ||
std::vector< std::vector< MatShape > > & | inLayersShapes, | ||
std::vector< std::vector< MatShape > > & | outLayersShapes | ||
) | const |
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
◆ getLayersShapes() [2/2]
void cv::dnn::Net::getLayersShapes | ( | const std::vector< MatShape > & | netInputShapes, |
std::vector< int > & | layersIds, | ||
std::vector< std::vector< MatShape > > & | inLayersShapes, | ||
std::vector< std::vector< MatShape > > & | outLayersShapes | ||
) | const |
Returns input and output shapes for all layers in loaded model; preliminary inferencing isn't necessary.
- Parameters
-
netInputShapes shapes for all input blobs in net input layer. layersIds output parameter for layer IDs. inLayersShapes output parameter for input layers shapes; order is the same as in layersIds outLayersShapes output parameter for output layers shapes; order is the same as in layersIds
◆ getLayerTypes()
void cv::dnn::Net::getLayerTypes | ( | std::vector< String > & | layersTypes | ) | const |
Returns list of types for layer used in model.
- Parameters
-
layersTypes output parameter for returning types.
◆ getMemoryConsumption() [1/6]
void cv::dnn::Net::getMemoryConsumption | ( | const int | layerId, |
const MatShape & | netInputShape, | ||
size_t & | weights, | ||
size_t & | blobs | ||
) | const |
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
◆ getMemoryConsumption() [2/6]
void cv::dnn::Net::getMemoryConsumption | ( | const int | layerId, |
const std::vector< MatShape > & | netInputShapes, | ||
size_t & | weights, | ||
size_t & | blobs | ||
) | const |
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
◆ getMemoryConsumption() [3/6]
void cv::dnn::Net::getMemoryConsumption | ( | const MatShape & | netInputShape, |
size_t & | weights, | ||
size_t & | blobs | ||
) | const |
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
◆ getMemoryConsumption() [4/6]
void cv::dnn::Net::getMemoryConsumption | ( | const MatShape & | netInputShape, |
std::vector< int > & | layerIds, | ||
std::vector< size_t > & | weights, | ||
std::vector< size_t > & | blobs | ||
) | const |
This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.
◆ getMemoryConsumption() [5/6]
void cv::dnn::Net::getMemoryConsumption | ( | const std::vector< MatShape > & | netInputShapes, |
size_t & | weights, | ||
size_t & | blobs | ||
) | const |
Computes bytes number which are required to store all weights and intermediate blobs for model.
- Parameters
-
netInputShapes vector of shapes for all net inputs. weights output parameter to store resulting bytes for weights. blobs output parameter to store resulting bytes for intermediate blobs.
◆ getMemoryConsumption() [6/6]
void cv::dnn::Net::getMemoryConsumption | ( | const std::vector< MatShape > & | netInputShapes, |
std::vector< int > & | layerIds, | ||
std::vector< size_t > & | weights, | ||
std::vector< size_t > & | blobs | ||
) | const |
Computes bytes number which are required to store all weights and intermediate blobs for each layer.
- Parameters
-
netInputShapes vector of shapes for all net inputs. layerIds output vector to save layer IDs. weights output parameter to store resulting bytes for weights. blobs output parameter to store resulting bytes for intermediate blobs.
◆ getOutputDetails()
void cv::dnn::Net::getOutputDetails | ( | std::vector< float > & | scales, |
std::vector< int > & | zeropoints | ||
) | const |
Returns output scale and zeropoint for a quantized Net.
- Parameters
-
scales output parameter for returning output scales. zeropoints output parameter for returning output zeropoints.
◆ getParam() [1/2]
◆ getParam() [2/2]
Mat cv::dnn::Net::getParam | ( | int | layer, |
int | numParam = 0 |
||
) | const |
Returns parameter blob of the layer.
- Parameters
-
layer name or id of the layer. numParam index of the layer parameter in the Layer::blobs array.
- See also
- Layer::blobs
◆ getPerfProfile()
int64 cv::dnn::Net::getPerfProfile | ( | std::vector< double > & | timings | ) |
Returns overall time for inference and timings (in ticks) for layers.
Indexes in returned vector correspond to layers ids. Some layers can be fused with others, in this case zero ticks count will be returned for those skipped layers. Supported by DNN_BACKEND_OPENCV on DNN_TARGET_CPU only.
- Parameters
-
[out] timings vector for tick timings for all layers.
- Returns
- overall ticks for model inference.
◆ getUnconnectedOutLayers()
std::vector< int > cv::dnn::Net::getUnconnectedOutLayers | ( | ) | const |
Returns indexes of layers with unconnected outputs.
FIXIT: Rework API to registerOutput() approach, deprecate this call
◆ getUnconnectedOutLayersNames()
std::vector< String > cv::dnn::Net::getUnconnectedOutLayersNames | ( | ) | const |
Returns names of layers with unconnected outputs.
FIXIT: Rework API to registerOutput() approach, deprecate this call
◆ quantize()
Net cv::dnn::Net::quantize | ( | InputArrayOfArrays | calibData, |
int | inputsDtype, | ||
int | outputsDtype, | ||
bool | perChannel = true |
||
) |
Returns a quantized Net from a floating-point Net.
- Parameters
-
calibData Calibration data to compute the quantization parameters. inputsDtype Datatype of quantized net's inputs. Can be CV_32F or CV_8S. outputsDtype Datatype of quantized net's outputs. Can be CV_32F or CV_8S. perChannel Quantization granularity of quantized Net. The default is true, that means quantize model in per-channel way (channel-wise). Set it false to quantize model in per-tensor way (or tensor-wise).
◆ readFromModelOptimizer() [1/3]
◆ readFromModelOptimizer() [2/3]
◆ readFromModelOptimizer() [3/3]
|
static |
Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR).
- Parameters
-
[in] bufferModelConfigPtr buffer pointer of model's configuration. [in] bufferModelConfigSize buffer size of model's configuration. [in] bufferWeightsPtr buffer pointer of model's trained weights. [in] bufferWeightsSize buffer size of model's trained weights.
- Returns
- Net object.
◆ registerOutput()
int cv::dnn::Net::registerOutput | ( | const std::string & | outputName, |
int | layerId, | ||
int | outputPort | ||
) |
Registers network output with name.
Function may create additional 'Identity' layer.
- Parameters
-
outputName identifier of the output layerId identifier of the layer that produces the output outputPort number of that layer's output port
- Returns
- index of bound layer (the same as layerId or newly created)
◆ setHalideScheduler()
void cv::dnn::Net::setHalideScheduler | ( | const String & | scheduler | ) |
Compile Halide layers.
- Parameters
-
[in] scheduler Path to YAML file with scheduling directives.
- See also
- setPreferableBackend
Schedule layers that support Halide backend. Then compile them for specific target. For layers that not represented in scheduling file or if no manual scheduling used at all, automatic scheduling will be applied.
◆ setInput()
void cv::dnn::Net::setInput | ( | InputArray | blob, |
const String & | name = "" , |
||
double | scalefactor = 1.0 , |
||
const Scalar & | mean = Scalar() |
||
) |
Sets the new input value for the network.
- Parameters
-
blob A new blob. Should have CV_32F or CV_8U depth. name A name of input layer. scalefactor An optional normalization scale. mean An optional mean subtraction value.
- See also
- connect(String, String) to know format of the descriptor.
If scale or mean values are specified, a final input blob is computed as:
\[input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)\]
- Examples
- samples/dnn/colorization.cpp, and samples/dnn/openpose.cpp.
◆ setInputShape()
Specify shape of network input.
◆ setInputsNames()
void cv::dnn::Net::setInputsNames | ( | const std::vector< String > & | inputBlobNames | ) |
Sets outputs names of the network input pseudo layer.
Each net always has its own special network input pseudo layer with id=0. This layer stores the user blobs only and doesn't make any computations. In fact, this layer provides the only way to pass user data into the network. As any other layer, this layer can label its outputs and this function provides an easy way to do this.
◆ setParam() [1/2]
◆ setParam() [2/2]
void cv::dnn::Net::setParam | ( | int | layer, |
int | numParam, | ||
const Mat & | blob | ||
) |
Sets the new value for the learned param of the layer.
- Parameters
-
layer name or id of the layer. numParam index of the layer parameter in the Layer::blobs array. blob the new value.
- See also
- Layer::blobs
- Note
- If shape of the new blob differs from the previous shape, then the following forward pass may fail.
◆ setPreferableBackend()
void cv::dnn::Net::setPreferableBackend | ( | int | backendId | ) |
Ask network to use specific computation backend where it supported.
- Parameters
-
[in] backendId backend identifier.
- See also
- Backend
◆ setPreferableTarget()
void cv::dnn::Net::setPreferableTarget | ( | int | targetId | ) |
Ask network to make computations on specific target device.
- Parameters
-
[in] targetId target identifier.
- See also
- Target
List of supported combinations backend / target:
DNN_BACKEND_OPENCV | DNN_BACKEND_INFERENCE_ENGINE | DNN_BACKEND_HALIDE | DNN_BACKEND_CUDA | |
---|---|---|---|---|
DNN_TARGET_CPU | + | + | + | |
DNN_TARGET_OPENCL | + | + | + | |
DNN_TARGET_OPENCL_FP16 | + | + | ||
DNN_TARGET_MYRIAD | + | |||
DNN_TARGET_FPGA | + | |||
DNN_TARGET_CUDA | + | |||
DNN_TARGET_CUDA_FP16 | + | |||
DNN_TARGET_HDDL | + |
- Examples
- samples/dnn/colorization.cpp.
Friends And Related Function Documentation
◆ accessor::DnnNetAccessor
|
friend |
Member Data Documentation
◆ impl
|
protected |
The documentation for this class was generated from the following file:
- opencv2/dnn/dnn.hpp