cv::dnn Namespace Reference


Namespaces

  details
 

struct   _Range
 
class   AbsLayer
 
class   ActivationLayer
 
class   BackendNode
  Derivatives of this class encapsulate functions of certain backends. More...
 
class   BackendWrapper
  Derivatives of this class wrap cv::Mat for different backends and targets. More...
 
class   BaseConvolutionLayer
 
class   BatchNormLayer
 
class   BlankLayer
 
class   BNLLLayer
 
class   ChannelsPReLULayer
 
class   ClassificationModel
  This class represents high-level API for classification models. More...
 
class   ConcatLayer
 
class   ConstLayer
 
class   ConvolutionLayer
 
class   CropAndResizeLayer
 
class   CropLayer
 
class   DeconvolutionLayer
 
class   DetectionModel
  This class represents high-level API for object detection networks. More...
 
class   DetectionOutputLayer
 
class   Dict
  This class implements a name-value dictionary; values are instances of DictValue. More...
 
struct   DictValue
  This struct stores the scalar value (or array) of one of the following types: double, cv::String or int64. More...
 
class   EltwiseLayer
  Element-wise operation on inputs. More...
 
class   ELULayer
 
class   FlattenLayer
 
class   InnerProductLayer
 
class   InterpLayer
  Bilinear resize layer from https://github.com/cdmh/deeplab-public-ver2 . More...
 
class   KeypointsModel
  This class represents high-level API for keypoints models. More...
 
class   Layer
  This interface class allows building new Layers, the building blocks of networks. More...
 
class   LayerFactory
  Layer factory allows creating instances of registered layers. More...
 
class   LayerParams
  This class provides all data needed to initialize a layer. More...
 
class   LRNLayer
 
class   LSTMLayer
  LSTM recurrent layer. More...
 
class   MaxUnpoolLayer
 
class   MishLayer
 
class   Model
  This class represents a high-level API for neural networks. More...
 
class   MVNLayer
 
class   Net
  This class allows creating and manipulating comprehensive artificial neural networks. More...
 
class   NormalizeBBoxLayer
  \( L_p \)-normalization layer. More...
 
class   PaddingLayer
  Adds extra values for specific axes. More...
 
class   PermuteLayer
 
class   PoolingLayer
 
class   PowerLayer
 
class   PriorBoxLayer
 
class   ProposalLayer
 
class   RegionLayer
 
class   ReLU6Layer
 
class   ReLULayer
 
class   ReorgLayer
 
class   ReshapeLayer
 
class   ResizeLayer
  Resize input 4-dimensional blob by nearest neighbor or bilinear strategy. More...
 
class   RNNLayer
  Classical recurrent layer. More...
 
class   ScaleLayer
 
class   SegmentationModel
  This class represents high-level API for segmentation models. More...
 
class   ShiftLayer
 
class   ShuffleChannelLayer
 
class   SigmoidLayer
 
class   SliceLayer
 
class   SoftmaxLayer
 
class   SplitLayer
 
class   SwishLayer
 
class   TanHLayer
 

Typedefs

typedef std::vector< int >  MatShape
 

Enumerations

enum   Backend {
   DNN_BACKEND_DEFAULT = 0,
   DNN_BACKEND_HALIDE ,
   DNN_BACKEND_INFERENCE_ENGINE ,
   DNN_BACKEND_OPENCV ,
   DNN_BACKEND_VKCOM ,
   DNN_BACKEND_CUDA
}
  Enum of computation backends supported by layers. More...
 
enum   Target {
   DNN_TARGET_CPU ,
   DNN_TARGET_OPENCL ,
   DNN_TARGET_OPENCL_FP16 ,
   DNN_TARGET_MYRIAD ,
   DNN_TARGET_VULKAN ,
   DNN_TARGET_FPGA ,
   DNN_TARGET_CUDA ,
   DNN_TARGET_CUDA_FP16
}
  Enum of target devices for computations. More...
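A minimal sketch of how these enums are typically used: request a (backend, target) pair on a loaded Net and list the pairs available in the current build. The model file name is a placeholder, not part of the API.

#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    // Placeholder model path; any network readable by the dnn module works here.
    cv::dnn::Net net = cv::dnn::readNetFromONNX("model.onnx");

    // Request the default OpenCV backend running on the CPU.
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

    // List all (backend, target) pairs available in this build.
    for (const auto &p : cv::dnn::getAvailableBackends())
        std::cout << "backend=" << p.first << " target=" << p.second << std::endl;
    return 0;
}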
 

Functions

Mat   blobFromImage ( InputArray image, double scalefactor=1.0, const Size &size= Size (), const Scalar & mean = Scalar (), bool swapRB=false, bool crop=false, int ddepth= CV_32F )
  Creates 4-dimensional blob from image. Optionally resizes and crops image from center, subtracts mean values, scales values by scalefactor, and swaps Blue and Red channels. More...
 
void  blobFromImage ( InputArray image, OutputArray blob, double scalefactor=1.0, const Size &size= Size (), const Scalar & mean = Scalar (), bool swapRB=false, bool crop=false, int ddepth= CV_32F )
  Creates 4-dimensional blob from image. More...
 
Mat   blobFromImages ( InputArrayOfArrays images, double scalefactor=1.0, Size size= Size (), const Scalar & mean = Scalar (), bool swapRB=false, bool crop=false, int ddepth= CV_32F )
  Creates 4-dimensional blob from series of images. Optionally resizes and crops images from center, subtracts mean values, scales values by scalefactor, and swaps Blue and Red channels. More...
 
void  blobFromImages ( InputArrayOfArrays images, OutputArray blob, double scalefactor=1.0, Size size= Size (), const Scalar & mean = Scalar (), bool swapRB=false, bool crop=false, int ddepth= CV_32F )
  Creates 4-dimensional blob from series of images. More...
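A minimal usage sketch for blobFromImage (blobFromImages is analogous for a batch). The image path, input size, scalefactor, and mean values are illustrative only; the right values depend on the network being fed.

#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    cv::Mat img = cv::imread("input.jpg");   // placeholder image path

    // Produces an NCHW blob of shape 1x3x224x224: the image is resized,
    // the per-channel mean is subtracted, then values are scaled by scalefactor.
    cv::Mat blob = cv::dnn::blobFromImage(img,
                                          1.0 / 255.0,               // scalefactor
                                          cv::Size(224, 224),        // network input size
                                          cv::Scalar(104, 117, 123), // per-channel mean (illustrative)
                                          true,                      // swapRB: BGR -> RGB
                                          false);                    // crop

    std::cout << "blob dims: " << blob.dims << std::endl;  // prints 4
    return 0;
}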
 
int  clamp (int ax, int dims)
 
int  clamp (int ax, const MatShape & shape )
 
Range   clamp (const Range &r, int axisSize)
 
static MatShape   concat (const MatShape &a, const MatShape &b)
 
std::vector< std::pair< Backend , Target > >  getAvailableBackends ()
 
std::vector< Target >  getAvailableTargets ( Backend be)
 
cv::String   getInferenceEngineBackendType ()
  Returns Inference Engine internal backend API. More...
 
cv::String   getInferenceEngineVPUType ()
  Returns Inference Engine VPU type. More...
 
static Mat   getPlane (const Mat &m, int n, int cn)
 
void  imagesFromBlob (const cv::Mat &blob_, OutputArrayOfArrays images_)
  Parse a 4D blob and output the images it contains as 2D arrays through a simpler data structure (std::vector<cv::Mat>). More...
 
void  NMSBoxes (const std::vector< Rect > &bboxes, const std::vector< float > &scores, const float score_threshold, const float nms_threshold, std::vector< int > &indices, const float eta=1.f, const int top_k=0)
  Performs non-maximum suppression given boxes and corresponding scores. More...
 
void  NMSBoxes (const std::vector< Rect2d > &bboxes, const std::vector< float > &scores, const float score_threshold, const float nms_threshold, std::vector< int > &indices, const float eta=1.f, const int top_k=0)
 
void  NMSBoxes (const std::vector< RotatedRect > &bboxes, const std::vector< float > &scores, const float score_threshold, const float nms_threshold, std::vector< int > &indices, const float eta=1.f, const int top_k=0)
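A small self-contained sketch of the Rect overload; the boxes, scores, and thresholds are made-up values.

#include <opencv2/dnn.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Two strongly overlapping boxes plus one separate box.
    std::vector<cv::Rect> boxes = { cv::Rect(10, 10, 100, 100),
                                    cv::Rect(12, 12, 100, 100),
                                    cv::Rect(200, 200, 50, 50) };
    std::vector<float> scores = { 0.9f, 0.8f, 0.7f };

    std::vector<int> keep;
    cv::dnn::NMSBoxes(boxes, scores,
                      0.5f,   // score_threshold: drop low-confidence boxes first
                      0.4f,   // nms_threshold: IoU above which boxes are suppressed
                      keep);

    for (int i : keep)
        std::cout << "kept box " << i << " (score " << scores[i] << ")" << std::endl;
    return 0;
}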
 
static std::ostream &  operator<< (std::ostream &out, const MatShape & shape )
 
static void  print (const MatShape & shape , const String &name="")
 
Net   readNet (const String &model, const String &config="", const String &framework="")
  Reads a deep learning network represented in one of the supported formats. More...
 
Net   readNet (const String &framework, const std::vector< uchar > &bufferModel, const std::vector< uchar > &bufferConfig=std::vector< uchar >())
  Reads a deep learning network represented in one of the supported formats. More...
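A minimal end-to-end sketch: load a network with readNet(), feed it a blob, and run a forward pass. The file names and input size are placeholders; readNet() infers the framework from the file extensions.

#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    // Placeholder TensorFlow model + text graph; any supported format works.
    cv::dnn::Net net = cv::dnn::readNet("frozen_inference_graph.pb", "graph.pbtxt");

    cv::Mat img = cv::imread("input.jpg");
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, cv::Size(300, 300),
                                          cv::Scalar(), true, false);

    net.setInput(blob);
    cv::Mat out = net.forward();   // output of the first (default) output layer

    std::cout << "output has " << out.dims << " dimensions" << std::endl;
    return 0;
}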
 
Net   readNetFromCaffe (const String &prototxt, const String &caffeModel= String ())
  Reads a network model stored in Caffe framework's format. More...
 
Net   readNetFromCaffe (const std::vector< uchar > &bufferProto, const std::vector< uchar > &bufferModel=std::vector< uchar >())
  Reads a network model stored in Caffe format from an in-memory buffer. More...
 
Net   readNetFromCaffe (const char *bufferProto, size_t lenProto, const char *bufferModel=NULL, size_t lenModel=0)
  Reads a network model stored in Caffe format from an in-memory buffer. More...
 
Net   readNetFromDarknet (const String &cfgFile, const String &darknetModel= String ())
  Reads a network model stored in Darknet model files. More...
 
Net   readNetFromDarknet (const std::vector< uchar > &bufferCfg, const std::vector< uchar > &bufferModel=std::vector< uchar >())
  Reads a network model stored in Darknet model files. More...
 
Net   readNetFromDarknet (const char *bufferCfg, size_t lenCfg, const char *bufferModel=NULL, size_t lenModel=0)
  Reads a network model stored in Darknet model files. More...
 
Net   readNetFromModelOptimizer (const String &xml, const String &bin)
  Loads a network from Intel's Model Optimizer intermediate representation. More...
 
Net   readNetFromModelOptimizer (const std::vector< uchar > &bufferModelConfig, const std::vector< uchar > &bufferWeights)
  Loads a network from Intel's Model Optimizer intermediate representation. More...
 
Net   readNetFromModelOptimizer (const uchar *bufferModelConfigPtr, size_t bufferModelConfigSize, const uchar *bufferWeightsPtr, size_t bufferWeightsSize)
  Loads a network from Intel's Model Optimizer intermediate representation. More...
 
Net   readNetFromONNX (const String &onnxFile)
  Reads a network model from an ONNX file. More...
 
Net   readNetFromONNX (const char *buffer, size_t sizeBuffer)
  Reads a network model from an ONNX in-memory buffer. More...
 
Net   readNetFromONNX (const std::vector< uchar > &buffer)
  Reads a network model from an ONNX in-memory buffer. More...
 
Net   readNetFromTensorflow (const String &model, const String &config= String ())
  Reads a network model stored in TensorFlow framework's format. More...
 
Net   readNetFromTensorflow (const std::vector< uchar > &bufferModel, const std::vector< uchar > &bufferConfig=std::vector< uchar >())
  Reads a network model stored in TensorFlow framework's format. More...
 
Net   readNetFromTensorflow (const char *bufferModel, size_t lenModel, const char *bufferConfig=NULL, size_t lenConfig=0)
  Reads a network model stored in TensorFlow framework's format. More...
 
Net   readNetFromTorch (const String &model, bool isBinary=true, bool evaluate=true)
  Reads a network model stored in Torch7 framework's format. More...
 
Mat   readTensorFromONNX (const String &path)
  Creates a blob from a .pb file. More...
 
Mat   readTorchBlob (const String &filename, bool isBinary=true)
  Loads a blob that was serialized as a torch.Tensor object of the Torch7 framework. More...
 
void  resetMyriadDevice ()
  Releases a Myriad device (bound by OpenCV). More...
 
cv::String   setInferenceEngineBackendType (const cv::String &newBackendType)
  Specifies Inference Engine internal backend API. More...
 
static MatShape   shape (const int *dims, const int n)
 
static MatShape   shape (const Mat &mat)
 
static MatShape   shape (const MatSize &sz)
 
static MatShape   shape (const UMat &mat)
 
static MatShape   shape (int a0, int a1=-1, int a2=-1, int a3=-1)
 
void  shrinkCaffeModel (const String &src, const String &dst, const std::vector< String > &layersTypes=std::vector< String >())
  Converts all weights of a Caffe network to half-precision floating point. More...
 
static Mat   slice (const Mat &m, const _Range &r0)
 
static Mat   slice (const Mat &m, const _Range &r0, const _Range &r1)
 
static Mat   slice (const Mat &m, const _Range &r0, const _Range &r1, const _Range &r2)
 
static Mat   slice (const Mat &m, const _Range &r0, const _Range &r1, const _Range &r2, const _Range &r3)
 
static std::string  toString (const MatShape & shape , const String &name="")
 
static int  total (const MatShape & shape , int start=-1, int end=-1)
 
void  writeTextGraph (const String &model, const String &output)
  Creates a text representation for a binary network stored in protocol buffer format. More...
 

Function Documentation

◆  clamp() [1/3]

int cv::dnn::clamp ( int ax, int dims )    [inline]

◆  clamp() [2/3]

int cv::dnn::clamp ( int ax, const MatShape & shape )    [inline]

◆  clamp() [3/3]

Range cv::dnn::clamp ( const Range & r, int axisSize )    [inline]
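The int overloads resolve a possibly negative, Python-style axis index into the valid range [0, dims); the Range overload similarly clamps a Range to a valid span of the given axis. A short sketch, assuming the helpers declared in opencv2/dnn/shape_utils.hpp:

#include <opencv2/dnn/shape_utils.hpp>
#include <iostream>

int main()
{
    cv::dnn::MatShape s = cv::dnn::shape(1, 3, 224, 224);  // 4-D shape

    std::cout << cv::dnn::clamp(-1, s) << std::endl;  // last axis -> 3
    std::cout << cv::dnn::clamp(-3, 4) << std::endl;  // -3 with 4 dims -> 1
    std::cout << cv::dnn::clamp(2, 4)  << std::endl;  // already valid -> 2
    return 0;
}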

◆  concat()

static MatShape cv::dnn::concat ( const MatShape & a, const MatShape & b )    [inline, static]

◆  getInferenceEngineBackendType()

cv::String cv::dnn::getInferenceEngineBackendType ( )

Returns Inference Engine internal backend API.

See values of CV_DNN_BACKEND_INFERENCE_ENGINE_* macros.

Default value is controlled through OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE runtime parameter (environment variable).

◆  getInferenceEngineVPUType()

cv::String cv::dnn::getInferenceEngineVPUType ( )

Returns Inference Engine VPU type.

See values of CV_DNN_INFERENCE_ENGINE_VPU_TYPE_* macros.

◆  getPlane()

static Mat cv::dnn::getPlane ( const Mat & m, int n, int cn )    [inline, static]

◆  operator<<()

static std::ostream & cv::dnn::operator<< ( std::ostream & out, const MatShape & shape )    [inline, static]

◆  print()

static void cv::dnn::print ( const MatShape & shape, const String & name = "" )    [inline, static]

◆  resetMyriadDevice()

void cv::dnn::resetMyriadDevice ( )

Releases a Myriad device (bound by OpenCV).

A single Myriad device cannot be shared across multiple processes that use Inference Engine's Myriad plugin.

◆  setInferenceEngineBackendType()

cv::String cv::dnn::setInferenceEngineBackendType ( const cv::String & newBackendType )

Specifies Inference Engine internal backend API.

See values of CV_DNN_BACKEND_INFERENCE_ENGINE_* macros.

Returns
previous value of internal backend API
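A hedged sketch of querying and restoring the Inference Engine backend type. This assumes OpenCV was built with Inference Engine support (otherwise both calls throw) and that the declarations live in opencv2/dnn/utils/inference_engine.hpp, as in recent OpenCV releases.

#include <opencv2/dnn.hpp>
#include <opencv2/dnn/utils/inference_engine.hpp>
#include <iostream>

int main()
{
    // Requires an Inference Engine-enabled build; throws cv::Exception otherwise.
    cv::String current = cv::dnn::getInferenceEngineBackendType();
    std::cout << "current IE backend type: " << current << std::endl;

    // Setting a value returns the previously active one.
    cv::String previous = cv::dnn::setInferenceEngineBackendType(current);
    std::cout << "previous IE backend type: " << previous << std::endl;
    return 0;
}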

◆  shape() [1/5]

static MatShape cv::dnn::shape ( const int * dims, const int n )    [inline, static]

◆  shape() [2/5]

static MatShape cv::dnn::shape ( const Mat & mat )    [inline, static]

◆  shape() [3/5]

static MatShape cv::dnn::shape ( const MatSize & sz )    [inline, static]

◆  shape() [4/5]

static MatShape cv::dnn::shape ( const UMat & mat )    [inline, static]

◆  shape() [5/5]

static MatShape cv::dnn::shape ( int a0, int a1 = -1, int a2 = -1, int a3 = -1 )    [inline, static]
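These helpers from opencv2/dnn/shape_utils.hpp build a MatShape from a Mat, a MatSize, a UMat, a raw dimension array, or explicit sizes (trailing arguments left at -1 are dropped). A small sketch:

#include <opencv2/dnn/shape_utils.hpp>
#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // A 4-D blob-like Mat (1x3x224x224) built directly from its sizes.
    int sz[] = {1, 3, 224, 224};
    cv::Mat blob(4, sz, CV_32F, cv::Scalar(0));

    cv::dnn::MatShape a = cv::dnn::shape(blob);            // from a Mat
    cv::dnn::MatShape b = cv::dnn::shape(1, 3, 224, 224);  // from explicit sizes

    // toString() prints the shape, optionally prefixed with a name.
    std::cout << cv::dnn::toString(a, "a") << std::endl;
    std::cout << cv::dnn::toString(b, "b") << std::endl;
    return 0;
}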

◆  slice() [1/4]

static Mat cv::dnn::slice ( const Mat & m, const _Range & r0 )    [inline, static]

◆  slice() [2/4]

static Mat cv::dnn::slice ( const Mat & m, const _Range & r0, const _Range & r1 )    [inline, static]

◆  slice() [3/4]

static Mat cv::dnn::slice ( const Mat & m, const _Range & r0, const _Range & r1, const _Range & r2 )    [inline, static]

◆  slice() [4/4]

static Mat cv::dnn::slice ( const Mat & m, const _Range & r0, const _Range & r1, const _Range & r2, const _Range & r3 )    [inline, static]
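A sketch of slicing a 4-D blob with _Range (declared in opencv2/dnn/shape_utils.hpp), where _Range(start, size) selects size elements starting at start and Range::all() keeps a whole axis.

#include <opencv2/dnn/shape_utils.hpp>
#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // A 1x3x4x4 blob (NCHW layout) filled with zeros.
    int sz[] = {1, 3, 4, 4};
    cv::Mat blob(4, sz, CV_32F, cv::Scalar(0));

    // Keep image 0 and channel 1, take all rows and columns:
    // _Range(start, size) covers [start, start + size).
    cv::Mat channel = cv::dnn::slice(blob,
                                     cv::dnn::_Range(0, 1),
                                     cv::dnn::_Range(1, 1),
                                     cv::dnn::_Range(cv::Range::all()),
                                     cv::dnn::_Range(cv::Range::all()));

    std::cout << cv::dnn::toString(cv::dnn::shape(channel)) << std::endl;  // 1x1x4x4
    return 0;
}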

◆  toString()

static std::string cv::dnn::toString ( const MatShape & shape, const String & name = "" )    [inline, static]

◆  total()

static int cv::dnn::total ( const MatShape & shape, int start = -1, int end = -1 )    [inline, static]
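total() multiplies the sizes over the half-open range [start, end); with the default arguments of -1 it covers the whole shape, i.e. the number of elements in the blob. A short sketch:

#include <opencv2/dnn/shape_utils.hpp>
#include <iostream>

int main()
{
    cv::dnn::MatShape s = cv::dnn::shape(1, 3, 224, 224);

    std::cout << cv::dnn::total(s) << std::endl;        // 1*3*224*224 = 150528
    std::cout << cv::dnn::total(s, 1, 3) << std::endl;  // axes 1 and 2: 3*224 = 672
    return 0;
}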