cv::cuda::HostMem Class Reference

Class with reference counting wrapping special memory type allocation functions from CUDA. More...

#include <opencv2/core/cuda.hpp>

Public Types

enum AllocType {
PAGE_LOCKED = 1,
SHARED = 2,
WRITE_COMBINED = 4
}

Public Member Functions

HostMem ( HostMem::AllocType alloc_type =HostMem::AllocType::PAGE_LOCKED)
HostMem (const HostMem &m)
HostMem (int rows , int cols , int type , HostMem::AllocType alloc_type =HostMem::AllocType::PAGE_LOCKED)
HostMem ( Size size , int type , HostMem::AllocType alloc_type =HostMem::AllocType::PAGE_LOCKED)
HostMem ( InputArray arr, HostMem::AllocType alloc_type =HostMem::AllocType::PAGE_LOCKED)
creates from host memory with copying data. More...
~HostMem ()
int channels () const
HostMem clone () const
returns a deep copy of the matrix, i.e. the data is copied. More...
void create (int rows , int cols , int type )
allocates new matrix data unless the matrix already has the specified size and type. More...
void create ( Size size , int type )
GpuMat createGpuMatHeader () const
Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting for it. More...
Mat createMatHeader () const
returns a matrix header with disabled reference counting for the HostMem data. More...
int depth () const
size_t elemSize () const
size_t elemSize1 () const
bool empty () const
bool isContinuous () const
HostMem & operator= (const HostMem &m)
void release ()
decrements the reference counter and releases the memory if needed. More...
HostMem reshape (int cn, int rows =0) const
Size size () const
size_t step1 () const
void swap ( HostMem &b)
swaps with another smart pointer. More...
int type () const

Static Public Member Functions

static MatAllocator * getAllocator ( HostMem::AllocType alloc_type =HostMem::AllocType::PAGE_LOCKED)

Public Attributes

AllocType alloc_type
int cols
uchar * data
const uchar * dataend
uchar * datastart
int flags
int * refcount
int rows
size_t step

Detailed Description

Class with reference counting wrapping special memory type allocation functions from CUDA.

Its interface is also Mat-like but with additional memory type parameters.

  • PAGE_LOCKED sets a page locked memory type used commonly for fast and asynchronous uploading/downloading data from/to GPU.
  • SHARED specifies a zero copy memory allocation that enables mapping the host memory to GPU address space, if supported.
  • WRITE_COMBINED sets the write-combined buffer that is not cached by the CPU. Such buffers are used to supply the GPU with data when the GPU only reads it. The advantage is better CPU cache utilization.
Note
Allocation size of such memory types is usually limited. For more details, see the CUDA 2.2 Pinned Memory APIs document or the CUDA C Programming Guide.
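A minimal sketch of the typical use of page-locked memory for asynchronous transfers; the buffer size, type, and stream usage below are illustrative, not part of the API:

#include <opencv2/core/cuda.hpp>

int main()
{
    // Page-locked (pinned) host buffer: its memory can be transferred
    // asynchronously between host and device.
    cv::cuda::HostMem pageLocked(480, 640, CV_8UC3, cv::cuda::HostMem::PAGE_LOCKED);

    // Fill the buffer through a regular Mat header (no data copy).
    cv::Mat frame = pageLocked.createMatHeader();
    frame.setTo(cv::Scalar::all(0));

    cv::cuda::GpuMat d_frame;
    cv::cuda::Stream stream;

    // Asynchronous upload: returns immediately and overlaps with host work.
    d_frame.upload(pageLocked, stream);

    // ... enqueue GPU work on the same stream ...

    stream.waitForCompletion();
    return 0;
}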

Member Enumeration Documentation

AllocType

Enumerator
PAGE_LOCKED
SHARED
WRITE_COMBINED

Constructor & Destructor Documentation

HostMem() [1/5]

cv::cuda::HostMem::HostMem ( HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED )
explicit
Python:
<cuda_HostMem object> = cv.cuda_HostMem( [, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( rows, cols, type[, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( size, type[, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( arr[, alloc_type] )

HostMem() [2/5]

cv::cuda::HostMem::HostMem ( const HostMem & m )
Python:
<cuda_HostMem object> = cv.cuda_HostMem( [, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( rows, cols, type[, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( size, type[, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( arr[, alloc_type] )

HostMem() [3/5]

cv::cuda::HostMem::HostMem ( int rows ,
int cols ,
int type ,
HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED
)
Python:
<cuda_HostMem object> = cv.cuda_HostMem( [, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( rows, cols, type[, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( size, type[, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( arr[, alloc_type] )

HostMem() [4/5]

cv::cuda::HostMem::HostMem ( Size size ,
int type ,
HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED
)
Python:
<cuda_HostMem object> = cv.cuda_HostMem( [, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( rows, cols, type[, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( size, type[, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( arr[, alloc_type] )

HostMem() [5/5]

cv::cuda::HostMem::HostMem ( InputArray arr ,
HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED
)
explicit
Python:
<cuda_HostMem object> = cv.cuda_HostMem( [, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( rows, cols, type[, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( size, type[, alloc_type] )
<cuda_HostMem object> = cv.cuda_HostMem( arr[, alloc_type] )

creates from host memory with copying data.
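A short sketch of this constructor, assuming an already-populated cv::Mat; the helper name copyIntoPinned is illustrative:

#include <opencv2/core/cuda.hpp>

void copyIntoPinned(const cv::Mat& src)
{
    // Deep-copies src into page-locked (pinned) host memory.
    cv::cuda::HostMem pinned(src, cv::cuda::HostMem::PAGE_LOCKED);
    CV_Assert(pinned.rows == src.rows && pinned.cols == src.cols &&
              pinned.type() == src.type());
}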

~HostMem()

cv::cuda::HostMem::~HostMem ( )

Member Function Documentation

channels()

int cv::cuda::HostMem::channels ( ) const
Python:
retval = cv.cuda_HostMem.channels( )

clone()

HostMem cv::cuda::HostMem::clone ( ) const
Python:
retval = cv.cuda_HostMem.clone( )

returns a deep copy of the matrix, i.e. the data is copied.

create() [1/2]

void cv::cuda::HostMem::create ( int rows ,
int cols ,
int type
)
Python:
None = cv.cuda_HostMem.create( rows, cols, type )

allocates new matrix data unless the matrix already has the specified size and type.
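A hedged sketch of the usual preallocation pattern (the loop count and frame size are illustrative): repeated calls with an unchanged size and type do not reallocate.

#include <opencv2/core/cuda.hpp>

void processFrames()
{
    cv::cuda::HostMem buf;                // alloc_type defaults to PAGE_LOCKED
    for (int i = 0; i < 10; ++i)
    {
        // Allocates on the first iteration only; later calls are no-ops
        // because the requested size and type already match.
        buf.create(480, 640, CV_8UC3);
        // ... fill buf.createMatHeader() and upload it ...
    }
}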

create() [2/2]

void cv::cuda::HostMem::create ( Size size ,
int type
)
Python:
None = cv.cuda_HostMem.create( rows, cols, type )

createGpuMatHeader()

GpuMat cv::cuda::HostMem::createGpuMatHeader ( ) const

Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting for it.

This can be done only if memory was allocated with the SHARED flag and if it is supported by the hardware. Laptops often share video and CPU memory, so address spaces can be mapped, which eliminates an extra copy.
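A sketch of the zero-copy pattern described above, guarded by cuda::DeviceInfo::canMapHostMemory(); the buffer size and type are arbitrary:

#include <opencv2/core/cuda.hpp>

void zeroCopy()
{
    // Zero-copy mapping requires hardware support for mapped host memory.
    if (!cv::cuda::DeviceInfo().canMapHostMemory())
        return;

    cv::cuda::HostMem shared(480, 640, CV_8UC1, cv::cuda::HostMem::SHARED);

    // Both headers refer to the same physical memory: data written through
    // the Mat header is visible to kernels working on the GpuMat header.
    cv::Mat h = shared.createMatHeader();
    cv::cuda::GpuMat d = shared.createGpuMatHeader();

    // ... use h on the CPU and d on the GPU without explicit copies ...
    (void)h; (void)d;
}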

createMatHeader()

Mat cv::cuda::HostMem::createMatHeader ( ) const
Python:
retval = cv.cuda_HostMem.createMatHeader( )

returns a matrix header with disabled reference counting for the HostMem data.
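Because the returned header does not take a reference, the HostMem object must outlive it. A hedged illustration of the pitfall (function name is made up for the example):

#include <opencv2/core/cuda.hpp>

cv::Mat danglingHeader()
{
    cv::cuda::HostMem mem(100, 100, CV_32FC1);
    // The header shares mem's buffer but does not add a reference,
    // so the returned Mat dangles once mem goes out of scope here.
    return mem.createMatHeader();
}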

depth()

int cv::cuda::HostMem::depth ( ) const
Python:
retval = cv.cuda_HostMem.depth( )

elemSize()

size_t cv::cuda::HostMem::elemSize ( ) const
Python:
retval = cv.cuda_HostMem.elemSize( )

elemSize1()

size_t cv::cuda::HostMem::elemSize1 ( ) const
Python:
retval = cv.cuda_HostMem.elemSize1( )

empty()

bool cv::cuda::HostMem::empty ( ) const
Python:
retval = cv.cuda_HostMem.empty( )

getAllocator()

static MatAllocator * cv::cuda::HostMem::getAllocator ( HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED )
static
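A common use of this allocator, sketched under the assumption that all host matrices in the program should be transferable asynchronously, is to install it as the default cv::Mat allocator:

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    // Every cv::Mat created after this call is backed by page-locked memory,
    // so uploads/downloads to the GPU can run asynchronously.
    cv::Mat::setDefaultAllocator(
        cv::cuda::HostMem::getAllocator(cv::cuda::HostMem::PAGE_LOCKED));

    cv::Mat frame(480, 640, CV_8UC3);   // allocated from pinned memory
    // ...
    return 0;
}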

isContinuous()

bool cv::cuda::HostMem::isContinuous ( ) const
Python:
retval = cv.cuda_HostMem.isContinuous( )

operator=()

HostMem & cv::cuda::HostMem::operator= ( const HostMem & m )

release()

void cv::cuda::HostMem::release ( )

decrements the reference counter and releases the memory if needed.

reshape()

HostMem cv::cuda::HostMem::reshape ( int cn ,
int rows = 0
) const
Python:
retval = cv.cuda_HostMem.reshape( cn[, rows] )

creates an alternative HostMem header for the same data, with a different number of channels and/or a different number of rows.
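A brief sketch (the matrix size is arbitrary): reinterpreting a 3-channel matrix as single-channel triples the number of columns while leaving the data untouched.

#include <opencv2/core/cuda.hpp>

void reshapeExample()
{
    cv::cuda::HostMem rgb(100, 100, CV_8UC3);

    // Same underlying data, viewed as a 100x300 single-channel matrix.
    cv::cuda::HostMem flat = rgb.reshape(1);
    CV_Assert(flat.rows == 100 && flat.cols == 300 && flat.channels() == 1);
}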

size()

Size cv::cuda::HostMem::size ( ) const
Python:
retval = cv.cuda_HostMem.size( )

step1()

size_t cv::cuda::HostMem::step1 ( ) const
Python:
retval = cv.cuda_HostMem.step1( )

swap()

void cv::cuda::HostMem::swap ( HostMem & b )
Python:
None = cv.cuda_HostMem.swap( b )

swaps with another smart pointer.

type()

int cv::cuda::HostMem::type ( ) const
Python:
retval = cv.cuda_HostMem.type( )

Member Data Documentation

alloc_type

AllocType cv::cuda::HostMem::alloc_type

cols

int cv::cuda::HostMem::cols

data

uchar * cv::cuda::HostMem::data

dataend

const uchar * cv::cuda::HostMem::dataend

datastart

uchar * cv::cuda::HostMem::datastart

flags

int cv::cuda::HostMem::flags

refcount

int* cv::cuda::HostMem::refcount

rows

int cv::cuda::HostMem::rows

step

size_t cv::cuda::HostMem::step

The documentation for this class was generated from the following file:
opencv2/core/cuda.hpp