EstervQrCode 2.0.0
Library for QR code manipulation
cuda.hpp
1/*M///////////////////////////////////////////////////////////////////////////////////////
2//
3// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4//
5// By downloading, copying, installing or using the software you agree to this license.
6// If you do not agree to this license, do not download, install,
7// copy or use the software.
8//
9//
10// License Agreement
11// For Open Source Computer Vision Library
12//
13// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
16// Third party copyrights are property of their respective owners.
17//
18// Redistribution and use in source and binary forms, with or without modification,
19// are permitted provided that the following conditions are met:
20//
21// * Redistribution's of source code must retain the above copyright notice,
22// this list of conditions and the following disclaimer.
23//
24// * Redistribution's in binary form must reproduce the above copyright notice,
25// this list of conditions and the following disclaimer in the documentation
26// and/or other materials provided with the distribution.
27//
28// * The name of the copyright holders may not be used to endorse or promote products
29// derived from this software without specific prior written permission.
30//
31// This software is provided by the copyright holders and contributors "as is" and
32// any express or implied warranties, including, but not limited to, the implied
33// warranties of merchantability and fitness for a particular purpose are disclaimed.
34// In no event shall the Intel Corporation or contributors be liable for any direct,
35// indirect, incidental, special, exemplary, or consequential damages
36// (including, but not limited to, procurement of substitute goods or services;
37// loss of use, data, or profits; or business interruption) however caused
38// and on any theory of liability, whether in contract, strict liability,
39// or tort (including negligence or otherwise) arising in any way out of
40// the use of this software, even if advised of the possibility of such damage.
41//
42//M*/
43
44#ifndef OPENCV_CORE_CUDA_HPP
45#define OPENCV_CORE_CUDA_HPP
46
47#ifndef __cplusplus
48# error cuda.hpp header must be compiled as C++
49#endif
50
51#include "opencv2/core.hpp"
52#include "opencv2/core/cuda_types.hpp"
53
65namespace cv { namespace cuda {
66
69
70//===================================================================================
71// GpuMat
72//===================================================================================
73
106{
107public:
109 {
110 public:
111 virtual ~Allocator() {}
112
113 // allocator must fill data, step and refcount fields
114 virtual bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize) = 0;
115 virtual void free(GpuMat* mat) = 0;
116 };
117
121
123 CV_WRAP explicit GpuMat(GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
124
126 CV_WRAP GpuMat(int rows, int cols, int type, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
127 CV_WRAP GpuMat(Size size, int type, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
128
130 CV_WRAP GpuMat(int rows, int cols, int type, Scalar s, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
131 CV_WRAP GpuMat(Size size, int type, Scalar s, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
132
135
137 GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);
138 GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);
139
141 CV_WRAP GpuMat(const GpuMat& m, Range rowRange, Range colRange);
142 CV_WRAP GpuMat(const GpuMat& m, Rect roi);
143
145 CV_WRAP explicit GpuMat(InputArray arr, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
146
149
151 GpuMat& operator =(const GpuMat& m);
152
154 CV_WRAP void create(int rows, int cols, int type);
156
159
162
168 CV_WRAP void upload(InputArray arr);
169
178 CV_WRAP void upload(InputArray arr, Stream& stream);
179
186
195 CV_WRAP void download(OutputArray dst, Stream& stream) const;
196
199
201 void copyTo(OutputArray dst) const;
204 copyTo(static_cast<OutputArray>(dst));
205 }
206
208 void copyTo(OutputArray dst, Stream& stream) const;
210 CV_WRAP void copyTo(CV_OUT GpuMat& dst, Stream& stream) const {
211 copyTo(static_cast<OutputArray>(dst), stream);
212 }
213
215 void copyTo(OutputArray dst, InputArray mask) const;
218 copyTo(static_cast<OutputArray>(dst), static_cast<InputArray>(mask));
219 }
220
222 void copyTo(OutputArray dst, InputArray mask, Stream& stream) const;
224 CV_WRAP void copyTo(CV_OUT GpuMat& dst, GpuMat& mask, Stream& stream) const {
225 copyTo(static_cast<OutputArray>(dst), static_cast<InputArray>(mask), stream);
226 }
227
230
233
235 CV_WRAP GpuMat& setTo(Scalar s, InputArray mask);
236
238 CV_WRAP GpuMat& setTo(Scalar s, InputArray mask, Stream& stream);
239
241 void convertTo(OutputArray dst, int rtype) const;
242
244 void convertTo(OutputArray dst, int rtype, Stream& stream) const;
246 CV_WRAP void convertTo(CV_OUT GpuMat& dst, int rtype, Stream& stream) const {
247 convertTo(static_cast<OutputArray>(dst), rtype, stream);
248 }
249
251 void convertTo(OutputArray dst, int rtype, double alpha, double beta = 0.0) const;
253 CV_WRAP void convertTo(CV_OUT GpuMat& dst, int rtype, double alpha = 1.0, double beta = 0.0) const {
254 convertTo(static_cast<OutputArray>(dst), rtype, alpha, beta);
255 }
256
258 void convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const;
259
261 void convertTo(OutputArray dst, int rtype, double alpha, double beta, Stream& stream) const;
263 CV_WRAP void convertTo(CV_OUT GpuMat& dst, int rtype, double alpha, double beta, Stream& stream) const {
264 convertTo(static_cast<OutputArray>(dst), rtype, alpha, beta, stream);
265 }
266
267 CV_WRAP void assignTo(GpuMat& m, int type = -1) const;
268
270 uchar* ptr(int y = 0);
271 const uchar* ptr(int y = 0) const;
272
274 template<typename _Tp> _Tp* ptr(int y = 0);
275 template<typename _Tp> const _Tp* ptr(int y = 0) const;
276
277 template <typename _Tp> operator PtrStepSz<_Tp>() const;
278 template <typename _Tp> operator PtrStep<_Tp>() const;
279
281 CV_WRAP GpuMat row(int y) const;
282
284 CV_WRAP GpuMat col(int x) const;
285
287 CV_WRAP GpuMat rowRange(int startrow, int endrow) const;
289
291 CV_WRAP GpuMat colRange(int startcol, int endcol) const;
293
295 GpuMat operator ()(Range rowRange, Range colRange) const;
296 GpuMat operator ()(Rect roi) const;
297
300 CV_WRAP GpuMat reshape(int cn, int rows = 0) const;
301
303 CV_WRAP void locateROI(Size& wholeSize, Point& ofs) const;
304
306 CV_WRAP GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright);
307
310 CV_WRAP bool isContinuous() const;
311
313 CV_WRAP size_t elemSize() const;
314
316 CV_WRAP size_t elemSize1() const;
317
319 CV_WRAP int type() const;
320
322 CV_WRAP int depth() const;
323
325 CV_WRAP int channels() const;
326
328 CV_WRAP size_t step1() const;
329
332
334 CV_WRAP bool empty() const;
335
336 // returns pointer to cuda memory
337 CV_WRAP void* cudaPtr() const;
338
341
348 int flags;
349
351 int rows, cols;
352
354 CV_PROP size_t step;
355
358
362
366
369};
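Usage sketch (not part of the header): a blocking upload/convert/download round trip through GpuMat. It assumes OpenCV core was built with CUDA support and a CUDA-capable device is present; the wrapper function name is illustrative.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void gpuRoundTrip(const cv::Mat& host)
{
    // Allocate device memory and copy the host matrix to it (blocking call).
    cv::cuda::GpuMat d_src;
    d_src.upload(host);

    // Convert to 32-bit float on the device (blocking call).
    cv::cuda::GpuMat d_dst;
    d_src.convertTo(d_dst, CV_32F);

    // Copy the result back to host memory (blocking call).
    cv::Mat result;
    d_dst.download(result);
}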
370
372{
373 explicit GpuData(size_t _size);
375
376 GpuData(const GpuData&) = delete;
377 GpuData& operator=(const GpuData&) = delete;
378
379 GpuData(GpuData&&) = delete;
381
383 size_t size;
384};
385
387{
388public:
392
395
398
405
419
428
429 void release();
430
431 void swap(GpuMatND& m) noexcept;
432
439
443 GpuMatND clone(Stream& stream) const;
444
451
457 GpuMat createGpuMatHeader(IndexArray idx, Range rowRange, Range colRange) const;
458
466
472 GpuMat operator()(IndexArray idx, Range rowRange, Range colRange) const;
473
478 operator GpuMat() const;
479
480 GpuMatND(const GpuMatND&) = default;
481 GpuMatND& operator=(const GpuMatND&) = default;
482
483#if defined(__GNUC__) && __GNUC__ < 5
484 // error: function '...' defaulted on its first declaration with an exception-specification
485 // that differs from the implicit declaration '...'
486
487 GpuMatND(GpuMatND&&) = default;
488 GpuMatND& operator=(GpuMatND&&) = default;
489#else
490 GpuMatND(GpuMatND&&) noexcept = default;
491 GpuMatND& operator=(GpuMatND&&) noexcept = default;
492#endif
493
494 void upload(InputArray src);
495 void upload(InputArray src, Stream& stream);
497 void download(OutputArray dst, Stream& stream) const;
498
501 bool isContinuous() const;
502
504 bool isSubmatrix() const;
505
507 size_t elemSize() const;
508
510 size_t elemSize1() const;
511
513 bool empty() const;
514
516 bool external() const;
517
520
522 size_t total() const;
523
525 size_t totalMemSize() const;
526
528 int type() const;
529
530private:
532 void setFields(SizeArray size, int type, StepArray step = StepArray());
533
534public:
541 int flags;
542
544 int dims;
545
548
553
554private:
559
565 uchar* data;
566
571 size_t offset;
572};
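Usage sketch (not part of the header), assuming the SizeArray/IndexArray aliases are std::vector-based as in current OpenCV: create a 3-D GpuMatND, fill it from a host Mat, and view one plane as a 2-D GpuMat without copying.

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void gpuMatNdSketch()
{
    // A 4 x 64 x 64 single-channel float volume on the device.
    cv::cuda::GpuMatND vol(std::vector<int>{4, 64, 64}, CV_32FC1);

    // Blocking upload from a host array of the same shape.
    cv::Mat host(std::vector<int>{4, 64, 64}, CV_32FC1, cv::Scalar(0));
    vol.upload(host);

    // Header for plane index 2; rowRange/colRange select the full plane.
    cv::cuda::GpuMat plane = vol.createGpuMatHeader(
        std::vector<int>{2}, cv::Range::all(), cv::Range::all());
}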
573
586
597
606CV_EXPORTS_W GpuMat inline createGpuMatFromCudaMemory(int rows, int cols, int type, size_t cudaMemoryAddress, size_t step = Mat::AUTO_STEP) {
607 return GpuMat(rows, cols, type, reinterpret_cast<void*>(cudaMemoryAddress), step);
608}
609
617CV_EXPORTS_W inline GpuMat createGpuMatFromCudaMemory(Size size, int type, size_t cudaMemoryAddress, size_t step = Mat::AUTO_STEP) {
618 return GpuMat(size, type, reinterpret_cast<void*>(cudaMemoryAddress), step);
619}
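createGpuMatFromCudaMemory exists mainly for the bindings, but it can also wrap device memory allocated by another CUDA library. A sketch (the function name is illustrative; the pointer is assumed to reference pitched device memory that outlives the returned header, since no ownership is taken):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

cv::cuda::GpuMat wrapDevicePointer(void* devicePtr, int rows, int cols, size_t stepBytes)
{
    // Build a GpuMat header over the existing allocation; no data is copied.
    return cv::cuda::createGpuMatFromCudaMemory(
        rows, cols, CV_8UC1, reinterpret_cast<size_t>(devicePtr), stepBytes);
}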
620
739{
740public:
741
743 CV_WRAP explicit BufferPool(Stream& stream);
744
747
748// WARNING: unreachable code using Ninja
749#if defined _MSC_VER && _MSC_VER >= 1920
750#pragma warning(push)
751#pragma warning(disable: 4702)
752#endif
755#if defined _MSC_VER && _MSC_VER >= 1920
756#pragma warning(pop)
757#endif
758
760 CV_WRAP Ptr<GpuMat::Allocator> getAllocator() const { return allocator_; }
761
762private:
763 Ptr<GpuMat::Allocator> allocator_;
764};
765
768CV_EXPORTS_W void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount);
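Usage sketch for the buffer pool (not part of the header): setBufferPoolUsage and setBufferPoolConfig must be called before the first Stream is created; afterwards getBuffer serves allocations from the pre-reserved stack instead of cudaMalloc. The stack sizes and buffer shapes below are arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void bufferPoolSketch()
{
    // Enable stack-based allocation, then reserve two 64 MB stacks
    // on the current device.
    cv::cuda::setBufferPoolUsage(true);
    cv::cuda::setBufferPoolConfig(cv::cuda::getDevice(), 64 * 1024 * 1024, 2);

    cv::cuda::Stream stream;
    cv::cuda::BufferPool pool(stream);

    // These buffers come from the stack associated with the stream.
    cv::cuda::GpuMat buf1 = pool.getBuffer(1024, 1024, CV_8UC1);
    cv::cuda::GpuMat buf2 = pool.getBuffer(cv::Size(1920, 1080), CV_32FC1);
}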
769
770//===================================================================================
771// HostMem
772//===================================================================================
773
790{
791public:
792 enum AllocType { PAGE_LOCKED = 1, SHARED = 2, WRITE_COMBINED = 4 };
793
794 static MatAllocator* getAllocator(HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
795
796 CV_WRAP explicit HostMem(HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
797
798 HostMem(const HostMem& m);
799
800 CV_WRAP HostMem(int rows, int cols, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
801 CV_WRAP HostMem(Size size, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
802
804 CV_WRAP explicit HostMem(InputArray arr, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
805
807
808 HostMem& operator =(const HostMem& m);
809
812
815
817 CV_WRAP void create(int rows, int cols, int type);
818 void create(Size size, int type);
819
822 CV_WRAP HostMem reshape(int cn, int rows = 0) const;
823
825 void release();
826
829
838
839 // Please see cv::Mat for descriptions
840 CV_WRAP bool isContinuous() const;
841 CV_WRAP size_t elemSize() const;
842 CV_WRAP size_t elemSize1() const;
843 CV_WRAP int type() const;
844 CV_WRAP int depth() const;
845 CV_WRAP int channels() const;
846 CV_WRAP size_t step1() const;
848 CV_WRAP bool empty() const;
849
850 // Please see cv::Mat for descriptions
851 int flags;
852 int rows, cols;
853 CV_PROP size_t step;
854
857
860
862};
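Usage sketch (not part of the header): page-locked host memory lets uploads proceed asynchronously with respect to the CPU. The frame size and fill value are illustrative.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void pinnedUploadSketch()
{
    // Page-locked host buffer.
    cv::cuda::HostMem pinned(1080, 1920, CV_8UC3, cv::cuda::HostMem::PAGE_LOCKED);

    // Fill it through an ordinary Mat header (no copy, no reference counting).
    cv::Mat frame = pinned.createMatHeader();
    frame.setTo(cv::Scalar::all(0));

    // Non-blocking upload from the pinned buffer.
    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_frame;
    d_frame.upload(pinned, stream);
    stream.waitForCompletion();
}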
863
869
875
876//===================================================================================
877// Stream
878//===================================================================================
879
908{
909 typedef void (Stream::*bool_type)() const;
910 void this_type_does_not_support_comparisons() const {}
911
912public:
913 typedef void (*StreamCallback)(int status, void* userData);
914
917
920
931 CV_WRAP Stream(const size_t cudaFlags);
932
936
940
943 CV_WRAP void waitEvent(const Event& event);
944
953 void enqueueHostCallback(StreamCallback callback, void* userData);
954
956 CV_WRAP static Stream& Null();
957
959 operator bool_type() const;
960
962 CV_WRAP void* cudaPtr() const;
963
964 class Impl;
965
966private:
967 Ptr<Impl> impl_;
968 Stream(const Ptr<Impl>& impl);
969
970 friend struct StreamAccessor;
971 friend class BufferPool;
972 friend class DefaultDeviceInitializer;
973};
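Usage sketch (not part of the header): queueing an upload, a conversion and a download on one Stream so they run asynchronously with respect to the calling thread. Both HostMem buffers are assumed to be page-locked so the transfers can actually overlap.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void asyncConvert(const cv::cuda::HostMem& pinnedSrc, cv::cuda::HostMem& pinnedDst)
{
    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_src, d_dst;

    // All three calls are enqueued on the stream and return immediately.
    d_src.upload(pinnedSrc, stream);
    d_src.convertTo(d_dst, CV_32F, 1.0 / 255.0, stream);
    d_dst.download(pinnedDst, stream);

    // Block the CPU until everything queued above has finished.
    stream.waitForCompletion();
}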
974
975
980CV_EXPORTS_W Stream wrapStream(size_t cudaStreamMemoryAddress);
981
983{
984public:
986 {
987 DEFAULT = 0x00,
988 BLOCKING_SYNC = 0x01,
989 DISABLE_TIMING = 0x02,
990 INTERPROCESS = 0x04
991 };
992
993 CV_WRAP explicit Event(const Event::CreateFlags flags = Event::CreateFlags::DEFAULT);
994
996 CV_WRAP void record(Stream& stream = Stream::Null());
997
1000
1003
1005 CV_WRAP static float elapsedTime(const Event& start, const Event& end);
1006
1007 class Impl;
1008
1009private:
1010 Ptr<Impl> impl_;
1011 Event(const Ptr<Impl>& impl);
1012
1013 friend struct EventAccessor;
1014};
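Usage sketch (not part of the header): timing device work with a pair of Events recorded on the same Stream. elapsedTime requires that neither event was created with DISABLE_TIMING, which is the default shown here.

#include <cstdio>
#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void timeUpload(const cv::Mat& host)
{
    cv::cuda::Stream stream;
    cv::cuda::Event start, stop;
    cv::cuda::GpuMat d_img;

    start.record(stream);        // mark the beginning on the stream
    d_img.upload(host, stream);  // enqueued asynchronously
    stop.record(stream);         // mark the end on the stream

    stop.waitForCompletion();    // wait until the stop event has fired
    std::printf("upload took %.3f ms\n",
                cv::cuda::Event::elapsedTime(start, stop));
}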
1016
1017
1018
1019//===================================================================================
1020// Initialization & Info
1021//===================================================================================
1022
1023
1025
1033
1040CV_EXPORTS_W void setDevice(int device);
1041
1045
1052
1074
1077
1085{
1086public:
1091 static bool builtWith(FeatureSet feature_set);
1092
1099 CV_WRAP static bool has(int major, int minor);
1100 CV_WRAP static bool hasPtx(int major, int minor);
1101 CV_WRAP static bool hasBin(int major, int minor);
1102
1103 CV_WRAP static bool hasEqualOrLessPtx(int major, int minor);
1104 CV_WRAP static bool hasEqualOrGreater(int major, int minor);
1105 CV_WRAP static bool hasEqualOrGreaterPtx(int major, int minor);
1106 CV_WRAP static bool hasEqualOrGreaterBin(int major, int minor);
1107};
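Usage sketch (not part of the header): combining the compile-time check from TargetArchs with the run-time check from deviceSupports before relying on a feature such as native double precision. The wrapper name is illustrative.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

bool canUseNativeDouble()
{
    if (cv::cuda::getCudaEnabledDeviceCount() == 0)
        return false;  // no CUDA device available at all

    // Was code for double-capable architectures compiled in,
    // and does the current device actually provide the feature?
    return cv::cuda::TargetArchs::builtWith(cv::cuda::NATIVE_DOUBLE)
        && cv::cuda::deviceSupports(cv::cuda::NATIVE_DOUBLE);
}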
1108
1112{
1113public:
1116
1124 CV_WRAP DeviceInfo(int device_id);
1125
1128 CV_WRAP int deviceID() const;
1129
1131 const char* name() const;
1132
1134 CV_WRAP size_t totalGlobalMem() const;
1135
1138
1141
1143 CV_WRAP int warpSize() const;
1144
1146 CV_WRAP size_t memPitch() const;
1147
1150
1153
1156
1158 CV_WRAP int clockRate() const;
1159
1161 CV_WRAP size_t totalConstMem() const;
1162
1165
1168
1171
1174
1177
1180
1182 CV_WRAP bool integrated() const;
1183
1186
1194
1197
1200
1203
1206
1209
1212
1215
1218
1221
1224
1227
1230
1233
1236
1239
1242
1245
1248
1251
1254
1257
1260
1262 CV_WRAP bool ECCEnabled() const;
1263
1265 CV_WRAP int pciBusID() const;
1266
1269
1272
1274 CV_WRAP bool tccDriver() const;
1275
1278
1281
1284
1287
1290
1293
1295 CV_WRAP void queryMemory(size_t& totalMemory, size_t& freeMemory) const;
1296 CV_WRAP size_t freeMemory() const;
1297 CV_WRAP size_t totalMemory() const;
1298
1305 bool supports(FeatureSet feature_set) const;
1306
1312 CV_WRAP bool isCompatible() const;
1313
1314private:
1315 int device_id_;
1316};
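Usage sketch (not part of the header): enumerating the installed CUDA devices and printing a few DeviceInfo properties. The output formatting is illustrative.

#include <cstdio>
#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void listCudaDevices()
{
    const int count = cv::cuda::getCudaEnabledDeviceCount();
    for (int i = 0; i < count; ++i)
    {
        const cv::cuda::DeviceInfo info(i);
        std::printf("#%d %s, %.0f MB, compute %d.%d, %s\n",
                    i, info.name(),
                    info.totalGlobalMem() / (1024.0 * 1024.0),
                    info.majorVersion(), info.minorVersion(),
                    info.isCompatible() ? "usable" : "not built for this device");
    }
}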
1317
1320
1329
1331
1332}} // namespace cv { namespace cuda {
1333
1334
1335#include "opencv2/core/cuda.inl.hpp"
1336
1337#endif /* OPENCV_CORE_CUDA_HPP */
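A final usage sketch (not part of the header): registerPageLocked pins the memory of an existing Mat so that uploads from it behave like uploads from HostMem; the buffer should be unregistered before being released or reused as ordinary pageable memory.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void withRegisteredMemory(cv::Mat& frame)
{
    // Pin the existing host buffer for the device(s).
    cv::cuda::registerPageLocked(frame);

    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_frame;
    d_frame.upload(frame, stream);   // non-blocking upload from pinned memory
    stream.waitForCompletion();

    // Make the memory pageable again.
    cv::cuda::unregisterPageLocked(frame);
}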
BufferPool for use with CUDA streams.
Definition cuda.hpp:739
CV_WRAP GpuMat getBuffer(Size size, int type)
Allocates a new GpuMat of given size and type.
Definition cuda.hpp:754
CV_WRAP BufferPool(Stream &stream)
Gets the BufferPool for the given stream.
CV_WRAP Ptr< GpuMat::Allocator > getAllocator() const
Returns the allocator associated with the stream.
Definition cuda.hpp:760
CV_WRAP GpuMat getBuffer(int rows, int cols, int type)
Allocates a new GpuMat of given size and type.
Class providing functionality for querying the specified GPU properties.
Definition cuda.hpp:1112
CV_WRAP size_t texturePitchAlignment() const
pitch alignment requirement for texture references bound to pitched memory
CV_WRAP size_t memPitch() const
maximum pitch in bytes allowed by memory copies
CV_WRAP Vec3i maxTexture2DLinear() const
maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory
CV_WRAP size_t surfaceAlignment() const
alignment requirements for surfaces
CV_WRAP Vec3i maxSurface3D() const
maximum 3D surface dimensions
CV_WRAP int warpSize() const
warp size in threads
CV_WRAP int memoryBusWidth() const
global memory bus width in bits
CV_WRAP bool tccDriver() const
true if device is a Tesla device using TCC driver, false otherwise
CV_WRAP Vec2i maxTexture2DGather() const
maximum 2D texture dimensions if texture gather operations have to be performed
CV_WRAP int maxSurfaceCubemap() const
maximum Cubemap surface dimensions
CV_WRAP Vec3i maxSurface2DLayered() const
maximum 2D layered surface dimensions
CV_WRAP int maxTexture1DLinear() const
maximum size for 1D textures bound to linear memory
CV_WRAP int clockRate() const
clock frequency in kilohertz
CV_WRAP Vec2i maxTexture1DLayered() const
maximum 1D layered texture dimensions
CV_WRAP int memoryClockRate() const
peak memory clock frequency in kilohertz
CV_WRAP size_t freeMemory() const
CV_WRAP Vec3i maxGridSize() const
maximum size of each dimension of a grid
CV_WRAP size_t totalConstMem() const
constant memory available on device in bytes
CV_WRAP int pciDomainID() const
PCI domain ID of the device.
CV_WRAP int deviceID() const
Returns system index of the CUDA device starting with 0.
CV_WRAP int minorVersion() const
minor compute capability
CV_WRAP int maxThreadsPerBlock() const
maximum number of threads per block
CV_WRAP int maxTexture1D() const
maximum 1D texture size
CV_WRAP int pciDeviceID() const
PCI device ID of the device.
CV_WRAP Vec2i maxTexture2DMipmap() const
maximum 2D mipmapped texture dimensions
CV_WRAP bool isCompatible() const
Checks the CUDA module and device compatibility.
CV_WRAP Vec2i maxSurfaceCubemapLayered() const
maximum Cubemap layered surface dimensions
CV_WRAP Vec3i maxTexture2DLayered() const
maximum 2D layered texture dimensions
CV_WRAP int maxThreadsPerMultiProcessor() const
maximum resident threads per multiprocessor
bool supports(FeatureSet feature_set) const
Provides information on CUDA feature support.
CV_WRAP size_t totalGlobalMem() const
global memory available on device in bytes
const char * name() const
ASCII string identifying device.
CV_WRAP Vec2i maxSurface2D() const
maximum 2D surface dimensions
CV_WRAP int multiProcessorCount() const
number of multiprocessors on device
CV_WRAP Vec3i maxThreadsDim() const
maximum size of each dimension of a block
CV_WRAP int pciBusID() const
PCI bus ID of the device.
CV_WRAP Vec3i maxTexture3D() const
maximum 3D texture dimensions
CV_WRAP bool ECCEnabled() const
device has ECC support enabled
CV_WRAP size_t sharedMemPerBlock() const
shared memory available per block in bytes
CV_WRAP size_t textureAlignment() const
alignment requirement for textures
CV_WRAP int maxTexture1DMipmap() const
maximum 1D mipmapped texture size
CV_WRAP bool unifiedAddressing() const
device shares a unified address space with the host
CV_WRAP Vec2i maxSurface1DLayered() const
maximum 1D layered surface dimensions
CV_WRAP int regsPerBlock() const
32-bit registers available per block
CV_WRAP int majorVersion() const
major compute capability
CV_WRAP bool integrated() const
device is integrated as opposed to discrete
CV_WRAP bool canMapHostMemory() const
device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer
CV_WRAP DeviceInfo(int device_id)
The constructors.
CV_WRAP int asyncEngineCount() const
number of asynchronous engines
CV_WRAP bool kernelExecTimeoutEnabled() const
specifies whether there is a run-time limit on kernel execution
CV_WRAP int l2CacheSize() const
size of L2 cache in bytes
CV_WRAP int maxSurface1D() const
maximum 1D surface size
CV_WRAP void queryMemory(size_t &totalMemory, size_t &freeMemory) const
gets free and total device memory
CV_WRAP Vec2i maxTextureCubemapLayered() const
maximum Cubemap layered texture dimensions
CV_WRAP Vec2i maxTexture2D() const
maximum 2D texture dimensions
CV_WRAP size_t totalMemory() const
CV_WRAP DeviceInfo::ComputeMode computeMode() const
compute mode
CV_WRAP bool concurrentKernels() const
device can possibly execute multiple kernels concurrently
CV_WRAP int maxTextureCubemap() const
maximum Cubemap texture dimensions
CV_WRAP DeviceInfo()
creates DeviceInfo object for the current GPU
ComputeMode
Definition cuda.hpp:1188
@ ComputeModeProhibited
Definition cuda.hpp:1191
@ ComputeModeDefault
Definition cuda.hpp:1189
@ ComputeModeExclusive
Definition cuda.hpp:1190
Definition cuda.hpp:983
static CV_WRAP float elapsedTime(const Event &start, const Event &end)
computes the elapsed time between events
CV_WRAP void record(Stream &stream=Stream::Null())
records an event
CreateFlags
Definition cuda.hpp:986
CV_WRAP Event(const Event::CreateFlags flags=Event::CreateFlags::DEFAULT)
CV_WRAP void waitForCompletion()
waits for an event to complete
CV_WRAP bool queryIfComplete() const
queries an event's status
Definition cuda.hpp:387
GpuMatND()
default constructor
GpuMatND clone(Stream &stream) const
GpuMatND(const GpuMatND &)=default
int dims
matrix dimensionality
Definition cuda.hpp:544
bool isSubmatrix() const
returns true if the matrix is a sub-matrix of another matrix
~GpuMatND()
destructor
bool external() const
returns true if not empty and points to external (user-allocated) GPU memory
size_t total() const
returns the total number of array elements
GpuMatND clone() const
Creates a full copy of the array and the underlying data. The method creates a full copy of the array...
size_t elemSize1() const
returns the size of element channel in bytes
StepArray step
Definition cuda.hpp:552
void download(OutputArray dst) const
bool isContinuous() const
int flags
Definition cuda.hpp:541
GpuMat createGpuMatHeader(IndexArray idx, Range rowRange, Range colRange) const
Creates a GpuMat header for a 2D plane part of an n-dim matrix.
GpuMatND & operator=(const GpuMatND &)=default
size_t elemSize() const
returns element size in bytes
GpuMat createGpuMatHeader() const
GpuMatND(GpuMatND &&) noexcept=default
GpuMat operator()(IndexArray idx, Range rowRange, Range colRange) const
Extracts a 2D plane part of an n-dim matrix. It differs from createGpuMatHeader(IndexArray,...
size_t totalMemSize() const
returns the size of underlying memory in bytes
GpuMatND(SizeArray size, int type)
int type() const
returns element type
SizeArray size
shape of this array
Definition cuda.hpp:547
void upload(InputArray src, Stream &stream)
GpuMatND operator()(const std::vector< Range > &ranges) const
Extracts a sub-matrix. The operator makes a new header for the specified sub-array of *this....
void create(SizeArray size, int type)
Allocates GPU memory. Suppose there is some GPU memory already allocated. In that case,...
GpuMatND(SizeArray size, int type, void *data, StepArray step=StepArray())
uchar * getDevicePtr() const
returns pointer to the first byte of the GPU memory
bool empty() const
returns true if data is null
void download(OutputArray dst, Stream &stream) const
void upload(InputArray src)
void swap(GpuMatND &m) noexcept
Definition cuda.hpp:109
virtual ~Allocator()
Definition cuda.hpp:111
virtual bool allocate(GpuMat *mat, int rows, int cols, size_t elemSize)=0
virtual void free(GpuMat *mat)=0
Base storage class for GPU memory with reference counting.
Definition cuda.hpp:106
CV_WRAP void assignTo(GpuMat &m, int type=-1) const
CV_WRAP GpuMat(int rows, int cols, int type, Scalar s, GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
constructs GpuMat and fills it with the specified value _s
void convertTo(OutputArray dst, int rtype, Stream &stream) const
converts GpuMat to another datatype (Non-Blocking call)
CV_WRAP GpuMat(Size size, int type, GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
Allocator * allocator
allocator
Definition cuda.hpp:368
CV_WRAP GpuMat colRange(int startcol, int endcol) const
... for the specified column span
CV_WRAP void create(int rows, int cols, int type)
allocates new GpuMat data unless the GpuMat already has specified size and type
CV_WRAP void convertTo(CV_OUT GpuMat &dst, int rtype, Stream &stream) const
bindings overload which converts GpuMat to another datatype (Non-Blocking call)
Definition cuda.hpp:246
CV_PROP size_t step
a distance between successive rows in bytes; includes the gap if any
Definition cuda.hpp:354
CV_WRAP void * cudaPtr() const
CV_WRAP GpuMat(Size size, int type, Scalar s, GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
CV_WRAP int depth() const
returns element depth
CV_WRAP GpuMat row(int y) const
returns a new GpuMat header for the specified row
CV_WRAP GpuMat & setTo(Scalar s, InputArray mask)
sets some of the GpuMat elements to s, according to the mask (Blocking call)
void convertTo(OutputArray dst, int rtype) const
converts GpuMat to another datatype (Blocking call)
CV_WRAP size_t elemSize1() const
returns the size of element channel in bytes
void convertTo(OutputArray dst, int rtype, double alpha, double beta, Stream &stream) const
converts GpuMat to another datatype with scaling (Non-Blocking call)
CV_WRAP void upload(InputArray arr)
Performs data upload to GpuMat (Blocking call)
CV_WRAP GpuMat & setTo(Scalar s, InputArray mask, Stream &stream)
sets some of the GpuMat elements to s, according to the mask (Non-Blocking call)
void copyTo(OutputArray dst, InputArray mask, Stream &stream) const
copies those GpuMat elements to "m" that are marked with non-zero mask elements (Non-Blocking call)
CV_WRAP GpuMat & setTo(Scalar s, Stream &stream)
sets some of the GpuMat elements to s (Non-Blocking call)
uchar * data
pointer to the data
Definition cuda.hpp:357
CV_WRAP Size size() const
returns GpuMat size : width == number of columns, height == number of rows
void convertTo(OutputArray dst, int rtype, double alpha, Stream &stream) const
converts GpuMat to another datatype with scaling (Non-Blocking call)
~GpuMat()
destructor - calls release()
CV_WRAP GpuMat rowRange(Range r) const
CV_WRAP GpuMat(const GpuMat &m)
copy constructor
CV_WRAP GpuMat(int rows, int cols, int type, GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
constructs GpuMat of the specified size and type
static CV_WRAP GpuMat::Allocator * defaultAllocator()
default allocator
CV_WRAP GpuMat(const GpuMat &m, Range rowRange, Range colRange)
creates a GpuMat header for a part of the bigger matrix
void convertTo(OutputArray dst, int rtype, double alpha, double beta=0.0) const
converts GpuMat to another datatype with scaling (Blocking call)
GpuMat(Size size, int type, void *data, size_t step=Mat::AUTO_STEP)
CV_WRAP int channels() const
returns number of channels
CV_WRAP GpuMat(GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
default constructor
CV_WRAP void copyTo(CV_OUT GpuMat &dst) const
bindings overload which copies the GpuMat content to device memory (Blocking call)
Definition cuda.hpp:203
CV_WRAP bool empty() const
returns true if GpuMat data is NULL
CV_WRAP bool isContinuous() const
CV_WRAP GpuMat colRange(Range r) const
CV_WRAP void copyTo(CV_OUT GpuMat &dst, Stream &stream) const
bindings overload which copies the GpuMat content to device memory (Non-Blocking call)
Definition cuda.hpp:210
CV_WRAP void convertTo(CV_OUT GpuMat &dst, int rtype, double alpha, double beta, Stream &stream) const
bindings overload which converts GpuMat to another datatype with scaling (Non-Blocking call)
Definition cuda.hpp:263
CV_WRAP size_t elemSize() const
returns element size in bytes
CV_WRAP void upload(InputArray arr, Stream &stream)
Performs data upload to GpuMat (Non-Blocking call)
const uchar * ptr(int y=0) const
CV_WRAP GpuMat rowRange(int startrow, int endrow) const
... for the specified row span
int cols
Definition cuda.hpp:351
void copyTo(OutputArray dst) const
copies the GpuMat content to device memory (Blocking call)
CV_WRAP GpuMat col(int x) const
returns a new GpuMat header for the specified column
CV_WRAP GpuMat(InputArray arr, GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
builds GpuMat from host memory (Blocking call)
CV_WRAP void locateROI(Size &wholeSize, Point &ofs) const
locates GpuMat header within a parent GpuMat
CV_WRAP GpuMat clone() const
returns deep copy of the GpuMat, i.e. the data is copied
CV_WRAP GpuMat(const GpuMat &m, Rect roi)
CV_WRAP void copyTo(CV_OUT GpuMat &dst, GpuMat &mask) const
bindings overload which copies those GpuMat elements to "m" that are marked with non-zero mask elemen...
Definition cuda.hpp:217
CV_WRAP size_t step1() const
returns step/elemSize1()
uchar * ptr(int y=0)
returns pointer to y-th row
CV_WRAP void download(OutputArray dst) const
Performs data download from GpuMat (Blocking call)
GpuMat(int rows, int cols, int type, void *data, size_t step=Mat::AUTO_STEP)
constructor for GpuMat headers pointing to user-allocated data
CV_WRAP void download(OutputArray dst, Stream &stream) const
Performs data download from GpuMat (Non-Blocking call)
_Tp * ptr(int y=0)
template version of the above method
CV_WRAP void convertTo(CV_OUT GpuMat &dst, int rtype, double alpha=1.0, double beta=0.0) const
bindings overload which converts GpuMat to another datatype with scaling (Blocking call)
Definition cuda.hpp:253
CV_WRAP void release()
decreases the reference counter; deallocates the data when the counter reaches 0
CV_WRAP GpuMat & adjustROI(int dtop, int dbottom, int dleft, int dright)
moves/resizes the current GpuMat ROI inside the parent GpuMat
void copyTo(OutputArray dst, InputArray mask) const
copies those GpuMat elements to "m" that are marked with non-zero mask elements (Blocking call)
CV_WRAP void swap(GpuMat &mat)
swaps with other smart pointer
const uchar * dataend
Definition cuda.hpp:365
static CV_WRAP void setDefaultAllocator(GpuMat::Allocator *allocator)
uchar * datastart
helper fields used in locateROI and adjustROI
Definition cuda.hpp:364
void copyTo(OutputArray dst, Stream &stream) const
copies the GpuMat content to device memory (Non-Blocking call)
int flags
Definition cuda.hpp:348
CV_WRAP void updateContinuityFlag()
internal use method: updates the continuity flag
CV_WRAP GpuMat & setTo(Scalar s)
sets some of the GpuMat elements to s (Blocking call)
CV_WRAP int type() const
returns element type
const _Tp * ptr(int y=0) const
CV_WRAP void create(Size size, int type)
int * refcount
Definition cuda.hpp:361
CV_WRAP void copyTo(CV_OUT GpuMat &dst, GpuMat &mask, Stream &stream) const
bindings overload which copies those GpuMat elements to "m" that are marked with non-zero mask elemen...
Definition cuda.hpp:224
CV_WRAP GpuMat reshape(int cn, int rows=0) const
Class with reference counting wrapping special memory type allocation functions from CUDA.
Definition cuda.hpp:790
CV_WRAP bool empty() const
CV_WRAP HostMem clone() const
returns deep copy of the matrix, i.e. the data is copied
CV_WRAP HostMem(HostMem::AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED)
CV_WRAP void create(int rows, int cols, int type)
allocates new matrix data unless the matrix already has specified size and type.
CV_WRAP HostMem(InputArray arr, HostMem::AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED)
creates from host memory, copying the data
CV_WRAP Size size() const
uchar * data
Definition cuda.hpp:855
CV_WRAP size_t elemSize() const
void create(Size size, int type)
CV_WRAP HostMem reshape(int cn, int rows=0) const
CV_WRAP int depth() const
uchar * datastart
Definition cuda.hpp:858
CV_WRAP int channels() const
int cols
Definition cuda.hpp:852
GpuMat createGpuMatHeader() const
Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting f...
CV_PROP size_t step
Definition cuda.hpp:853
CV_WRAP size_t step1() const
CV_WRAP HostMem(int rows, int cols, int type, HostMem::AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED)
CV_WRAP size_t elemSize1() const
const uchar * dataend
Definition cuda.hpp:859
void release()
decrements the reference counter and releases the memory if needed.
AllocType
Definition cuda.hpp:792
CV_WRAP HostMem(Size size, int type, HostMem::AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED)
CV_WRAP bool isContinuous() const
static MatAllocator * getAllocator(HostMem::AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED)
CV_WRAP Mat createMatHeader() const
returns matrix header with disabled reference counting for HostMem data.
HostMem(const HostMem &m)
AllocType alloc_type
Definition cuda.hpp:861
CV_WRAP void swap(HostMem &b)
swaps with other smart pointer
CV_WRAP int type() const
int * refcount
Definition cuda.hpp:856
int flags
Definition cuda.hpp:851
This class encapsulates a queue of asynchronous calls.
Definition cuda.hpp:908
CV_WRAP void * cudaPtr() const
return Pointer to CUDA stream
CV_WRAP void waitForCompletion()
Blocks the current CPU thread until all operations in the stream are complete.
CV_WRAP Stream()
creates a new asynchronous stream
static CV_WRAP Stream & Null()
return Stream object for default CUDA stream
CV_WRAP Stream(const size_t cudaFlags)
creates a new Stream using the cudaFlags argument to determine the behaviors of the stream
CV_WRAP Stream(const Ptr< GpuMat::Allocator > &allocator)
creates a new asynchronous stream with custom allocator
CV_WRAP bool queryIfComplete() const
Returns true if the current stream queue is finished. Otherwise, it returns false.
void enqueueHostCallback(StreamCallback callback, void *userData)
Adds a callback to be called on the host after all currently enqueued items in the stream have comple...
CV_WRAP void waitEvent(const Event &event)
Makes a compute stream wait on an event.
Class providing a set of static methods to check what NVIDIA* card architecture the CUDA module was b...
Definition cuda.hpp:1085
static bool builtWith(FeatureSet feature_set)
The following method checks whether the module was built with the support of the given feature:
static CV_WRAP bool hasEqualOrLessPtx(int major, int minor)
static CV_WRAP bool hasPtx(int major, int minor)
static CV_WRAP bool hasEqualOrGreaterBin(int major, int minor)
static CV_WRAP bool hasEqualOrGreaterPtx(int major, int minor)
static CV_WRAP bool has(int major, int minor)
There is a set of methods to check whether the module contains intermediate (PTX) or binary CUDA code...
static CV_WRAP bool hasBin(int major, int minor)
static CV_WRAP bool hasEqualOrGreater(int major, int minor)
CV_EXPORTS void convertFp16(InputArray _src, OutputArray _dst, Stream &stream=Stream::Null())
Converts an array to half-precision floating point.
CV_EXPORTS_W void printCudaDeviceInfo(int device)
CV_EXPORTS_W void setDevice(int device)
Sets a device and initializes it for the current thread.
FeatureSet
Enumeration providing CUDA computing features.
Definition cuda.hpp:1056
CV_EXPORTS_W int getDevice()
Returns the current device index set by cuda::setDevice or initialized by default.
CV_EXPORTS_W void printShortCudaDeviceInfo(int device)
CV_EXPORTS_W void resetDevice()
Explicitly destroys and cleans up all resources associated with the current device in the current pro...
CV_EXPORTS_W int getCudaEnabledDeviceCount()
Returns the number of installed CUDA-enabled devices.
CV_EXPORTS bool deviceSupports(FeatureSet feature_set)
checks whether current device supports the given feature
@ WARP_SHUFFLE_FUNCTIONS
Definition cuda.hpp:1071
@ DYNAMIC_PARALLELISM
Definition cuda.hpp:1072
@ FEATURE_SET_COMPUTE_50
Definition cuda.hpp:1066
@ SHARED_ATOMICS
Definition cuda.hpp:1069
@ FEATURE_SET_COMPUTE_11
Definition cuda.hpp:1058
@ NATIVE_DOUBLE
Definition cuda.hpp:1070
@ FEATURE_SET_COMPUTE_32
Definition cuda.hpp:1064
@ GLOBAL_ATOMICS
Definition cuda.hpp:1068
@ FEATURE_SET_COMPUTE_10
Definition cuda.hpp:1057
@ FEATURE_SET_COMPUTE_12
Definition cuda.hpp:1059
@ FEATURE_SET_COMPUTE_20
Definition cuda.hpp:1061
@ FEATURE_SET_COMPUTE_21
Definition cuda.hpp:1062
@ FEATURE_SET_COMPUTE_35
Definition cuda.hpp:1065
@ FEATURE_SET_COMPUTE_13
Definition cuda.hpp:1060
@ FEATURE_SET_COMPUTE_30
Definition cuda.hpp:1063
CV_EXPORTS_W void setBufferPoolUsage(bool on)
BufferPool management (must be called before Stream creation)
CV_EXPORTS_W void unregisterPageLocked(Mat &m)
Unmaps the memory of matrix and makes it pageable again.
CV_EXPORTS_W void registerPageLocked(Mat &m)
Page-locks the memory of matrix and maps it for the device(s).
CV_EXPORTS_W void createContinuous(int rows, int cols, int type, OutputArray arr)
Creates a continuous matrix.
CV_EXPORTS_W GpuMat createGpuMatFromCudaMemory(int rows, int cols, int type, size_t cudaMemoryAddress, size_t step=Mat::AUTO_STEP)
Bindings overload to create a GpuMat from existing GPU memory.
Definition cuda.hpp:606
CV_EXPORTS_W void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)
Ensures that the size of a matrix is big enough and the matrix has a proper type.
CV_EXPORTS_W Stream wrapStream(size_t cudaStreamMemoryAddress)
Bindings overload to create a Stream object from the address stored in an existing CUDA Runtime API s...
CV_EXPORTS_W void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount)
Class that enables getting cudaEvent_t from cuda::Event.
Definition cuda_stream_accessor.hpp:76
Definition cuda.hpp:372
uchar * data
Definition cuda.hpp:382
GpuData & operator=(GpuData &&)=delete
GpuData(GpuData &&)=delete
GpuData(const GpuData &)=delete
GpuData & operator=(const GpuData &)=delete
GpuData(size_t _size)
size_t size
Definition cuda.hpp:383
Class that enables getting cudaStream_t from cuda::Stream.
Definition cuda_stream_accessor.hpp:68