EstervQrCode 1.1.1
Library for QR code manipulation
cuda.inl.hpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CORE_CUDAINL_HPP
#define OPENCV_CORE_CUDAINL_HPP

#include "opencv2/core/cuda.hpp"

namespace cv { namespace cuda {

//===================================================================================
// GpuMat
//===================================================================================

inline
GpuMat::GpuMat(Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{}

inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (rows_ > 0 && cols_ > 0)
        create(rows_, cols_, type_);
}

inline
GpuMat::GpuMat(Size size_, int type_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (size_.height > 0 && size_.width > 0)
        create(size_.height, size_.width, type_);
}

// WARNING: unreachable code using Ninja
#if defined _MSC_VER && _MSC_VER >= 1920
#pragma warning(push)
#pragma warning(disable: 4702)
#endif
inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (rows_ > 0 && cols_ > 0)
    {
        create(rows_, cols_, type_);
        setTo(s_);
    }
}

inline
GpuMat::GpuMat(Size size_, int type_, Scalar s_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (size_.height > 0 && size_.width > 0)
    {
        create(size_.height, size_.width, type_);
        setTo(s_);
    }
}
#if defined _MSC_VER && _MSC_VER >= 1920
#pragma warning(pop)
#endif

inline
GpuMat::GpuMat(const GpuMat& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), allocator(m.allocator)
{
    if (refcount)
        CV_XADD(refcount, 1);
}

inline
GpuMat::GpuMat(InputArray arr, Allocator* allocator_) :
    flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    upload(arr);
}

inline
GpuMat::~GpuMat()
{
    release();
}

inline
GpuMat& GpuMat::operator =(const GpuMat& m)
{
    if (this != &m)
    {
        GpuMat temp(m);
        swap(temp);
    }

    return *this;
}
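
// Usage sketch (not part of the original header; assumes OpenCV was built with
// CUDA support): the copy constructor and operator= above share the device
// buffer and bump the refcount, while clone() below performs a deep copy.
/*
    #include <opencv2/core/cuda.hpp>

    void copy_semantics_example()
    {
        cv::cuda::GpuMat a(480, 640, CV_8UC1, cv::Scalar(0)); // allocates on the device
        cv::cuda::GpuMat b = a;         // shallow copy: b.data == a.data, refcount is now 2
        cv::cuda::GpuMat c = a.clone(); // deep copy: separate device allocation
    }
*/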

inline
void GpuMat::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}

inline
void GpuMat::swap(GpuMat& b)
{
    std::swap(flags, b.flags);
    std::swap(rows, b.rows);
    std::swap(cols, b.cols);
    std::swap(step, b.step);
    std::swap(data, b.data);
    std::swap(datastart, b.datastart);
    std::swap(dataend, b.dataend);
    std::swap(refcount, b.refcount);
    std::swap(allocator, b.allocator);
}

inline
GpuMat GpuMat::clone() const
{
    GpuMat m;
    copyTo(m);
    return m;
}

// WARNING: unreachable code using Ninja
#if defined _MSC_VER && _MSC_VER >= 1920
#pragma warning(push)
#pragma warning(disable: 4702)
#endif
inline
void GpuMat::copyTo(OutputArray dst, InputArray mask) const
{
    copyTo(dst, mask, Stream::Null());
}
#if defined _MSC_VER && _MSC_VER >= 1920
#pragma warning(pop)
#endif

inline
GpuMat& GpuMat::setTo(Scalar s)
{
    return setTo(s, Stream::Null());
}

inline
GpuMat& GpuMat::setTo(Scalar s, InputArray mask)
{
    return setTo(s, mask, Stream::Null());
}

// WARNING: unreachable code using Ninja
#if defined _MSC_VER && _MSC_VER >= 1920
#pragma warning(push)
#pragma warning(disable: 4702)
#endif
inline
void GpuMat::convertTo(OutputArray dst, int rtype) const
{
    convertTo(dst, rtype, Stream::Null());
}

inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, double beta) const
{
    convertTo(dst, rtype, alpha, beta, Stream::Null());
}
#if defined _MSC_VER && _MSC_VER >= 1920
#pragma warning(pop)
#endif

inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const
{
    convertTo(dst, rtype, alpha, 0.0, stream);
}

inline
void GpuMat::assignTo(GpuMat& m, int _type) const
{
    if (_type < 0)
        m = *this;
    else
        convertTo(m, _type);
}
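
// Usage sketch (not from the header): the overloads above default to the
// blocking null stream; passing an explicit cv::cuda::Stream makes the
// operation asynchronous with respect to the host.
/*
    #include <opencv2/core/cuda.hpp>

    void convert_example(const cv::cuda::GpuMat& src8u)
    {
        cv::cuda::GpuMat dst32f;
        src8u.convertTo(dst32f, CV_32F, 1.0 / 255.0); // runs on Stream::Null(), blocking

        cv::cuda::Stream stream;
        src8u.convertTo(dst32f, CV_32F, 1.0 / 255.0, stream); // asynchronous
        stream.waitForCompletion();
    }
*/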

inline
uchar* GpuMat::ptr(int y)
{
    CV_DbgAssert( (unsigned)y < (unsigned)rows );
    return data + step * y;
}

inline
const uchar* GpuMat::ptr(int y) const
{
    CV_DbgAssert( (unsigned)y < (unsigned)rows );
    return data + step * y;
}

template<typename _Tp> inline
_Tp* GpuMat::ptr(int y)
{
    return (_Tp*)ptr(y);
}

template<typename _Tp> inline
const _Tp* GpuMat::ptr(int y) const
{
    return (const _Tp*)ptr(y);
}

template <class T> inline
GpuMat::operator PtrStepSz<T>() const
{
    return PtrStepSz<T>(rows, cols, (T*)data, step);
}

template <class T> inline
GpuMat::operator PtrStep<T>() const
{
    return PtrStep<T>((T*)data, step);
}

inline
GpuMat GpuMat::row(int y) const
{
    return GpuMat(*this, Range(y, y+1), Range::all());
}

inline
GpuMat GpuMat::col(int x) const
{
    return GpuMat(*this, Range::all(), Range(x, x+1));
}

inline
GpuMat GpuMat::rowRange(int startrow, int endrow) const
{
    return GpuMat(*this, Range(startrow, endrow), Range::all());
}

inline
GpuMat GpuMat::rowRange(Range r) const
{
    return GpuMat(*this, r, Range::all());
}

inline
GpuMat GpuMat::colRange(int startcol, int endcol) const
{
    return GpuMat(*this, Range::all(), Range(startcol, endcol));
}

inline
GpuMat GpuMat::colRange(Range r) const
{
    return GpuMat(*this, Range::all(), r);
}

inline
GpuMat GpuMat::operator ()(Range rowRange_, Range colRange_) const
{
    return GpuMat(*this, rowRange_, colRange_);
}

inline
GpuMat GpuMat::operator ()(Rect roi) const
{
    return GpuMat(*this, roi);
}
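
// Usage sketch (not from the header): row(), rowRange() and operator()(Rect)
// return views that share device memory with the parent, just like the
// equivalent cv::Mat operations.
/*
    #include <opencv2/core/cuda.hpp>

    void roi_example(cv::cuda::GpuMat& img)
    {
        cv::cuda::GpuMat top   = img.rowRange(0, img.rows / 2); // view, no copy
        cv::cuda::GpuMat patch = img(cv::Rect(10, 10, 64, 64)); // 64x64 view
        patch.setTo(cv::Scalar::all(0)); // writes through to img
    }
*/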

inline
bool GpuMat::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

inline
size_t GpuMat::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

inline
size_t GpuMat::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

inline
int GpuMat::type() const
{
    return CV_MAT_TYPE(flags);
}

inline
int GpuMat::depth() const
{
    return CV_MAT_DEPTH(flags);
}

inline
int GpuMat::channels() const
{
    return CV_MAT_CN(flags);
}

inline
size_t GpuMat::step1() const
{
    return step / elemSize1();
}

inline
Size GpuMat::size() const
{
    return Size(cols, rows);
}

inline
bool GpuMat::empty() const
{
    return data == 0;
}

inline
void* GpuMat::cudaPtr() const
{
    return data;
}
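
// Usage sketch (not from the header; assumes the CUDA runtime API is
// available): cudaPtr() exposes the raw device pointer and step is the row
// pitch in bytes, so a GpuMat can be handed to CUDA runtime calls or kernels.
/*
    #include <opencv2/core/cuda.hpp>
    #include <cuda_runtime.h>

    void interop_example(cv::cuda::GpuMat& m)
    {
        // Zero the image through the raw pointer; rows may be padded, hence the 2D variant.
        cudaMemset2D(m.cudaPtr(), m.step, 0, m.cols * m.elemSize(), m.rows);
    }
*/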

static inline
GpuMat createContinuous(int rows, int cols, int type)
{
    GpuMat m;
    createContinuous(rows, cols, type, m);
    return m;
}

static inline
void createContinuous(Size size, int type, OutputArray arr)
{
    createContinuous(size.height, size.width, type, arr);
}

static inline
GpuMat createContinuous(Size size, int type)
{
    GpuMat m;
    createContinuous(size, type, m);
    return m;
}

static inline
void ensureSizeIsEnough(Size size, int type, OutputArray arr)
{
    ensureSizeIsEnough(size.height, size.width, type, arr);
}

static inline
void swap(GpuMat& a, GpuMat& b)
{
    a.swap(b);
}
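
// Usage sketch (not from the header): createContinuous guarantees a single
// gap-free allocation (step == cols * elemSize()), which some algorithms and
// interop paths require.
/*
    #include <opencv2/core/cuda.hpp>

    void continuous_example()
    {
        cv::cuda::GpuMat m = cv::cuda::createContinuous(480, 640, CV_8UC3);
        CV_Assert(m.isContinuous());
    }
*/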

//===================================================================================
// GpuMatND
//===================================================================================

inline
GpuMatND::GpuMatND() :
    flags(0), dims(0), data(nullptr), offset(0)
{
}

inline
GpuMatND::GpuMatND(SizeArray _size, int _type) :
    flags(0), dims(0), data(nullptr), offset(0)
{
    create(std::move(_size), _type);
}

inline
void GpuMatND::swap(GpuMatND& m) noexcept
{
    std::swap(*this, m);
}

inline
bool GpuMatND::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

inline
bool GpuMatND::isSubmatrix() const
{
    return (flags & Mat::SUBMATRIX_FLAG) != 0;
}

inline
size_t GpuMatND::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

inline
size_t GpuMatND::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

inline
bool GpuMatND::empty() const
{
    return data == nullptr;
}

inline
bool GpuMatND::external() const
{
    return !empty() && data_.use_count() == 0;
}

inline
uchar* GpuMatND::getDevicePtr() const
{
    return data + offset;
}

inline
size_t GpuMatND::total() const
{
    size_t p = 1;
    for(auto s : size)
        p *= s;
    return p;
}

inline
size_t GpuMatND::totalMemSize() const
{
    return size[0] * step[0];
}

inline
int GpuMatND::type() const
{
    return CV_MAT_TYPE(flags);
}
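
// Usage sketch (not from the header; assumes GpuMatND::SizeArray is a
// std::vector<int> as in cuda.hpp): total() is the product of all dimensions,
// while totalMemSize() also accounts for any padding recorded in step[0].
/*
    #include <opencv2/core/cuda.hpp>

    void nd_example()
    {
        cv::cuda::GpuMatND m({2, 3, 4}, CV_32FC1); // 2x3x4 single-channel float
        CV_Assert(m.total() == 2 * 3 * 4);
    }
*/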

//===================================================================================
// HostMem
//===================================================================================

inline
HostMem::HostMem(AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
}

inline
HostMem::HostMem(const HostMem& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
{
    if( refcount )
        CV_XADD(refcount, 1);
}

inline
HostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    if (rows_ > 0 && cols_ > 0)
        create(rows_, cols_, type_);
}

inline
HostMem::HostMem(Size size_, int type_, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    if (size_.height > 0 && size_.width > 0)
        create(size_.height, size_.width, type_);
}

inline
HostMem::HostMem(InputArray arr, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    arr.getMat().copyTo(*this);
}

inline
HostMem::~HostMem()
{
    release();
}

inline
HostMem& HostMem::operator =(const HostMem& m)
{
    if (this != &m)
    {
        HostMem temp(m);
        swap(temp);
    }

    return *this;
}

inline
void HostMem::swap(HostMem& b)
{
    std::swap(flags, b.flags);
    std::swap(rows, b.rows);
    std::swap(cols, b.cols);
    std::swap(step, b.step);
    std::swap(data, b.data);
    std::swap(datastart, b.datastart);
    std::swap(dataend, b.dataend);
    std::swap(refcount, b.refcount);
    std::swap(alloc_type, b.alloc_type);
}

inline
HostMem HostMem::clone() const
{
    HostMem m(size(), type(), alloc_type);
    createMatHeader().copyTo(m);
    return m;
}

inline
void HostMem::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}

inline
Mat HostMem::createMatHeader() const
{
    return Mat(size(), type(), data, step);
}

inline
bool HostMem::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

inline
size_t HostMem::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

inline
size_t HostMem::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

inline
int HostMem::type() const
{
    return CV_MAT_TYPE(flags);
}

inline
int HostMem::depth() const
{
    return CV_MAT_DEPTH(flags);
}

inline
int HostMem::channels() const
{
    return CV_MAT_CN(flags);
}

inline
size_t HostMem::step1() const
{
    return step / elemSize1();
}

inline
Size HostMem::size() const
{
    return Size(cols, rows);
}

inline
bool HostMem::empty() const
{
    return data == 0;
}

static inline
void swap(HostMem& a, HostMem& b)
{
    a.swap(b);
}
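
// Usage sketch (not from the header): HostMem allocates page-locked host
// memory, which enables truly asynchronous transfers; createMatHeader()
// wraps the same buffer as a cv::Mat without copying.
/*
    #include <opencv2/core/cuda.hpp>

    void hostmem_example()
    {
        cv::cuda::HostMem pinned(480, 640, CV_8UC1, cv::cuda::HostMem::PAGE_LOCKED);
        cv::Mat view = pinned.createMatHeader(); // host view, no copy

        cv::cuda::GpuMat d;
        cv::cuda::Stream stream;
        d.upload(view, stream); // asynchronous because the source is pinned
        stream.waitForCompletion();
    }
*/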

//===================================================================================
// Stream
//===================================================================================

inline
Stream::Stream(const Ptr<Impl>& impl)
    : impl_(impl)
{
}

//===================================================================================
// Event
//===================================================================================

inline
Event::Event(const Ptr<Impl>& impl)
    : impl_(impl)
{
}

//===================================================================================
// Initialization & Info
//===================================================================================

// WARNING: unreachable code using Ninja
#if defined _MSC_VER && _MSC_VER >= 1920
#pragma warning(push)
#pragma warning(disable: 4702)
#endif
inline
bool TargetArchs::has(int major, int minor)
{
    return hasPtx(major, minor) || hasBin(major, minor);
}

inline
bool TargetArchs::hasEqualOrGreater(int major, int minor)
{
    return hasEqualOrGreaterPtx(major, minor) || hasEqualOrGreaterBin(major, minor);
}

inline
DeviceInfo::DeviceInfo()
{
    device_id_ = getDevice();
}
#if defined _MSC_VER && _MSC_VER >= 1920
#pragma warning(pop)
#endif

inline
DeviceInfo::DeviceInfo(int device_id)
{
    CV_Assert( device_id >= 0 && device_id < getCudaEnabledDeviceCount() );
    device_id_ = device_id;
}

// WARNING: unreachable code using Ninja
#if defined _MSC_VER && _MSC_VER >= 1920
#pragma warning(push)
#pragma warning(disable: 4702)
#endif
inline
int DeviceInfo::deviceID() const
{
    return device_id_;
}

inline
size_t DeviceInfo::freeMemory() const
{
    size_t _totalMemory = 0, _freeMemory = 0;
    queryMemory(_totalMemory, _freeMemory);
    return _freeMemory;
}

inline
size_t DeviceInfo::totalMemory() const
{
    size_t _totalMemory = 0, _freeMemory = 0;
    queryMemory(_totalMemory, _freeMemory);
    return _totalMemory;
}

inline
bool DeviceInfo::supports(FeatureSet feature_set) const
{
    int version = majorVersion() * 10 + minorVersion();
    return version >= feature_set;
}
#if defined _MSC_VER && _MSC_VER >= 1920
#pragma warning(pop)
#endif
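
// Usage sketch (not from the header): enumerating CUDA devices and checking
// compute capability via DeviceInfo::supports(), which compares
// major*10 + minor against the FeatureSet value as shown above.
/*
    #include <opencv2/core/cuda.hpp>
    #include <iostream>

    void device_query_example()
    {
        int n = cv::cuda::getCudaEnabledDeviceCount();
        for (int i = 0; i < n; ++i)
        {
            cv::cuda::DeviceInfo info(i);
            std::cout << i << ": " << info.freeMemory() << " bytes free, "
                      << "compute 5.0+: "
                      << info.supports(cv::cuda::FEATURE_SET_COMPUTE_50) << '\n';
        }
    }
*/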

}} // namespace cv { namespace cuda {

//===================================================================================
// Mat
//===================================================================================

namespace cv {

inline
Mat::Mat(const cuda::GpuMat& m)
    : flags(0), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows)
{
    m.download(*this);
}

}
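
// Usage sketch (not from the header): constructing a cv::Mat directly from a
// GpuMat performs a blocking download to host memory.
/*
    #include <opencv2/core/cuda.hpp>

    void download_example(const cv::cuda::GpuMat& d)
    {
        cv::Mat h(d); // equivalent to d.download(h); blocks until the copy finishes
    }
*/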

#endif // OPENCV_CORE_CUDAINL_HPP