Upload 27 files
Browse files
- cv2/fisheye/__init__.pyi +72 -0
- cv2/flann/__init__.pyi +62 -0
- cv2/gapi/__init__.py +301 -0
- cv2/gapi/__init__.pyi +332 -0
- cv2/gapi/__pycache__/__init__.cpython-311.pyc +0 -0
- cv2/gapi/core/__init__.pyi +0 -0
- cv2/gapi/core/cpu/__init__.pyi +8 -0
- cv2/gapi/core/fluid/__init__.pyi +8 -0
- cv2/gapi/core/ocl/__init__.pyi +8 -0
- cv2/gapi/ie/__init__.pyi +46 -0
- cv2/gapi/ie/detail/__init__.pyi +10 -0
- cv2/gapi/imgproc/__init__.pyi +0 -0
- cv2/gapi/imgproc/fluid/__init__.pyi +8 -0
- cv2/gapi/oak/__init__.pyi +35 -0
- cv2/gapi/onnx/__init__.pyi +32 -0
- cv2/gapi/ov/__init__.pyi +72 -0
- cv2/gapi/own/__init__.pyi +0 -0
- cv2/gapi/own/detail/__init__.pyi +8 -0
- cv2/gapi/render/__init__.pyi +0 -0
- cv2/gapi/render/ocv/__init__.pyi +8 -0
- cv2/gapi/streaming/__init__.pyi +40 -0
- cv2/gapi/video/__init__.pyi +8 -0
- cv2/gapi/wip/__init__.pyi +34 -0
- cv2/gapi/wip/draw/__init__.pyi +117 -0
- cv2/gapi/wip/gst/__init__.pyi +18 -0
- cv2/gapi/wip/onevpl/__init__.pyi +14 -0
- cv2/ipp/__init__.pyi +15 -0
cv2/fisheye/__init__.pyi
ADDED
@@ -0,0 +1,72 @@
import cv2
import cv2.typing
import typing


# Enumerations
CALIB_USE_INTRINSIC_GUESS: int
CALIB_RECOMPUTE_EXTRINSIC: int
CALIB_CHECK_COND: int
CALIB_FIX_SKEW: int
CALIB_FIX_K1: int
CALIB_FIX_K2: int
CALIB_FIX_K3: int
CALIB_FIX_K4: int
CALIB_FIX_INTRINSIC: int
CALIB_FIX_PRINCIPAL_POINT: int
CALIB_ZERO_DISPARITY: int
CALIB_FIX_FOCAL_LENGTH: int



# Functions
@typing.overload
def calibrate(objectPoints: typing.Sequence[cv2.typing.MatLike], imagePoints: typing.Sequence[cv2.typing.MatLike], image_size: cv2.typing.Size, K: cv2.typing.MatLike, D: cv2.typing.MatLike, rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, typing.Sequence[cv2.typing.MatLike], typing.Sequence[cv2.typing.MatLike]]: ...
@typing.overload
def calibrate(objectPoints: typing.Sequence[cv2.UMat], imagePoints: typing.Sequence[cv2.UMat], image_size: cv2.typing.Size, K: cv2.UMat, D: cv2.UMat, rvecs: typing.Sequence[cv2.UMat] | None = ..., tvecs: typing.Sequence[cv2.UMat] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, typing.Sequence[cv2.UMat], typing.Sequence[cv2.UMat]]: ...

@typing.overload
def distortPoints(undistorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, distorted: cv2.typing.MatLike | None = ..., alpha: float = ...) -> cv2.typing.MatLike: ...
@typing.overload
def distortPoints(undistorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, distorted: cv2.UMat | None = ..., alpha: float = ...) -> cv2.UMat: ...

@typing.overload
def estimateNewCameraMatrixForUndistortRectify(K: cv2.typing.MatLike, D: cv2.typing.MatLike, image_size: cv2.typing.Size, R: cv2.typing.MatLike, P: cv2.typing.MatLike | None = ..., balance: float = ..., new_size: cv2.typing.Size = ..., fov_scale: float = ...) -> cv2.typing.MatLike: ...
@typing.overload
def estimateNewCameraMatrixForUndistortRectify(K: cv2.UMat, D: cv2.UMat, image_size: cv2.typing.Size, R: cv2.UMat, P: cv2.UMat | None = ..., balance: float = ..., new_size: cv2.typing.Size = ..., fov_scale: float = ...) -> cv2.UMat: ...

@typing.overload
def initUndistortRectifyMap(K: cv2.typing.MatLike, D: cv2.typing.MatLike, R: cv2.typing.MatLike, P: cv2.typing.MatLike, size: cv2.typing.Size, m1type: int, map1: cv2.typing.MatLike | None = ..., map2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@typing.overload
def initUndistortRectifyMap(K: cv2.UMat, D: cv2.UMat, R: cv2.UMat, P: cv2.UMat, size: cv2.typing.Size, m1type: int, map1: cv2.UMat | None = ..., map2: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...

@typing.overload
def projectPoints(objectPoints: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike | None = ..., alpha: float = ..., jacobian: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@typing.overload
def projectPoints(objectPoints: cv2.UMat, rvec: cv2.UMat, tvec: cv2.UMat, K: cv2.UMat, D: cv2.UMat, imagePoints: cv2.UMat | None = ..., alpha: float = ..., jacobian: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...

@typing.overload
def stereoCalibrate(objectPoints: typing.Sequence[cv2.typing.MatLike], imagePoints1: typing.Sequence[cv2.typing.MatLike], imagePoints2: typing.Sequence[cv2.typing.MatLike], K1: cv2.typing.MatLike, D1: cv2.typing.MatLike, K2: cv2.typing.MatLike, D2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike | None = ..., T: cv2.typing.MatLike | None = ..., rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, typing.Sequence[cv2.typing.MatLike], typing.Sequence[cv2.typing.MatLike]]: ...
@typing.overload
def stereoCalibrate(objectPoints: typing.Sequence[cv2.UMat], imagePoints1: typing.Sequence[cv2.UMat], imagePoints2: typing.Sequence[cv2.UMat], K1: cv2.UMat, D1: cv2.UMat, K2: cv2.UMat, D2: cv2.UMat, imageSize: cv2.typing.Size, R: cv2.UMat | None = ..., T: cv2.UMat | None = ..., rvecs: typing.Sequence[cv2.UMat] | None = ..., tvecs: typing.Sequence[cv2.UMat] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, typing.Sequence[cv2.UMat], typing.Sequence[cv2.UMat]]: ...
@typing.overload
def stereoCalibrate(objectPoints: typing.Sequence[cv2.typing.MatLike], imagePoints1: typing.Sequence[cv2.typing.MatLike], imagePoints2: typing.Sequence[cv2.typing.MatLike], K1: cv2.typing.MatLike, D1: cv2.typing.MatLike, K2: cv2.typing.MatLike, D2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike | None = ..., T: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@typing.overload
def stereoCalibrate(objectPoints: typing.Sequence[cv2.UMat], imagePoints1: typing.Sequence[cv2.UMat], imagePoints2: typing.Sequence[cv2.UMat], K1: cv2.UMat, D1: cv2.UMat, K2: cv2.UMat, D2: cv2.UMat, imageSize: cv2.typing.Size, R: cv2.UMat | None = ..., T: cv2.UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat]: ...

@typing.overload
def stereoRectify(K1: cv2.typing.MatLike, D1: cv2.typing.MatLike, K2: cv2.typing.MatLike, D2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike, tvec: cv2.typing.MatLike, flags: int, R1: cv2.typing.MatLike | None = ..., R2: cv2.typing.MatLike | None = ..., P1: cv2.typing.MatLike | None = ..., P2: cv2.typing.MatLike | None = ..., Q: cv2.typing.MatLike | None = ..., newImageSize: cv2.typing.Size = ..., balance: float = ..., fov_scale: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@typing.overload
def stereoRectify(K1: cv2.UMat, D1: cv2.UMat, K2: cv2.UMat, D2: cv2.UMat, imageSize: cv2.typing.Size, R: cv2.UMat, tvec: cv2.UMat, flags: int, R1: cv2.UMat | None = ..., R2: cv2.UMat | None = ..., P1: cv2.UMat | None = ..., P2: cv2.UMat | None = ..., Q: cv2.UMat | None = ..., newImageSize: cv2.typing.Size = ..., balance: float = ..., fov_scale: float = ...) -> tuple[cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat]: ...

@typing.overload
def undistortImage(distorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, undistorted: cv2.typing.MatLike | None = ..., Knew: cv2.typing.MatLike | None = ..., new_size: cv2.typing.Size = ...) -> cv2.typing.MatLike: ...
@typing.overload
def undistortImage(distorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, undistorted: cv2.UMat | None = ..., Knew: cv2.UMat | None = ..., new_size: cv2.typing.Size = ...) -> cv2.UMat: ...

@typing.overload
def undistortPoints(distorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, undistorted: cv2.typing.MatLike | None = ..., R: cv2.typing.MatLike | None = ..., P: cv2.typing.MatLike | None = ..., criteria: cv2.typing.TermCriteria = ...) -> cv2.typing.MatLike: ...
@typing.overload
def undistortPoints(distorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, undistorted: cv2.UMat | None = ..., R: cv2.UMat | None = ..., P: cv2.UMat | None = ..., criteria: cv2.typing.TermCriteria = ...) -> cv2.UMat: ...
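Note: a minimal undistortion sketch that exercises the overloads above. The camera matrix, distortion coefficients, and file name are placeholders, not values shipped with this upload.

    import cv2
    import numpy as np

    img = cv2.imread('fisheye.jpg')                       # distorted input (placeholder path)
    K = np.array([[420.0, 0.0, 640.0],
                  [0.0, 420.0, 360.0],
                  [0.0, 0.0, 1.0]])                       # illustrative intrinsics
    D = np.array([0.05, -0.01, 0.0, 0.0])                 # 4 fisheye distortion coefficients

    h, w = img.shape[:2]
    new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
        K, D, (w, h), np.eye(3), balance=0.5)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(
        K, D, np.eye(3), new_K, (w, h), cv2.CV_16SC2)
    undistorted = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)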
cv2/flann/__init__.pyi
ADDED
@@ -0,0 +1,62 @@
import cv2
import cv2.typing
import typing


# Enumerations
FLANN_INDEX_TYPE_8U: int
FLANN_INDEX_TYPE_8S: int
FLANN_INDEX_TYPE_16U: int
FLANN_INDEX_TYPE_16S: int
FLANN_INDEX_TYPE_32S: int
FLANN_INDEX_TYPE_32F: int
FLANN_INDEX_TYPE_64F: int
FLANN_INDEX_TYPE_STRING: int
FLANN_INDEX_TYPE_BOOL: int
FLANN_INDEX_TYPE_ALGORITHM: int
LAST_VALUE_FLANN_INDEX_TYPE: int
FlannIndexType = int
"""One of [FLANN_INDEX_TYPE_8U, FLANN_INDEX_TYPE_8S, FLANN_INDEX_TYPE_16U, FLANN_INDEX_TYPE_16S, FLANN_INDEX_TYPE_32S, FLANN_INDEX_TYPE_32F, FLANN_INDEX_TYPE_64F, FLANN_INDEX_TYPE_STRING, FLANN_INDEX_TYPE_BOOL, FLANN_INDEX_TYPE_ALGORITHM, LAST_VALUE_FLANN_INDEX_TYPE]"""



# Classes
class Index:
    # Functions
    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, features: cv2.typing.MatLike, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
    @typing.overload
    def __init__(self, features: cv2.UMat, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...

    @typing.overload
    def build(self, features: cv2.typing.MatLike, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
    @typing.overload
    def build(self, features: cv2.UMat, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...

    @typing.overload
    def knnSearch(self, query: cv2.typing.MatLike, knn: int, indices: cv2.typing.MatLike | None = ..., dists: cv2.typing.MatLike | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @typing.overload
    def knnSearch(self, query: cv2.UMat, knn: int, indices: cv2.UMat | None = ..., dists: cv2.UMat | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[cv2.UMat, cv2.UMat]: ...

    @typing.overload
    def radiusSearch(self, query: cv2.typing.MatLike, radius: float, maxResults: int, indices: cv2.typing.MatLike | None = ..., dists: cv2.typing.MatLike | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @typing.overload
    def radiusSearch(self, query: cv2.UMat, radius: float, maxResults: int, indices: cv2.UMat | None = ..., dists: cv2.UMat | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[int, cv2.UMat, cv2.UMat]: ...

    def save(self, filename: str) -> None: ...

    @typing.overload
    def load(self, features: cv2.typing.MatLike, filename: str) -> bool: ...
    @typing.overload
    def load(self, features: cv2.UMat, filename: str) -> bool: ...

    def release(self) -> None: ...

    def getDistance(self) -> int: ...

    def getAlgorithm(self) -> int: ...
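Note: a minimal sketch of the Index workflow typed above, assuming the dict-based IndexParams/SearchParams accepted by the Python binding; the KD-tree parameters and data are illustrative only.

    import cv2
    import numpy as np

    features = np.random.rand(1000, 32).astype(np.float32)   # one descriptor per row
    queries = np.random.rand(5, 32).astype(np.float32)

    # algorithm 1 == FLANN_INDEX_KDTREE
    index = cv2.flann.Index(features, {'algorithm': 1, 'trees': 4})
    indices, dists = index.knnSearch(queries, knn=3, params={'checks': 32})
    print(indices.shape, dists.shape)   # (5, 3) each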
cv2/gapi/__init__.py
ADDED
@@ -0,0 +1,301 @@
__all__ = ['op', 'kernel']

import sys
import cv2 as cv

# NB: Register function in specific module
def register(mname):
    def parameterized(func):
        sys.modules[mname].__dict__[func.__name__] = func
        return func
    return parameterized


@register('cv2.gapi')
def networks(*args):
    return cv.gapi_GNetPackage(list(map(cv.detail.strip, args)))


@register('cv2.gapi')
def compile_args(*args):
    return list(map(cv.GCompileArg, args))


@register('cv2')
def GIn(*args):
    return [*args]


@register('cv2')
def GOut(*args):
    return [*args]


@register('cv2')
def gin(*args):
    return [*args]


@register('cv2.gapi')
def descr_of(*args):
    return [*args]


@register('cv2')
class GOpaque():
    # NB: Inheritance from c++ class cause segfault.
    # So just aggregate cv.GOpaqueT instead of inheritance
    def __new__(cls, argtype):
        return cv.GOpaqueT(argtype)

    class Bool():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_BOOL)

    class Int():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_INT)

    class Double():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_DOUBLE)

    class Float():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_FLOAT)

    class String():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_STRING)

    class Point():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_POINT)

    class Point2f():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_POINT2F)

    class Point3f():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_POINT3F)

    class Size():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_SIZE)

    class Rect():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_RECT)

    class Prim():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_DRAW_PRIM)

    class Any():
        def __new__(self):
            return cv.GOpaqueT(cv.gapi.CV_ANY)

@register('cv2')
class GArray():
    # NB: Inheritance from c++ class cause segfault.
    # So just aggregate cv.GArrayT instead of inheritance
    def __new__(cls, argtype):
        return cv.GArrayT(argtype)

    class Bool():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_BOOL)

    class Int():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_INT)

    class Double():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_DOUBLE)

    class Float():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_FLOAT)

    class String():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_STRING)

    class Point():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_POINT)

    class Point2f():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_POINT2F)

    class Point3f():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_POINT3F)

    class Size():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_SIZE)

    class Rect():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_RECT)

    class Scalar():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_SCALAR)

    class Mat():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_MAT)

    class GMat():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_GMAT)

    class Prim():
        def __new__(self):
            return cv.GArray(cv.gapi.CV_DRAW_PRIM)

    class Any():
        def __new__(self):
            return cv.GArray(cv.gapi.CV_ANY)


# NB: Top lvl decorator takes arguments
def op(op_id, in_types, out_types):

    garray_types = {
        cv.GArray.Bool: cv.gapi.CV_BOOL,
        cv.GArray.Int: cv.gapi.CV_INT,
        cv.GArray.Double: cv.gapi.CV_DOUBLE,
        cv.GArray.Float: cv.gapi.CV_FLOAT,
        cv.GArray.String: cv.gapi.CV_STRING,
        cv.GArray.Point: cv.gapi.CV_POINT,
        cv.GArray.Point2f: cv.gapi.CV_POINT2F,
        cv.GArray.Point3f: cv.gapi.CV_POINT3F,
        cv.GArray.Size: cv.gapi.CV_SIZE,
        cv.GArray.Rect: cv.gapi.CV_RECT,
        cv.GArray.Scalar: cv.gapi.CV_SCALAR,
        cv.GArray.Mat: cv.gapi.CV_MAT,
        cv.GArray.GMat: cv.gapi.CV_GMAT,
        cv.GArray.Prim: cv.gapi.CV_DRAW_PRIM,
        cv.GArray.Any: cv.gapi.CV_ANY
    }

    gopaque_types = {
        cv.GOpaque.Size: cv.gapi.CV_SIZE,
        cv.GOpaque.Rect: cv.gapi.CV_RECT,
        cv.GOpaque.Bool: cv.gapi.CV_BOOL,
        cv.GOpaque.Int: cv.gapi.CV_INT,
        cv.GOpaque.Double: cv.gapi.CV_DOUBLE,
        cv.GOpaque.Float: cv.gapi.CV_FLOAT,
        cv.GOpaque.String: cv.gapi.CV_STRING,
        cv.GOpaque.Point: cv.gapi.CV_POINT,
        cv.GOpaque.Point2f: cv.gapi.CV_POINT2F,
        cv.GOpaque.Point3f: cv.gapi.CV_POINT3F,
        cv.GOpaque.Size: cv.gapi.CV_SIZE,
        cv.GOpaque.Rect: cv.gapi.CV_RECT,
        cv.GOpaque.Prim: cv.gapi.CV_DRAW_PRIM,
        cv.GOpaque.Any: cv.gapi.CV_ANY
    }

    type2str = {
        cv.gapi.CV_BOOL: 'cv.gapi.CV_BOOL',
        cv.gapi.CV_INT: 'cv.gapi.CV_INT',
        cv.gapi.CV_DOUBLE: 'cv.gapi.CV_DOUBLE',
        cv.gapi.CV_FLOAT: 'cv.gapi.CV_FLOAT',
        cv.gapi.CV_STRING: 'cv.gapi.CV_STRING',
        cv.gapi.CV_POINT: 'cv.gapi.CV_POINT',
        cv.gapi.CV_POINT2F: 'cv.gapi.CV_POINT2F',
        cv.gapi.CV_POINT3F: 'cv.gapi.CV_POINT3F',
        cv.gapi.CV_SIZE: 'cv.gapi.CV_SIZE',
        cv.gapi.CV_RECT: 'cv.gapi.CV_RECT',
        cv.gapi.CV_SCALAR: 'cv.gapi.CV_SCALAR',
        cv.gapi.CV_MAT: 'cv.gapi.CV_MAT',
        cv.gapi.CV_GMAT: 'cv.gapi.CV_GMAT',
        cv.gapi.CV_DRAW_PRIM: 'cv.gapi.CV_DRAW_PRIM'
    }

    # NB: Second lvl decorator takes class to decorate
    def op_with_params(cls):
        if not in_types:
            raise Exception('{} operation should have at least one input!'.format(cls.__name__))

        if not out_types:
            raise Exception('{} operation should have at least one output!'.format(cls.__name__))

        for i, t in enumerate(out_types):
            if t not in [cv.GMat, cv.GScalar, *garray_types, *gopaque_types]:
                raise Exception('{} unsupported output type: {} in position: {}'
                                .format(cls.__name__, t.__name__, i))

        def on(*args):
            if len(in_types) != len(args):
                raise Exception('Invalid number of input elements!\nExpected: {}, Actual: {}'
                                .format(len(in_types), len(args)))

            for i, (t, a) in enumerate(zip(in_types, args)):
                if t in garray_types:
                    if not isinstance(a, cv.GArrayT):
                        raise Exception("{} invalid type for argument {}.\nExpected: {}, Actual: {}"
                                        .format(cls.__name__, i, cv.GArrayT.__name__, type(a).__name__))

                    elif a.type() != garray_types[t]:
                        raise Exception("{} invalid GArrayT type for argument {}.\nExpected: {}, Actual: {}"
                                        .format(cls.__name__, i, type2str[garray_types[t]], type2str[a.type()]))

                elif t in gopaque_types:
                    if not isinstance(a, cv.GOpaqueT):
                        raise Exception("{} invalid type for argument {}.\nExpected: {}, Actual: {}"
                                        .format(cls.__name__, i, cv.GOpaqueT.__name__, type(a).__name__))

                    elif a.type() != gopaque_types[t]:
                        raise Exception("{} invalid GOpaque type for argument {}.\nExpected: {}, Actual: {}"
                                        .format(cls.__name__, i, type2str[gopaque_types[t]], type2str[a.type()]))

                else:
                    if t != type(a):
                        raise Exception('{} invalid input type for argument {}.\nExpected: {}, Actual: {}'
                                        .format(cls.__name__, i, t.__name__, type(a).__name__))

            op = cv.gapi.__op(op_id, cls.outMeta, *args)

            out_protos = []
            for i, out_type in enumerate(out_types):
                if out_type == cv.GMat:
                    out_protos.append(op.getGMat())
                elif out_type == cv.GScalar:
                    out_protos.append(op.getGScalar())
                elif out_type in gopaque_types:
                    out_protos.append(op.getGOpaque(gopaque_types[out_type]))
                elif out_type in garray_types:
                    out_protos.append(op.getGArray(garray_types[out_type]))
                else:
                    raise Exception("""In {}: G-API operation can't produce the output with type: {} in position: {}"""
                                    .format(cls.__name__, out_type.__name__, i))

            return tuple(out_protos) if len(out_protos) != 1 else out_protos[0]

        # NB: Extend operation class
        cls.id = op_id
        cls.on = staticmethod(on)
        return cls

    return op_with_params


def kernel(op_cls):
    # NB: Second lvl decorator takes class to decorate
    def kernel_with_params(cls):
        # NB: Add new members to kernel class
        cls.id = op_cls.id
        cls.outMeta = op_cls.outMeta
        return cls

    return kernel_with_params


cv.gapi.wip.GStreamerPipeline = cv.gapi_wip_gst_GStreamerPipeline
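Note: a sketch of how the op/kernel decorators defined above are intended to be used, following the documented G-API Python workflow; the operation id and the kernel body are illustrative, not part of this upload.

    import cv2 as cv
    import numpy as np

    @cv.gapi.op('custom.gapi.addC1', in_types=[cv.GMat], out_types=[cv.GMat])
    class GAddOne:
        @staticmethod
        def outMeta(desc):
            return desc            # output meta equals input meta

    @cv.gapi.kernel(GAddOne)
    class GAddOneImpl:
        @staticmethod
        def run(img):
            return img + 1         # plain NumPy implementation of the op

    g_in = cv.GMat()
    g_out = GAddOne.on(g_in)
    comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))

    img = np.zeros((4, 4), dtype=np.uint8)
    out = comp.apply(cv.gin(img),
                     args=cv.gapi.compile_args(cv.gapi.kernels(GAddOneImpl)))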
cv2/gapi/__init__.pyi
ADDED
@@ -0,0 +1,332 @@
import cv2
import cv2.typing
import typing


# Enumerations
StereoOutputFormat_DEPTH_FLOAT16: int
STEREO_OUTPUT_FORMAT_DEPTH_FLOAT16: int
StereoOutputFormat_DEPTH_FLOAT32: int
STEREO_OUTPUT_FORMAT_DEPTH_FLOAT32: int
StereoOutputFormat_DISPARITY_FIXED16_11_5: int
STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_11_5: int
StereoOutputFormat_DISPARITY_FIXED16_12_4: int
STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_12_4: int
StereoOutputFormat_DEPTH_16F: int
STEREO_OUTPUT_FORMAT_DEPTH_16F: int
StereoOutputFormat_DEPTH_32F: int
STEREO_OUTPUT_FORMAT_DEPTH_32F: int
StereoOutputFormat_DISPARITY_16Q_10_5: int
STEREO_OUTPUT_FORMAT_DISPARITY_16Q_10_5: int
StereoOutputFormat_DISPARITY_16Q_11_4: int
STEREO_OUTPUT_FORMAT_DISPARITY_16Q_11_4: int
StereoOutputFormat = int
"""One of [StereoOutputFormat_DEPTH_FLOAT16, STEREO_OUTPUT_FORMAT_DEPTH_FLOAT16, StereoOutputFormat_DEPTH_FLOAT32, STEREO_OUTPUT_FORMAT_DEPTH_FLOAT32, StereoOutputFormat_DISPARITY_FIXED16_11_5, STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_11_5, StereoOutputFormat_DISPARITY_FIXED16_12_4, STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_12_4, StereoOutputFormat_DEPTH_16F, STEREO_OUTPUT_FORMAT_DEPTH_16F, StereoOutputFormat_DEPTH_32F, STEREO_OUTPUT_FORMAT_DEPTH_32F, StereoOutputFormat_DISPARITY_16Q_10_5, STEREO_OUTPUT_FORMAT_DISPARITY_16Q_10_5, StereoOutputFormat_DISPARITY_16Q_11_4, STEREO_OUTPUT_FORMAT_DISPARITY_16Q_11_4]"""

CV_BOOL: int
CV_INT: int
CV_INT64: int
CV_DOUBLE: int
CV_FLOAT: int
CV_STRING: int
CV_POINT: int
CV_POINT2F: int
CV_POINT3F: int
CV_SIZE: int
CV_RECT: int
CV_SCALAR: int
CV_MAT: int
CV_GMAT: int
CV_DRAW_PRIM: int
CV_ANY: int
ArgType = int
"""One of [CV_BOOL, CV_INT, CV_INT64, CV_DOUBLE, CV_FLOAT, CV_STRING, CV_POINT, CV_POINT2F, CV_POINT3F, CV_SIZE, CV_RECT, CV_SCALAR, CV_MAT, CV_GMAT, CV_DRAW_PRIM, CV_ANY]"""



# Classes
class GNetParam:
    ...

class GNetPackage:
    # Functions
    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, nets: typing.Sequence[GNetParam]) -> None: ...



# Functions
def BGR2Gray(src: cv2.GMat) -> cv2.GMat: ...

def BGR2I420(src: cv2.GMat) -> cv2.GMat: ...

def BGR2LUV(src: cv2.GMat) -> cv2.GMat: ...

def BGR2RGB(src: cv2.GMat) -> cv2.GMat: ...

def BGR2YUV(src: cv2.GMat) -> cv2.GMat: ...

def BayerGR2RGB(src_gr: cv2.GMat) -> cv2.GMat: ...

def Canny(image: cv2.GMat, threshold1: float, threshold2: float, apertureSize: int = ..., L2gradient: bool = ...) -> cv2.GMat: ...

def I4202BGR(src: cv2.GMat) -> cv2.GMat: ...

def I4202RGB(src: cv2.GMat) -> cv2.GMat: ...

def LUT(src: cv2.GMat, lut: cv2.typing.MatLike) -> cv2.GMat: ...

def LUV2BGR(src: cv2.GMat) -> cv2.GMat: ...

def Laplacian(src: cv2.GMat, ddepth: int, ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> cv2.GMat: ...

def NV12toBGR(src_y: cv2.GMat, src_uv: cv2.GMat) -> cv2.GMat: ...

def NV12toGray(src_y: cv2.GMat, src_uv: cv2.GMat) -> cv2.GMat: ...

def NV12toRGB(src_y: cv2.GMat, src_uv: cv2.GMat) -> cv2.GMat: ...

@typing.overload
def RGB2Gray(src: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def RGB2Gray(src: cv2.GMat, rY: float, gY: float, bY: float) -> cv2.GMat: ...

def RGB2HSV(src: cv2.GMat) -> cv2.GMat: ...

def RGB2I420(src: cv2.GMat) -> cv2.GMat: ...

def RGB2Lab(src: cv2.GMat) -> cv2.GMat: ...

def RGB2YUV(src: cv2.GMat) -> cv2.GMat: ...

def RGB2YUV422(src: cv2.GMat) -> cv2.GMat: ...

def Sobel(src: cv2.GMat, ddepth: int, dx: int, dy: int, ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def SobelXY(src: cv2.GMat, ddepth: int, order: int, ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> tuple[cv2.GMat, cv2.GMat]: ...

def YUV2BGR(src: cv2.GMat) -> cv2.GMat: ...

def YUV2RGB(src: cv2.GMat) -> cv2.GMat: ...

def absDiff(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...

def absDiffC(src: cv2.GMat, c: cv2.GScalar) -> cv2.GMat: ...

def add(src1: cv2.GMat, src2: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ...

@typing.overload
def addC(src1: cv2.GMat, c: cv2.GScalar, ddepth: int = ...) -> cv2.GMat: ...
@typing.overload
def addC(c: cv2.GScalar, src1: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ...

def addWeighted(src1: cv2.GMat, alpha: float, src2: cv2.GMat, beta: float, gamma: float, ddepth: int = ...) -> cv2.GMat: ...

def bilateralFilter(src: cv2.GMat, d: int, sigmaColor: float, sigmaSpace: float, borderType: int = ...) -> cv2.GMat: ...

@typing.overload
def bitwise_and(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def bitwise_and(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

def bitwise_not(src: cv2.GMat) -> cv2.GMat: ...

@typing.overload
def bitwise_or(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def bitwise_or(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@typing.overload
def bitwise_xor(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def bitwise_xor(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

def blur(src: cv2.GMat, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

@typing.overload
def boundingRect(src: cv2.GMat) -> cv2.GOpaqueT: ...
@typing.overload
def boundingRect(src: cv2.GArrayT) -> cv2.GOpaqueT: ...
@typing.overload
def boundingRect(src: cv2.GArrayT) -> cv2.GOpaqueT: ...

def boxFilter(src: cv2.GMat, dtype: int, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def cartToPolar(x: cv2.GMat, y: cv2.GMat, angleInDegrees: bool = ...) -> tuple[cv2.GMat, cv2.GMat]: ...

@typing.overload
def cmpEQ(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def cmpEQ(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@typing.overload
def cmpGE(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def cmpGE(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@typing.overload
def cmpGT(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def cmpGT(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@typing.overload
def cmpLE(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def cmpLE(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@typing.overload
def cmpLT(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def cmpLT(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@typing.overload
def cmpNE(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def cmpNE(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

def combine(lhs: cv2.GKernelPackage, rhs: cv2.GKernelPackage) -> cv2.GKernelPackage: ...

@typing.overload
def concatHor(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def concatHor(v: typing.Sequence[cv2.GMat]) -> cv2.GMat: ...

@typing.overload
def concatVert(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@typing.overload
def concatVert(v: typing.Sequence[cv2.GMat]) -> cv2.GMat: ...

def convertTo(src: cv2.GMat, rdepth: int, alpha: float = ..., beta: float = ...) -> cv2.GMat: ...

def copy(in_: cv2.GMat) -> cv2.GMat: ...

def countNonZero(src: cv2.GMat) -> cv2.GOpaqueT: ...

def crop(src: cv2.GMat, rect: cv2.typing.Rect) -> cv2.GMat: ...

def dilate(src: cv2.GMat, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def dilate3x3(src: cv2.GMat, iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def div(src1: cv2.GMat, src2: cv2.GMat, scale: float, ddepth: int = ...) -> cv2.GMat: ...

def divC(src: cv2.GMat, divisor: cv2.GScalar, scale: float, ddepth: int = ...) -> cv2.GMat: ...

def divRC(divident: cv2.GScalar, src: cv2.GMat, scale: float, ddepth: int = ...) -> cv2.GMat: ...

def equalizeHist(src: cv2.GMat) -> cv2.GMat: ...

def erode(src: cv2.GMat, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def erode3x3(src: cv2.GMat, iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def filter2D(src: cv2.GMat, ddepth: int, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., delta: cv2.typing.Scalar = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def flip(src: cv2.GMat, flipCode: int) -> cv2.GMat: ...

def gaussianBlur(src: cv2.GMat, ksize: cv2.typing.Size, sigmaX: float, sigmaY: float = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def goodFeaturesToTrack(image: cv2.GMat, maxCorners: int, qualityLevel: float, minDistance: float, mask: cv2.typing.MatLike | None = ..., blockSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> cv2.GArrayT: ...

def inRange(src: cv2.GMat, threshLow: cv2.GScalar, threshUp: cv2.GScalar) -> cv2.GMat: ...

@typing.overload
def infer(name: str, inputs: cv2.GInferInputs) -> cv2.GInferOutputs: ...
@typing.overload
def infer(name: str, roi: cv2.GOpaqueT, inputs: cv2.GInferInputs) -> cv2.GInferOutputs: ...
@typing.overload
def infer(name: str, rois: cv2.GArrayT, inputs: cv2.GInferInputs) -> cv2.GInferListOutputs: ...

def infer2(name: str, in_: cv2.GMat, inputs: cv2.GInferListInputs) -> cv2.GInferListOutputs: ...

def integral(src: cv2.GMat, sdepth: int = ..., sqdepth: int = ...) -> tuple[cv2.GMat, cv2.GMat]: ...

@typing.overload
def kmeans(data: cv2.GMat, K: int, bestLabels: cv2.GMat, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GMat, cv2.GMat]: ...
@typing.overload
def kmeans(data: cv2.GMat, K: int, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GMat, cv2.GMat]: ...
@typing.overload
def kmeans(data: cv2.GArrayT, K: int, bestLabels: cv2.GArrayT, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GArrayT, cv2.GArrayT]: ...
@typing.overload
def kmeans(data: cv2.GArrayT, K: int, bestLabels: cv2.GArrayT, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GArrayT, cv2.GArrayT]: ...

def mask(src: cv2.GMat, mask: cv2.GMat) -> cv2.GMat: ...

def max(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...

def mean(src: cv2.GMat) -> cv2.GScalar: ...

def medianBlur(src: cv2.GMat, ksize: int) -> cv2.GMat: ...

def merge3(src1: cv2.GMat, src2: cv2.GMat, src3: cv2.GMat) -> cv2.GMat: ...

def merge4(src1: cv2.GMat, src2: cv2.GMat, src3: cv2.GMat, src4: cv2.GMat) -> cv2.GMat: ...

def min(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...

def morphologyEx(src: cv2.GMat, op: cv2.MorphTypes, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: cv2.BorderTypes = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def mul(src1: cv2.GMat, src2: cv2.GMat, scale: float = ..., ddepth: int = ...) -> cv2.GMat: ...

@typing.overload
def mulC(src: cv2.GMat, multiplier: float, ddepth: int = ...) -> cv2.GMat: ...
@typing.overload
def mulC(src: cv2.GMat, multiplier: cv2.GScalar, ddepth: int = ...) -> cv2.GMat: ...
@typing.overload
def mulC(multiplier: cv2.GScalar, src: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ...

def normInf(src: cv2.GMat) -> cv2.GScalar: ...

def normL1(src: cv2.GMat) -> cv2.GScalar: ...

def normL2(src: cv2.GMat) -> cv2.GScalar: ...

def normalize(src: cv2.GMat, alpha: float, beta: float, norm_type: int, ddepth: int = ...) -> cv2.GMat: ...

@typing.overload
def parseSSD(in_: cv2.GMat, inSz: cv2.GOpaqueT, confidenceThreshold: float = ..., filterLabel: int = ...) -> tuple[cv2.GArrayT, cv2.GArrayT]: ...
@typing.overload
def parseSSD(in_: cv2.GMat, inSz: cv2.GOpaqueT, confidenceThreshold: float, alignmentToSquare: bool, filterOutOfBounds: bool) -> cv2.GArrayT: ...

def parseYolo(in_: cv2.GMat, inSz: cv2.GOpaqueT, confidenceThreshold: float = ..., nmsThreshold: float = ..., anchors: typing.Sequence[float] = ...) -> tuple[cv2.GArrayT, cv2.GArrayT]: ...

def phase(x: cv2.GMat, y: cv2.GMat, angleInDegrees: bool = ...) -> cv2.GMat: ...

def polarToCart(magnitude: cv2.GMat, angle: cv2.GMat, angleInDegrees: bool = ...) -> tuple[cv2.GMat, cv2.GMat]: ...

def remap(src: cv2.GMat, map1: cv2.typing.MatLike, map2: cv2.typing.MatLike, interpolation: int, borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def resize(src: cv2.GMat, dsize: cv2.typing.Size, fx: float = ..., fy: float = ..., interpolation: int = ...) -> cv2.GMat: ...

def select(src1: cv2.GMat, src2: cv2.GMat, mask: cv2.GMat) -> cv2.GMat: ...

def sepFilter(src: cv2.GMat, ddepth: int, kernelX: cv2.typing.MatLike, kernelY: cv2.typing.MatLike, anchor: cv2.typing.Point, delta: cv2.typing.Scalar, borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def split3(src: cv2.GMat) -> tuple[cv2.GMat, cv2.GMat, cv2.GMat]: ...

def split4(src: cv2.GMat) -> tuple[cv2.GMat, cv2.GMat, cv2.GMat, cv2.GMat]: ...

def sqrt(src: cv2.GMat) -> cv2.GMat: ...

def sub(src1: cv2.GMat, src2: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ...

def subC(src: cv2.GMat, c: cv2.GScalar, ddepth: int = ...) -> cv2.GMat: ...

def subRC(c: cv2.GScalar, src: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ...

def sum(src: cv2.GMat) -> cv2.GScalar: ...

@typing.overload
def threshold(src: cv2.GMat, thresh: cv2.GScalar, maxval: cv2.GScalar, type: int) -> cv2.GMat: ...
@typing.overload
def threshold(src: cv2.GMat, maxval: cv2.GScalar, type: int) -> tuple[cv2.GMat, cv2.GScalar]: ...

def transpose(src: cv2.GMat) -> cv2.GMat: ...

def warpAffine(src: cv2.GMat, M: cv2.typing.MatLike, dsize: cv2.typing.Size, flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def warpPerspective(src: cv2.GMat, M: cv2.typing.MatLike, dsize: cv2.typing.Size, flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...
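Note: a small graph built only from functions stubbed above, to show how they compose; the input file name is a placeholder.

    import cv2 as cv

    g_in = cv.GMat()
    g_gray = cv.gapi.BGR2Gray(g_in)
    g_blur = cv.gapi.gaussianBlur(g_gray, (5, 5), 1.0)
    g_edges = cv.gapi.Canny(g_blur, 32.0, 128.0)
    pipeline = cv.GComputation(cv.GIn(g_in), cv.GOut(g_edges))

    frame = cv.imread('input.png')          # placeholder path
    edges = pipeline.apply(cv.gin(frame))   # returns a NumPy array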
cv2/gapi/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (22.5 kB).
cv2/gapi/core/__init__.pyi
ADDED
File without changes
cv2/gapi/core/cpu/__init__.pyi
ADDED
@@ -0,0 +1,8 @@
import cv2
import typing


# Functions
def kernels() -> cv2.GKernelPackage: ...
cv2/gapi/core/fluid/__init__.pyi
ADDED
@@ -0,0 +1,8 @@
import cv2
import typing


# Functions
def kernels() -> cv2.GKernelPackage: ...
cv2/gapi/core/ocl/__init__.pyi
ADDED
@@ -0,0 +1,8 @@
import cv2
import typing


# Functions
def kernels() -> cv2.GKernelPackage: ...
cv2/gapi/ie/__init__.pyi
ADDED
@@ -0,0 +1,46 @@
import cv2.typing
import typing


# Enumerations
TraitAs_TENSOR: int
TRAIT_AS_TENSOR: int
TraitAs_IMAGE: int
TRAIT_AS_IMAGE: int
TraitAs = int
"""One of [TraitAs_TENSOR, TRAIT_AS_TENSOR, TraitAs_IMAGE, TRAIT_AS_IMAGE]"""

Sync: int
SYNC: int
Async: int
ASYNC: int
InferMode = int
"""One of [Sync, SYNC, Async, ASYNC]"""



# Classes
class PyParams:
    # Functions
    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, tag: str, model: str, weights: str, device: str) -> None: ...
    @typing.overload
    def __init__(self, tag: str, model: str, device: str) -> None: ...

    def constInput(self, layer_name: str, data: cv2.typing.MatLike, hint: TraitAs = ...) -> PyParams: ...

    def cfgNumRequests(self, nireq: int) -> PyParams: ...

    def cfgBatchSize(self, size: int) -> PyParams: ...



# Functions
@typing.overload
def params(tag: str, model: str, weights: str, device: str) -> PyParams: ...
@typing.overload
def params(tag: str, model: str, device: str) -> PyParams: ...
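Note: a sketch of wiring an Inference Engine network into a graph through the params() helper typed above and the infer()/networks() helpers from cv2.gapi; the tag, model paths, and layer names are placeholders specific to whatever model is used.

    import cv2 as cv

    net = cv.gapi.ie.params('ssd', 'model.xml', 'model.bin', 'CPU').cfgNumRequests(2)

    g_in = cv.GMat()
    inputs = cv.GInferInputs()
    inputs.setInput('data', g_in)             # 'data' is the model's input layer (placeholder)
    outputs = cv.gapi.infer('ssd', inputs)
    g_out = outputs.at('detection_out')       # output layer name is model-specific

    comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
    frame = cv.imread('frame.png')            # placeholder path
    result = comp.apply(cv.gin(frame),
                        args=cv.gapi.compile_args(cv.gapi.networks(net)))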
cv2/gapi/ie/detail/__init__.pyi
ADDED
@@ -0,0 +1,10 @@
ParamDesc_Kind_Load: int
PARAM_DESC_KIND_LOAD: int
ParamDesc_Kind_Import: int
PARAM_DESC_KIND_IMPORT: int
ParamDesc_Kind = int
"""One of [ParamDesc_Kind_Load, PARAM_DESC_KIND_LOAD, ParamDesc_Kind_Import, PARAM_DESC_KIND_IMPORT]"""


# Classes
cv2/gapi/imgproc/__init__.pyi
ADDED
File without changes
cv2/gapi/imgproc/fluid/__init__.pyi
ADDED
@@ -0,0 +1,8 @@
import cv2
import typing


# Functions
def kernels() -> cv2.GKernelPackage: ...
cv2/gapi/oak/__init__.pyi
ADDED
@@ -0,0 +1,35 @@
EncoderConfig_RateControlMode_CBR: int
ENCODER_CONFIG_RATE_CONTROL_MODE_CBR: int
EncoderConfig_RateControlMode_VBR: int
ENCODER_CONFIG_RATE_CONTROL_MODE_VBR: int
EncoderConfig_RateControlMode = int
"""One of [EncoderConfig_RateControlMode_CBR, ENCODER_CONFIG_RATE_CONTROL_MODE_CBR, EncoderConfig_RateControlMode_VBR, ENCODER_CONFIG_RATE_CONTROL_MODE_VBR]"""

EncoderConfig_Profile_H264_BASELINE: int
ENCODER_CONFIG_PROFILE_H264_BASELINE: int
EncoderConfig_Profile_H264_HIGH: int
ENCODER_CONFIG_PROFILE_H264_HIGH: int
EncoderConfig_Profile_H264_MAIN: int
ENCODER_CONFIG_PROFILE_H264_MAIN: int
EncoderConfig_Profile_H265_MAIN: int
ENCODER_CONFIG_PROFILE_H265_MAIN: int
EncoderConfig_Profile_MJPEG: int
ENCODER_CONFIG_PROFILE_MJPEG: int
EncoderConfig_Profile = int
"""One of [EncoderConfig_Profile_H264_BASELINE, ENCODER_CONFIG_PROFILE_H264_BASELINE, EncoderConfig_Profile_H264_HIGH, ENCODER_CONFIG_PROFILE_H264_HIGH, EncoderConfig_Profile_H264_MAIN, ENCODER_CONFIG_PROFILE_H264_MAIN, EncoderConfig_Profile_H265_MAIN, ENCODER_CONFIG_PROFILE_H265_MAIN, EncoderConfig_Profile_MJPEG, ENCODER_CONFIG_PROFILE_MJPEG]"""

ColorCameraParams_BoardSocket_RGB: int
COLOR_CAMERA_PARAMS_BOARD_SOCKET_RGB: int
ColorCameraParams_BoardSocket_BGR: int
COLOR_CAMERA_PARAMS_BOARD_SOCKET_BGR: int
ColorCameraParams_BoardSocket = int
"""One of [ColorCameraParams_BoardSocket_RGB, COLOR_CAMERA_PARAMS_BOARD_SOCKET_RGB, ColorCameraParams_BoardSocket_BGR, COLOR_CAMERA_PARAMS_BOARD_SOCKET_BGR]"""

ColorCameraParams_Resolution_THE_1080_P: int
COLOR_CAMERA_PARAMS_RESOLUTION_THE_1080_P: int
ColorCameraParams_Resolution = int
"""One of [ColorCameraParams_Resolution_THE_1080_P, COLOR_CAMERA_PARAMS_RESOLUTION_THE_1080_P]"""


# Classes
cv2/gapi/onnx/__init__.pyi
ADDED
@@ -0,0 +1,32 @@
import cv2.typing
import typing


# Enumerations
TraitAs_TENSOR: int
TRAIT_AS_TENSOR: int
TraitAs_IMAGE: int
TRAIT_AS_IMAGE: int
TraitAs = int
"""One of [TraitAs_TENSOR, TRAIT_AS_TENSOR, TraitAs_IMAGE, TRAIT_AS_IMAGE]"""



# Classes
class PyParams:
    # Functions
    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, tag: str, model_path: str) -> None: ...

    def cfgMeanStd(self, layer_name: str, m: cv2.typing.Scalar, s: cv2.typing.Scalar) -> PyParams: ...

    def cfgNormalize(self, layer_name: str, flag: bool) -> PyParams: ...



# Functions
def params(tag: str, model_path: str) -> PyParams: ...
cv2/gapi/ov/__init__.pyi
ADDED
@@ -0,0 +1,72 @@
import cv2.typing
import typing


# Classes
class PyParams:
    # Functions
    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, tag: str, model_path: str, bin_path: str, device: str) -> None: ...
    @typing.overload
    def __init__(self, tag: str, blob_path: str, device: str) -> None: ...

    def cfgPluginConfig(self, config: cv2.typing.map_string_and_string) -> PyParams: ...

    @typing.overload
    def cfgInputTensorLayout(self, tensor_layout: str) -> PyParams: ...
    @typing.overload
    def cfgInputTensorLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...

    @typing.overload
    def cfgInputModelLayout(self, tensor_layout: str) -> PyParams: ...
    @typing.overload
    def cfgInputModelLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...

    @typing.overload
    def cfgOutputTensorLayout(self, tensor_layout: str) -> PyParams: ...
    @typing.overload
    def cfgOutputTensorLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...

    @typing.overload
    def cfgOutputModelLayout(self, tensor_layout: str) -> PyParams: ...
    @typing.overload
    def cfgOutputModelLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...

    @typing.overload
    def cfgOutputTensorPrecision(self, precision: int) -> PyParams: ...
    @typing.overload
    def cfgOutputTensorPrecision(self, precision_map: cv2.typing.map_string_and_int) -> PyParams: ...

    @typing.overload
    def cfgReshape(self, new_shape: typing.Sequence[int]) -> PyParams: ...
    @typing.overload
    def cfgReshape(self, new_shape_map: cv2.typing.map_string_and_vector_size_t) -> PyParams: ...

    def cfgNumRequests(self, nireq: int) -> PyParams: ...

    @typing.overload
    def cfgMean(self, mean_values: typing.Sequence[float]) -> PyParams: ...
    @typing.overload
    def cfgMean(self, mean_map: cv2.typing.map_string_and_vector_float) -> PyParams: ...

    @typing.overload
    def cfgScale(self, scale_values: typing.Sequence[float]) -> PyParams: ...
    @typing.overload
    def cfgScale(self, scale_map: cv2.typing.map_string_and_vector_float) -> PyParams: ...

    @typing.overload
    def cfgResize(self, interpolation: int) -> PyParams: ...
    @typing.overload
    def cfgResize(self, interpolation: cv2.typing.map_string_and_int) -> PyParams: ...



# Functions
@typing.overload
def params(tag: str, model_path: str, weights: str, device: str) -> PyParams: ...
@typing.overload
def params(tag: str, bin_path: str, device: str) -> PyParams: ...
cv2/gapi/own/__init__.pyi
ADDED
File without changes
cv2/gapi/own/detail/__init__.pyi
ADDED
@@ -0,0 +1,8 @@
MatHeader_AUTO_STEP: int
MAT_HEADER_AUTO_STEP: int
MatHeader_TYPE_MASK: int
MAT_HEADER_TYPE_MASK: int


# Classes
cv2/gapi/render/__init__.pyi
ADDED
File without changes
cv2/gapi/render/ocv/__init__.pyi
ADDED
@@ -0,0 +1,8 @@
import cv2
import typing


# Functions
def kernels() -> cv2.GKernelPackage: ...
cv2/gapi/streaming/__init__.pyi
ADDED
@@ -0,0 +1,40 @@
import cv2
import typing


# Enumerations
sync_policy_dont_sync: int
SYNC_POLICY_DONT_SYNC: int
sync_policy_drop: int
SYNC_POLICY_DROP: int
sync_policy = int
"""One of [sync_policy_dont_sync, SYNC_POLICY_DONT_SYNC, sync_policy_drop, SYNC_POLICY_DROP]"""



# Classes
class queue_capacity:
    capacity: int

    # Functions
    def __init__(self, cap: int = ...) -> None: ...



# Functions
def desync(g: cv2.GMat) -> cv2.GMat: ...

def seqNo(arg1: cv2.GMat) -> cv2.GOpaqueT: ...

def seq_id(arg1: cv2.GMat) -> cv2.GOpaqueT: ...

@typing.overload
def size(src: cv2.GMat) -> cv2.GOpaqueT: ...
@typing.overload
def size(r: cv2.GOpaqueT) -> cv2.GOpaqueT: ...
@typing.overload
def size(src: cv2.GFrame) -> cv2.GOpaqueT: ...

def timestamp(arg1: cv2.GMat) -> cv2.GOpaqueT: ...
cv2/gapi/video/__init__.pyi
ADDED
@@ -0,0 +1,8 @@
# Enumerations
TYPE_BS_MOG2: int
TYPE_BS_KNN: int
BackgroundSubtractorType = int
"""One of [TYPE_BS_MOG2, TYPE_BS_KNN]"""
cv2/gapi/wip/__init__.pyi
ADDED
@@ -0,0 +1,34 @@
import cv2
import cv2.gapi
import cv2.gapi.wip.gst
import cv2.typing
import typing


# Classes
class GOutputs:
    # Functions
    def getGMat(self) -> cv2.GMat: ...

    def getGScalar(self) -> cv2.GScalar: ...

    def getGArray(self, type: cv2.gapi.ArgType) -> cv2.GArrayT: ...

    def getGOpaque(self, type: cv2.gapi.ArgType) -> cv2.GOpaqueT: ...


class IStreamSource:
    ...


# Functions
def get_streaming_source(pipeline: cv2.gapi.wip.gst.GStreamerPipeline, appsinkName: str, outputType: cv2.gapi.wip.gst.GStreamerSource_OutputType = ...) -> IStreamSource: ...

@typing.overload
def make_capture_src(path: str, properties: cv2.typing.map_int_and_double = ...) -> IStreamSource: ...
@typing.overload
def make_capture_src(id: int, properties: cv2.typing.map_int_and_double = ...) -> IStreamSource: ...

def make_gst_src(pipeline: str, outputType: cv2.gapi.wip.gst.GStreamerSource_OutputType = ...) -> IStreamSource: ...
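Note: a streaming sketch combining make_capture_src() above with the streaming compilation path; the video path is a placeholder and the graph is deliberately trivial.

    import cv2 as cv

    g_in = cv.GMat()
    g_out = cv.gapi.BGR2Gray(g_in)
    comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))

    ccomp = comp.compileStreaming()
    source = cv.gapi.wip.make_capture_src('video.mp4')   # placeholder path
    ccomp.setSource(cv.gin(source))
    ccomp.start()

    while True:
        has_frame, gray = ccomp.pull()
        if not has_frame:
            break
        cv.imshow('gray', gray)
        if cv.waitKey(1) == 27:   # Esc to quit
            break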
cv2/gapi/wip/draw/__init__.pyi
ADDED
@@ -0,0 +1,117 @@
import cv2
import cv2.typing
import typing


# Classes
class Text:
    text: str
    org: cv2.typing.Point
    ff: int
    fs: float
    color: cv2.typing.Scalar
    thick: int
    lt: int
    bottom_left_origin: bool

    # Functions
    @typing.overload
    def __init__(self, text_: str, org_: cv2.typing.Point, ff_: int, fs_: float, color_: cv2.typing.Scalar, thick_: int = ..., lt_: int = ..., bottom_left_origin_: bool = ...) -> None: ...
    @typing.overload
    def __init__(self) -> None: ...


class Rect:
    rect: cv2.typing.Rect
    color: cv2.typing.Scalar
    thick: int
    lt: int
    shift: int

    # Functions
    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, rect_: cv2.typing.Rect2i, color_: cv2.typing.Scalar, thick_: int = ..., lt_: int = ..., shift_: int = ...) -> None: ...


class Circle:
    center: cv2.typing.Point
    radius: int
    color: cv2.typing.Scalar
    thick: int
    lt: int
    shift: int

    # Functions
    @typing.overload
    def __init__(self, center_: cv2.typing.Point, radius_: int, color_: cv2.typing.Scalar, thick_: int = ..., lt_: int = ..., shift_: int = ...) -> None: ...
    @typing.overload
    def __init__(self) -> None: ...


class Line:
    pt1: cv2.typing.Point
    pt2: cv2.typing.Point
    color: cv2.typing.Scalar
    thick: int
    lt: int
    shift: int

    # Functions
    @typing.overload
    def __init__(self, pt1_: cv2.typing.Point, pt2_: cv2.typing.Point, color_: cv2.typing.Scalar, thick_: int = ..., lt_: int = ..., shift_: int = ...) -> None: ...
    @typing.overload
    def __init__(self) -> None: ...


class Mosaic:
    mos: cv2.typing.Rect
    cellSz: int
    decim: int

    # Functions
    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, mos_: cv2.typing.Rect2i, cellSz_: int, decim_: int) -> None: ...


class Image:
    org: cv2.typing.Point
    img: cv2.typing.MatLike
    alpha: cv2.typing.MatLike

    # Functions
    @typing.overload
    def __init__(self, org_: cv2.typing.Point, img_: cv2.typing.MatLike, alpha_: cv2.typing.MatLike) -> None: ...
    @typing.overload
    def __init__(self) -> None: ...


class Poly:
    points: typing.Sequence[cv2.typing.Point]
    color: cv2.typing.Scalar
    thick: int
    lt: int
    shift: int

    # Functions
    @typing.overload
    def __init__(self, points_: typing.Sequence[cv2.typing.Point], color_: cv2.typing.Scalar, thick_: int = ..., lt_: int = ..., shift_: int = ...) -> None: ...
    @typing.overload
    def __init__(self) -> None: ...



# Functions
@typing.overload
def render(bgr: cv2.typing.MatLike, prims: typing.Sequence[cv2.typing.Prim], args: typing.Sequence[cv2.GCompileArg] = ...) -> None: ...
@typing.overload
def render(y_plane: cv2.typing.MatLike, uv_plane: cv2.typing.MatLike, prims: typing.Sequence[cv2.typing.Prim], args: typing.Sequence[cv2.GCompileArg] = ...) -> None: ...

def render3ch(src: cv2.GMat, prims: cv2.GArrayT) -> cv2.GMat: ...

def renderNV12(y: cv2.GMat, uv: cv2.GMat, prims: cv2.GArrayT) -> tuple[cv2.GMat, cv2.GMat]: ...
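Note: a sketch of rendering the primitives declared above onto a BGR image in place; the coordinates, colors, and canvas size are arbitrary.

    import cv2 as cv
    import numpy as np

    canvas = np.zeros((240, 320, 3), dtype=np.uint8)
    prims = [
        cv.gapi.wip.draw.Rect((30, 30, 100, 60), (0, 255, 0), 2),
        cv.gapi.wip.draw.Circle((160, 120), 40, (0, 0, 255), 3),
        cv.gapi.wip.draw.Text('G-API', (10, 220), cv.FONT_HERSHEY_SIMPLEX, 0.8,
                              (255, 255, 255)),
    ]
    cv.gapi.wip.draw.render(canvas, prims)   # draws the primitives onto canvas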
cv2/gapi/wip/gst/__init__.pyi
ADDED
@@ -0,0 +1,18 @@
import typing


GStreamerSource_OutputType_FRAME: int
GSTREAMER_SOURCE_OUTPUT_TYPE_FRAME: int
GStreamerSource_OutputType_MAT: int
GSTREAMER_SOURCE_OUTPUT_TYPE_MAT: int
GStreamerSource_OutputType = int
"""One of [GStreamerSource_OutputType_FRAME, GSTREAMER_SOURCE_OUTPUT_TYPE_FRAME, GStreamerSource_OutputType_MAT, GSTREAMER_SOURCE_OUTPUT_TYPE_MAT]"""


# Classes
class GStreamerPipeline:
    # Functions
    def __init__(self, pipeline: str) -> None: ...
cv2/gapi/wip/onevpl/__init__.pyi
ADDED
@@ -0,0 +1,14 @@
# Enumerations
AccelType_HOST: int
ACCEL_TYPE_HOST: int
AccelType_DX11: int
ACCEL_TYPE_DX11: int
AccelType_VAAPI: int
ACCEL_TYPE_VAAPI: int
AccelType_LAST_VALUE: int
ACCEL_TYPE_LAST_VALUE: int
AccelType = int
"""One of [AccelType_HOST, ACCEL_TYPE_HOST, AccelType_DX11, ACCEL_TYPE_DX11, AccelType_VAAPI, ACCEL_TYPE_VAAPI, AccelType_LAST_VALUE, ACCEL_TYPE_LAST_VALUE]"""
cv2/ipp/__init__.pyi
ADDED
@@ -0,0 +1,15 @@
import typing


# Functions
def getIppVersion() -> str: ...

def setUseIPP(flag: bool) -> None: ...

def setUseIPP_NotExact(flag: bool) -> None: ...

def useIPP() -> bool: ...

def useIPP_NotExact() -> bool: ...