diff --git "a/python_libs_opencv.txt" "b/python_libs_opencv.txt" new file mode 100644--- /dev/null +++ "b/python_libs_opencv.txt" @@ -0,0 +1,30722 @@ +# File: opencv-master/doc/pattern_tools/gen_pattern.py +"""""" +import argparse +import numpy as np +import json +import gzip +from svgfig import * + +class PatternMaker: + + def __init__(self, cols, rows, output, units, square_size, radius_rate, page_width, page_height, markers, aruco_marker_size, dict_file): + self.cols = cols + self.rows = rows + self.output = output + self.units = units + self.square_size = square_size + self.radius_rate = radius_rate + self.width = page_width + self.height = page_height + self.markers = markers + self.aruco_marker_size = aruco_marker_size + self.dict_file = dict_file + self.g = SVG('g') + + def make_circles_pattern(self): + spacing = self.square_size + r = spacing / self.radius_rate + pattern_width = (self.cols - 1.0) * spacing + 2.0 * r + pattern_height = (self.rows - 1.0) * spacing + 2.0 * r + x_spacing = (self.width - pattern_width) / 2.0 + y_spacing = (self.height - pattern_height) / 2.0 + for x in range(0, self.cols): + for y in range(0, self.rows): + dot = SVG('circle', cx=x * spacing + x_spacing + r, cy=y * spacing + y_spacing + r, r=r, fill='black', stroke='none') + self.g.append(dot) + + def make_acircles_pattern(self): + spacing = self.square_size + r = spacing / self.radius_rate + pattern_width = (self.cols - 1.0) * 2 * spacing + spacing + 2.0 * r + pattern_height = (self.rows - 1.0) * spacing + 2.0 * r + x_spacing = (self.width - pattern_width) / 2.0 + y_spacing = (self.height - pattern_height) / 2.0 + for x in range(0, self.cols): + for y in range(0, self.rows): + dot = SVG('circle', cx=2 * x * spacing + y % 2 * spacing + x_spacing + r, cy=y * spacing + y_spacing + r, r=r, fill='black', stroke='none') + self.g.append(dot) + + def make_checkerboard_pattern(self): + spacing = self.square_size + xspacing = (self.width - self.cols * self.square_size) / 2.0 + yspacing = (self.height - self.rows * self.square_size) / 2.0 + for x in range(0, self.cols): + for y in range(0, self.rows): + if x % 2 == y % 2: + square = SVG('rect', x=x * spacing + xspacing, y=y * spacing + yspacing, width=spacing, height=spacing, fill='black', stroke='none') + self.g.append(square) + + @staticmethod + def _make_round_rect(x, y, diam, corners=('right', 'right', 'right', 'right')): + rad = diam / 2 + cw_point = ((0, 0), (diam, 0), (diam, diam), (0, diam)) + mid_cw_point = ((0, rad), (rad, 0), (diam, rad), (rad, diam)) + res_str = 'M{},{} '.format(x + mid_cw_point[0][0], y + mid_cw_point[0][1]) + n = len(cw_point) + for i in range(n): + if corners[i] == 'right': + res_str += 'L{},{} L{},{} '.format(x + cw_point[i][0], y + cw_point[i][1], x + mid_cw_point[(i + 1) % n][0], y + mid_cw_point[(i + 1) % n][1]) + elif corners[i] == 'round': + res_str += 'A{},{} 0,0,1 {},{} '.format(rad, rad, x + mid_cw_point[(i + 1) % n][0], y + mid_cw_point[(i + 1) % n][1]) + else: + raise TypeError('unknown corner type') + return res_str + + def _get_type(self, x, y): + corners = ['right', 'right', 'right', 'right'] + is_inside = True + if x == 0: + corners[0] = 'round' + corners[3] = 'round' + is_inside = False + if y == 0: + corners[0] = 'round' + corners[1] = 'round' + is_inside = False + if x == self.cols - 1: + corners[1] = 'round' + corners[2] = 'round' + is_inside = False + if y == self.rows - 1: + corners[2] = 'round' + corners[3] = 'round' + is_inside = False + return (corners, is_inside) + + def 
make_radon_checkerboard_pattern(self): + spacing = self.square_size + xspacing = (self.width - self.cols * self.square_size) / 2.0 + yspacing = (self.height - self.rows * self.square_size) / 2.0 + for x in range(0, self.cols): + for y in range(0, self.rows): + if x % 2 == y % 2: + (corner_types, is_inside) = self._get_type(x, y) + if is_inside: + square = SVG('rect', x=x * spacing + xspacing, y=y * spacing + yspacing, width=spacing, height=spacing, fill='black', stroke='none') + else: + square = SVG('path', d=self._make_round_rect(x * spacing + xspacing, y * spacing + yspacing, spacing, corner_types), fill='black', stroke='none') + self.g.append(square) + if self.markers is not None: + r = self.square_size * 0.17 + pattern_width = (self.cols - 1.0) * spacing + 2.0 * r + pattern_height = (self.rows - 1.0) * spacing + 2.0 * r + x_spacing = (self.width - pattern_width) / 2.0 + y_spacing = (self.height - pattern_height) / 2.0 + for (x, y) in self.markers: + color = 'black' + if x % 2 == y % 2: + color = 'white' + dot = SVG('circle', cx=x * spacing + x_spacing + r, cy=y * spacing + y_spacing + r, r=r, fill=color, stroke='none') + self.g.append(dot) + + @staticmethod + def _create_marker_bits(markerSize_bits, byteList): + marker = np.zeros((markerSize_bits + 2, markerSize_bits + 2)) + bits = marker[1:markerSize_bits + 1, 1:markerSize_bits + 1] + for i in range(markerSize_bits): + for j in range(markerSize_bits): + bits[i][j] = int(byteList[i * markerSize_bits + j]) + return marker + + def make_charuco_board(self): + if self.aruco_marker_size > self.square_size: + print('Error: Aruco marker cannot be larger than chessboard square!') + return + if self.dict_file.split('.')[-1] == 'gz': + with gzip.open(self.dict_file, 'r') as fin: + json_bytes = fin.read() + json_str = json_bytes.decode('utf-8') + dictionary = json.loads(json_str) + else: + f = open(self.dict_file) + dictionary = json.load(f) + if dictionary['nmarkers'] < int(self.cols * self.rows / 2): + print('Error: Aruco dictionary contains fewer markers than the chosen board requires. Please choose another dictionary or a smaller board') + return + markerSize_bits = dictionary['markersize'] + side = self.aruco_marker_size / (markerSize_bits + 2) + spacing = self.square_size + xspacing = (self.width - self.cols * self.square_size) / 2.0 + yspacing = (self.height - self.rows * self.square_size) / 2.0 + ch_ar_border = (self.square_size - self.aruco_marker_size) / 2 + if ch_ar_border < side * 0.7: + print('Marker border {} is less than 70% of ArUco pin size {}. Please increase --square_size or decrease --marker_size for stable board detection'.format(ch_ar_border, int(side)))
+ marker_id = 0 + for y in range(0, self.rows): + for x in range(0, self.cols): + if x % 2 == y % 2: + square = SVG('rect', x=x * spacing + xspacing, y=y * spacing + yspacing, width=spacing, height=spacing, fill='black', stroke='none') + self.g.append(square) + else: + img_mark = self._create_marker_bits(markerSize_bits, dictionary['marker_' + str(marker_id)]) + marker_id += 1 + x_pos = x * spacing + xspacing + y_pos = y * spacing + yspacing + square = SVG('rect', x=x_pos + ch_ar_border, y=y_pos + ch_ar_border, width=self.aruco_marker_size, height=self.aruco_marker_size, fill='black', stroke='none') + self.g.append(square) + for x_ in range(len(img_mark[0])): + for y_ in range(len(img_mark)): + if img_mark[y_][x_] != 0: + square = SVG('rect', x=x_pos + ch_ar_border + x_ * side, y=y_pos + ch_ar_border + y_ * side, width=side, height=side, fill='white', stroke='white', stroke_width=spacing * 0.01) + self.g.append(square) + + def save(self): + c = canvas(self.g, width='%d%s' % (self.width, self.units), height='%d%s' % (self.height, self.units), viewBox='0 0 %d %d' % (self.width, self.height)) + c.save(self.output)
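# --- Editor's note: minimal usage sketch (not part of the upstream script). ---
# PatternMaker can also be driven directly, bypassing the CLI defined in
# main() below; the A4 page numbers here are illustrative.
def _pattern_maker_demo():
    pm = PatternMaker(cols=8, rows=11, output='chessboard.svg', units='mm',
                      square_size=20.0, radius_rate=5.0, page_width=210,
                      page_height=297, markers=None, aruco_marker_size=10.0,
                      dict_file='DICT_ARUCO_ORIGINAL.json')
    pm.make_checkerboard_pattern()   # fills the SVG group self.g
    pm.save()                        # writes chessboard.svg

# Equivalent CLI invocation (see main() below):
#   python gen_pattern.py -o chessboard.svg -T checkerboard -c 8 -r 11 -s 20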
+def main(): + parser = argparse.ArgumentParser(description='generate camera-calibration pattern', add_help=False) + parser.add_argument('-H', '--help', help='show help', action='store_true', dest='show_help') + parser.add_argument('-o', '--output', help='output file', default='out.svg', action='store', dest='output') + parser.add_argument('-c', '--columns', help='pattern columns', default='8', action='store', dest='columns', type=int) + parser.add_argument('-r', '--rows', help='pattern rows', default='11', action='store', dest='rows', type=int) + parser.add_argument('-T', '--type', help='type of pattern', default='circles', action='store', dest='p_type', choices=['circles', 'acircles', 'checkerboard', 'radon_checkerboard', 'charuco_board']) + parser.add_argument('-u', '--units', help='length unit', default='mm', action='store', dest='units', choices=['mm', 'inches', 'px', 'm']) + parser.add_argument('-s', '--square_size', help='size of squares in pattern', default='20.0', action='store', dest='square_size', type=float) + parser.add_argument('-R', '--radius_rate', help='circles_radius = square_size/radius_rate', default='5.0', action='store', dest='radius_rate', type=float) + parser.add_argument('-w', '--page_width', help='page width in units', default=argparse.SUPPRESS, action='store', dest='page_width', type=float) + parser.add_argument('-h', '--page_height', help='page height in units', default=argparse.SUPPRESS, action='store', dest='page_height', type=float) + parser.add_argument('-a', '--page_size', help='page size, superseded if -h and -w are set', default='A4', action='store', dest='page_size', choices=['A0', 'A1', 'A2', 'A3', 'A4', 'A5']) + parser.add_argument('-m', '--markers', help='list of cells with markers for the radon checkerboard. Marker coordinates as list of numbers: -m 1 2 3 4 means markers in cells [1, 2] and [3, 4]', default=argparse.SUPPRESS, action='store', dest='markers', nargs='+', type=int) + parser.add_argument('-p', '--marker_size', help='aruco markers size for ChAruco pattern (default 10.0)', default='10.0', action='store', dest='aruco_marker_size', type=float) + parser.add_argument('-f', '--dict_file', help='file name of custom aruco dictionary for ChAruco pattern', default='DICT_ARUCO_ORIGINAL.json', action='store', dest='dict_file', type=str) + args = parser.parse_args() + show_help = args.show_help + if show_help: + parser.print_help() + return + output = args.output + columns = args.columns + rows = args.rows + p_type = args.p_type + units = args.units + square_size = args.square_size + radius_rate = args.radius_rate + aruco_marker_size = args.aruco_marker_size + dict_file = args.dict_file + if 'page_width' in args and 'page_height' in args: + page_width = args.page_width + page_height = args.page_height + else: + page_size = args.page_size + page_sizes = {'A0': [840, 1188], 'A1': [594, 840], 'A2': [420, 594], 'A3': [297, 420], 'A4': [210, 297], 'A5': [148, 210]} + page_width = page_sizes[page_size][0] + page_height = page_sizes[page_size][1] + markers = None + if p_type == 'radon_checkerboard' and 'markers' in args: + if len(args.markers) % 2 == 1: + raise ValueError('The length of the markers array={} must be even'.format(len(args.markers))) + markers = set() + for (x, y) in zip(args.markers[::2], args.markers[1::2]): + if x in range(0, columns) and y in range(0, rows): + markers.add((x, y)) + else: + raise ValueError('The marker {},{} is outside the checkerboard'.format(x, y)) + if p_type == 'charuco_board' and aruco_marker_size >= square_size: + raise ValueError('ArUco markers size must be smaller than square size') + pm = PatternMaker(columns, rows, output, units, square_size, radius_rate, page_width, page_height, markers, aruco_marker_size, dict_file) + mp = {'circles': pm.make_circles_pattern, 'acircles': pm.make_acircles_pattern, 'checkerboard': pm.make_checkerboard_pattern, 'radon_checkerboard': pm.make_radon_checkerboard_pattern, 'charuco_board': pm.make_charuco_board} + mp[p_type]() + pm.save() +if __name__ == '__main__': + main() + +# File: opencv-master/doc/pattern_tools/svgfig.py +import re, codecs, os, platform, copy, itertools, math, cmath, random, sys +_epsilon = 1e-05 +if sys.version_info >= (3, 0): + long = int + basestring = (str, bytes) +try: + UNICODE_EXISTS = bool(type(unicode)) +except NameError: + unicode = lambda s: str(s) +if re.search('windows', platform.system(), re.I): + try: + import _winreg + _default_directory = _winreg.QueryValueEx(_winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\\Microsoft\\Windows\\Current Version\\Explorer\\Shell Folders'), 'Desktop')[0] + except: + _default_directory = os.path.expanduser('~') + os.sep + 'Desktop' +_default_fileName = 'tmp.svg' +_hacks = {} +_hacks['inkscape-text-vertical-shift'] = False +__version__ = '1.0.1' + +def rgb(r, g, b, maximum=1.0): + return '#%02x%02x%02x' % (max(0, min(r * 255.0 / maximum, 255)), max(0, min(g * 255.0 / maximum, 255)), max(0, min(b * 255.0 / maximum, 255))) + +def attr_preprocess(attr): + attrCopy = attr.copy() + for name in attr.keys(): + name_colon = re.sub('__', ':', name) + if name_colon != name: + attrCopy[name_colon] = attrCopy[name] + del attrCopy[name] + name = name_colon + name_dash = re.sub('_', '-', name) + if name_dash != name: + attrCopy[name_dash] = attrCopy[name] + del attrCopy[name]
+ name = name_dash + return attrCopy + +class SVG: + + def __init__(self, *t_sub, **attr): + if len(t_sub) == 0: + raise TypeError('SVG element must have a t (SVG type)') + self.t = t_sub[0] + self.sub = list(t_sub[1:]) + self.attr = attr_preprocess(attr) + + def __getitem__(self, ti): + obj = self + if isinstance(ti, (list, tuple)): + for i in ti[:-1]: + obj = obj[i] + ti = ti[-1] + if isinstance(ti, (int, long, slice)): + return obj.sub[ti] + else: + return obj.attr[ti] + + def __setitem__(self, ti, value): + obj = self + if isinstance(ti, (list, tuple)): + for i in ti[:-1]: + obj = obj[i] + ti = ti[-1] + if isinstance(ti, (int, long, slice)): + obj.sub[ti] = value + else: + obj.attr[ti] = value + + def __delitem__(self, ti): + obj = self + if isinstance(ti, (list, tuple)): + for i in ti[:-1]: + obj = obj[i] + ti = ti[-1] + if isinstance(ti, (int, long, slice)): + del obj.sub[ti] + else: + del obj.attr[ti] + + def __contains__(self, value): + return value in self.attr + + def __eq__(self, other): + if id(self) == id(other): + return True + return isinstance(other, SVG) and self.t == other.t and (self.sub == other.sub) and (self.attr == other.attr) + + def __ne__(self, other): + return not self == other + + def append(self, x): + self.sub.append(x) + + def prepend(self, x): + self.sub[0:0] = [x] + + def extend(self, x): + self.sub.extend(x) + + def clone(self, shallow=False): + if shallow: + return copy.copy(self) + else: + return copy.deepcopy(self) + + class SVGDepthIterator: + + def __init__(self, svg, ti, depth_limit): + self.svg = svg + self.ti = ti + self.shown = False + self.depth_limit = depth_limit + + def __iter__(self): + return self + + def next(self): + if not self.shown: + self.shown = True + if self.ti != (): + return (self.ti, self.svg) + if not isinstance(self.svg, SVG): + raise StopIteration + if self.depth_limit is not None and len(self.ti) >= self.depth_limit: + raise StopIteration + if 'iterators' not in self.__dict__: + self.iterators = [] + for (i, s) in enumerate(self.svg.sub): + self.iterators.append(self.__class__(s, self.ti + (i,), self.depth_limit)) + for (k, s) in self.svg.attr.items(): + self.iterators.append(self.__class__(s, self.ti + (k,), self.depth_limit)) + self.iterators = itertools.chain(*self.iterators) + return self.iterators.next() + + def depth_first(self, depth_limit=None): + return self.SVGDepthIterator(self, (), depth_limit) + + def breadth_first(self, depth_limit=None): + raise NotImplementedError('Got an algorithm for breadth-first searching a tree without effectively copying the tree?') + + def __iter__(self): + return self.depth_first() + + def items(self, sub=True, attr=True, text=True): + output = [] + for (ti, s) in self: + show = False + if isinstance(ti[-1], (int, long)): + if isinstance(s, basestring): + show = text + else: + show = sub + else: + show = attr + if show: + output.append((ti, s)) + return output + + def keys(self, sub=True, attr=True, text=True): + return [ti for (ti, s) in self.items(sub, attr, text)] + + def values(self, sub=True, attr=True, text=True): + return [s for (ti, s) in self.items(sub, attr, text)] + + def __repr__(self): + return self.xml(depth_limit=0) + + def __str__(self): + return self.tree(sub=True, attr=False, text=False) + + def tree(self, depth_limit=None, sub=True, attr=True, text=True, tree_width=20, obj_width=80): + output = [] + line = '%s %s' % ('%%-%ds' % tree_width % repr(None), '%%-%ds' % obj_width % repr(self)[0:obj_width]) + output.append(line) + for (ti, s) in 
self.depth_first(depth_limit): + show = False + if isinstance(ti[-1], (int, long)): + if isinstance(s, basestring): + show = text + else: + show = sub + else: + show = attr + if show: + line = '%s %s' % ('%%-%ds' % tree_width % repr(list(ti)), '%%-%ds' % obj_width % (' ' * len(ti) + repr(s))[0:obj_width]) + output.append(line) + return '\n'.join(output) + + def xml(self, indent=u'    ', newl=u'\n', depth_limit=None, depth=0): + attrstr = [] + for (n, v) in self.attr.items(): + if isinstance(v, dict): + v = u'; '.join([u'%s:%s' % (ni, vi) for (ni, vi) in v.items()]) + elif isinstance(v, (list, tuple)): + v = u', '.join(v) + attrstr.append(u' %s=%s' % (n, repr(v))) + attrstr = u''.join(attrstr) + if len(self.sub) == 0: + return u'%s<%s%s />' % (indent * depth, self.t, attrstr) + if depth_limit is None or depth_limit > depth: + substr = [] + for s in self.sub: + if isinstance(s, SVG): + substr.append(s.xml(indent, newl, depth_limit, depth + 1) + newl) + elif isinstance(s, basestring): + substr.append(u'%s%s%s' % (indent * (depth + 1), s, newl)) + else: + substr.append('%s%s%s' % (indent * (depth + 1), repr(s), newl)) + substr = u''.join(substr) + return u'%s<%s%s>%s%s%s</%s>' % (indent * depth, self.t, attrstr, newl, substr, indent * depth, self.t) + else: + return u'%s<%s (%d sub)%s />' % (indent * depth, self.t, len(self.sub), attrstr) + + def standalone_xml(self, indent=u'    ', newl=u'\n', encoding=u'utf-8'): + if self.t == 'svg': + top = self + else: + top = canvas(self) + return u'<?xml version="1.0" encoding="%s" ?>\n<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n\n' % encoding + u''.join(top.__standalone_xml(indent, newl)) + + def __standalone_xml(self, indent, newl): + output = [u'<%s' % self.t] + for (n, v) in self.attr.items(): + if isinstance(v, dict): + v = u'; '.join([u'%s:%s' % (ni, vi) for (ni, vi) in v.items()]) + elif isinstance(v, (list, tuple)): + v = u', '.join(v) + output.append(u' %s="%s"' % (n, v)) + if len(self.sub) == 0: + output.append(u' />%s%s' % (newl, newl)) + return output + elif self.t == 'text' or self.t == 'tspan' or self.t == 'style': + output.append(u'>') + else: + output.append(u'>%s%s' % (newl, newl)) + for s in self.sub: + if isinstance(s, SVG): + output.extend(s.__standalone_xml(indent, newl)) + else: + output.append(unicode(s)) + if self.t == 'tspan': + output.append(u'</%s>' % self.t) + else: + output.append(u'</%s>%s%s' % (self.t, newl, newl)) + return output + + def interpret_fileName(self, fileName=None): + if fileName is None: + fileName = _default_fileName + if re.search('windows', platform.system(), re.I) and (not os.path.isabs(fileName)): + fileName = _default_directory + os.sep + fileName + return fileName + + def save(self, fileName=None, encoding='utf-8', compresslevel=None): + fileName = self.interpret_fileName(fileName) + if compresslevel is not None or re.search('\\.svgz$', fileName, re.I) or re.search('\\.gz$', fileName, re.I): + import gzip + if compresslevel is None: + f = gzip.GzipFile(fileName, 'w') + else: + f = gzip.GzipFile(fileName, 'w', compresslevel) + f = codecs.EncodedFile(f, 'utf-8', encoding) + f.write(self.standalone_xml(encoding=encoding)) + f.close() + else: + f = codecs.open(fileName, 'w', encoding=encoding) + f.write(self.standalone_xml(encoding=encoding)) + f.close() + + def inkview(self, fileName=None, encoding='utf-8'): + fileName = self.interpret_fileName(fileName) + self.save(fileName, encoding) + os.spawnvp(os.P_NOWAIT, 'inkview', ('inkview', fileName)) + + def inkscape(self, fileName=None, encoding='utf-8'): + fileName = self.interpret_fileName(fileName) + self.save(fileName, encoding) + os.spawnvp(os.P_NOWAIT,
'inkscape', ('inkscape', fileName)) + + def firefox(self, fileName=None, encoding='utf-8'): + fileName = self.interpret_fileName(fileName) + self.save(fileName, encoding) + os.spawnvp(os.P_NOWAIT, 'firefox', ('firefox', fileName)) +_canvas_defaults = {'width': '400px', 'height': '400px', 'viewBox': '0 0 100 100', 'xmlns': 'http://www.w3.org/2000/svg', 'xmlns:xlink': 'http://www.w3.org/1999/xlink', 'version': '1.1', 'style': {'stroke': 'black', 'fill': 'none', 'stroke-width': '0.5pt', 'stroke-linejoin': 'round', 'text-anchor': 'middle'}, 'font-family': ['Helvetica', 'Arial', 'FreeSans', 'Sans', 'sans', 'sans-serif']} + +def canvas(*sub, **attr): + attributes = dict(_canvas_defaults) + attributes.update(attr) + if sub is None or sub == (): + return SVG('svg', **attributes) + else: + return SVG('svg', *sub, **attributes) + +def canvas_outline(*sub, **attr): + svg = canvas(*sub, **attr) + match = re.match('[, \\t]*([0-9e.+\\-]+)[, \\t]+([0-9e.+\\-]+)[, \\t]+([0-9e.+\\-]+)[, \\t]+([0-9e.+\\-]+)[, \\t]*', svg['viewBox']) + if match is None: + raise ValueError('canvas viewBox is incorrectly formatted') + (x, y, width, height) = [float(x) for x in match.groups()] + svg.prepend(SVG('rect', x=x, y=y, width=width, height=height, stroke='none', fill='cornsilk')) + svg.append(SVG('rect', x=x, y=y, width=width, height=height, stroke='black', fill='none')) + return svg + +def template(fileName, svg, replaceme='REPLACEME'): + output = load(fileName) + for (ti, s) in output: + if isinstance(s, SVG) and s.t == replaceme: + output[ti] = svg + return output + +def load(fileName): + return load_stream(open(fileName)) + +def load_stream(stream): + from xml.sax import handler, make_parser + from xml.sax.handler import feature_namespaces, feature_external_ges, feature_external_pes + + class ContentHandler(handler.ContentHandler): + + def __init__(self): + self.stack = [] + self.output = None + self.all_whitespace = re.compile('^\\s*$') + + def startElement(self, name, attr): + s = SVG(name) + s.attr = dict(attr.items()) + if len(self.stack) > 0: + last = self.stack[-1] + last.sub.append(s) + self.stack.append(s) + + def characters(self, ch): + if not isinstance(ch, basestring) or self.all_whitespace.match(ch) is None: + if len(self.stack) > 0: + last = self.stack[-1] + if len(last.sub) > 0 and isinstance(last.sub[-1], basestring): + last.sub[-1] = last.sub[-1] + '\n' + ch + else: + last.sub.append(ch) + + def endElement(self, name): + if len(self.stack) > 0: + last = self.stack[-1] + if isinstance(last, SVG) and last.t == 'style' and ('type' in last.attr) and (last.attr['type'] == 'text/css') and (len(last.sub) == 1) and isinstance(last.sub[0], basestring): + last.sub[0] = '<![CDATA[\n' + last.sub[0] + ']]>' + self.output = self.stack.pop() + ch = ContentHandler() + parser = make_parser() + parser.setContentHandler(ch) + parser.setFeature(feature_namespaces, 0) + parser.setFeature(feature_external_ges, 0) + parser.parse(stream) + return ch.output + +def set_func_name(f, name): + try: + f.func_name = name + except TypeError: + pass
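# --- Editor's note: usage sketch for totrans() below (not in the original
# svgfig source); the expressions are illustrative assumptions.  Wrapped in
# a function so nothing runs at import time.
def _totrans_demo():
    # two-variable real form: eval'd into  lambda x, y: (x + 0.5 * y, y)
    shear = totrans('x + 0.5 * y, y')
    assert shear(2.0, 4.0) == (4.0, 4.0)
    # a one-element vars tuple is interpreted over the complex plane
    moebius = totrans('(z - 1) / (z + 1)', vars=('z',))
    x, y = moebius(0.0, 1.0)  # z = 1j is a fixed point of this Moebius map
    assert abs(x) < 1e-09 and abs(y - 1.0) < 1e-09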
+def totrans(expr, vars=('x', 'y'), globals=None, locals=None): + if locals is None: + locals = {} + if callable(expr): + if expr.func_code.co_argcount == 2: + return expr + elif expr.func_code.co_argcount == 1: + split = lambda z: (z.real, z.imag) + output = lambda x, y: split(expr(x + y * 1j)) + set_func_name(output, expr.func_name) + return output + else: + raise TypeError('must be a function of 2 or 1 variables') + if len(vars) == 2: + g = math.__dict__ + if globals is not None: + g.update(globals) + output = eval('lambda %s, %s: (%s)' % (vars[0], vars[1], expr), g, locals) + set_func_name(output, '%s,%s -> %s' % (vars[0], vars[1], expr)) + return output + elif len(vars) == 1: + g = cmath.__dict__ + if globals is not None: + g.update(globals) + output = eval('lambda %s: (%s)' % (vars[0], expr), g, locals) + split = lambda z: (z.real, z.imag) + output2 = lambda x, y: split(output(x + y * 1j)) + set_func_name(output2, '%s -> %s' % (vars[0], expr)) + return output2 + else: + raise TypeError('vars must have 2 or 1 elements') + +def window(xmin, xmax, ymin, ymax, x=0, y=0, width=100, height=100, xlogbase=None, ylogbase=None, minusInfinity=-1000, flipx=False, flipy=True): + if flipx: + ox1 = x + width + ox2 = x + else: + ox1 = x + ox2 = x + width + if flipy: + oy1 = y + height + oy2 = y + else: + oy1 = y + oy2 = y + height + ix1 = xmin + iy1 = ymin + ix2 = xmax + iy2 = ymax + if xlogbase is not None and (ix1 <= 0.0 or ix2 <= 0.0): + raise ValueError('x range incompatible with log scaling: (%g, %g)' % (ix1, ix2)) + if ylogbase is not None and (iy1 <= 0.0 or iy2 <= 0.0): + raise ValueError('y range incompatible with log scaling: (%g, %g)' % (iy1, iy2)) + + def maybelog(t, it1, it2, ot1, ot2, logbase): + if t <= 0.0: + return minusInfinity + else: + return ot1 + 1.0 * (math.log(t, logbase) - math.log(it1, logbase)) / (math.log(it2, logbase) - math.log(it1, logbase)) * (ot2 - ot1) + (xlogstr, ylogstr) = ('', '') + if xlogbase is None: + xfunc = lambda x: ox1 + 1.0 * (x - ix1) / (ix2 - ix1) * (ox2 - ox1) + else: + xfunc = lambda x: maybelog(x, ix1, ix2, ox1, ox2, xlogbase) + xlogstr = ' xlog=%g' % xlogbase + if ylogbase is None: + yfunc = lambda y: oy1 + 1.0 * (y - iy1) / (iy2 - iy1) * (oy2 - oy1) + else: + yfunc = lambda y: maybelog(y, iy1, iy2, oy1, oy2, ylogbase) + ylogstr = ' ylog=%g' % ylogbase + output = lambda x, y: (xfunc(x), yfunc(y)) + set_func_name(output, '(%g, %g), (%g, %g) -> (%g, %g), (%g, %g)%s%s' % (ix1, ix2, iy1, iy2, ox1, ox2, oy1, oy2, xlogstr, ylogstr)) + return output + +def rotate(angle, cx=0, cy=0): + angle *= math.pi / 180.0 + return lambda x, y: (cx + math.cos(angle) * (x - cx) - math.sin(angle) * (y - cy), cy + math.sin(angle) * (x - cx) + math.cos(angle) * (y - cy)) + +class Fig: + + def __repr__(self): + if self.trans is None: + return '<Fig (%d items)>' % len(self.d) + elif isinstance(self.trans, basestring): + return '<Fig (%d items) x,y -> %s>' % (len(self.d), self.trans) + else: + return '<Fig (%d items) %s>' % (len(self.d), self.trans.func_name) + + def __init__(self, *d, **kwds): + self.d = list(d) + defaults = {'trans': None} + defaults.update(kwds) + kwds = defaults + self.trans = kwds['trans'] + del kwds['trans'] + if len(kwds) != 0: + raise TypeError('Fig() got unexpected keyword arguments %s' % kwds.keys()) + + def SVG(self, trans=None): + if trans is None: + trans = self.trans + if isinstance(trans, basestring): + trans = totrans(trans) + output = SVG('g') + for s in self.d: + if isinstance(s, SVG): + output.append(s) + elif isinstance(s, Fig): + strans = s.trans + if isinstance(strans, basestring): + strans = totrans(strans) + if trans is None: + subtrans = strans + elif strans is None: + subtrans = trans + else: + subtrans = lambda x, y: trans(*strans(x, y)) + output.sub += s.SVG(subtrans).sub + elif s is None: + pass + else: + output.append(s.SVG(trans)) + return output
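# --- Editor's note: composition sketch (not in the original svgfig source).
# window() above maps a data region onto the default 100x100 SVG viewBox;
# Fig applies one transform to a whole group.  Rect and Line are defined
# later in this module; the file name and coordinates are illustrative.
def _fig_window_demo():
    to_canvas = window(0, 10, 0, 10)               # data (0..10)^2 -> canvas
    fig = Fig(Rect(1, 1, 9, 9), Line(1, 1, 9, 9), trans=to_canvas)
    fig.SVG().save('fig_demo.svg')                 # standalone SVG on disk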
+class Plot: + + def __repr__(self): + if self.trans is None: + return '<Plot (%d items)>' % len(self.d) + else: + return '<Plot (%d items) %s>' % (len(self.d), self.trans.func_name) + + def __init__(self, xmin, xmax, ymin, ymax, *d, **kwds): + (self.xmin, self.xmax, self.ymin, self.ymax) = (xmin, xmax, ymin, ymax) + self.d = list(d) + defaults = {'trans': None, 'x': 5, 'y': 5, 'width': 90, 'height': 90, 'flipx': False, 'flipy': True, 'minusInfinity': -1000, 'atx': 0, 'xticks': -10, 'xminiticks': True, 'xlabels': True, 'xlogbase': None, 'aty': 0, 'yticks': -10, 'yminiticks': True, 'ylabels': True, 'ylogbase': None, 'arrows': None, 'text_attr': {}, 'axis_attr': {}} + defaults.update(kwds) + kwds = defaults + self.trans = kwds['trans'] + del kwds['trans'] + self.x = kwds['x'] + del kwds['x'] + self.y = kwds['y'] + del kwds['y'] + self.width = kwds['width'] + del kwds['width'] + self.height = kwds['height'] + del kwds['height'] + self.flipx = kwds['flipx'] + del kwds['flipx'] + self.flipy = kwds['flipy'] + del kwds['flipy'] + self.minusInfinity = kwds['minusInfinity'] + del kwds['minusInfinity'] + self.atx = kwds['atx'] + del kwds['atx'] + self.xticks = kwds['xticks'] + del kwds['xticks'] + self.xminiticks = kwds['xminiticks'] + del kwds['xminiticks'] + self.xlabels = kwds['xlabels'] + del kwds['xlabels'] + self.xlogbase = kwds['xlogbase'] + del kwds['xlogbase'] + self.aty = kwds['aty'] + del kwds['aty'] + self.yticks = kwds['yticks'] + del kwds['yticks'] + self.yminiticks = kwds['yminiticks'] + del kwds['yminiticks'] + self.ylabels = kwds['ylabels'] + del kwds['ylabels'] + self.ylogbase = kwds['ylogbase'] + del kwds['ylogbase'] + self.arrows = kwds['arrows'] + del kwds['arrows'] + self.text_attr = kwds['text_attr'] + del kwds['text_attr'] + self.axis_attr = kwds['axis_attr'] + del kwds['axis_attr'] + if len(kwds) != 0: + raise TypeError('Plot() got unexpected keyword arguments %s' % kwds.keys()) + + def SVG(self, trans=None): + if trans is None: + trans = self.trans + if isinstance(trans, basestring): + trans = totrans(trans) + self.last_window = window(self.xmin, self.xmax, self.ymin, self.ymax, x=self.x, y=self.y, width=self.width, height=self.height, xlogbase=self.xlogbase, ylogbase=self.ylogbase, minusInfinity=self.minusInfinity, flipx=self.flipx, flipy=self.flipy) + d = [Axes(self.xmin, self.xmax, self.ymin, self.ymax, self.atx, self.aty, self.xticks, self.xminiticks, self.xlabels, self.xlogbase, self.yticks, self.yminiticks, self.ylabels, self.ylogbase, self.arrows, self.text_attr, **self.axis_attr)] + self.d + return Fig(Fig(*d, **{'trans': trans})).SVG(self.last_window) + +class Frame: + text_defaults = {'stroke': 'none', 'fill': 'black', 'font-size': 5} + axis_defaults = {} + tick_length = 1.5 + minitick_length = 0.75 + text_xaxis_offset = 1.0 + text_yaxis_offset = 2.0 + text_xtitle_offset = 6.0 + text_ytitle_offset = 12.0 + + def __repr__(self): + return '<Frame (%d items)>' % len(self.d) + + def __init__(self, xmin, xmax, ymin, ymax, *d, **kwds): + (self.xmin, self.xmax, self.ymin, self.ymax) = (xmin, xmax, ymin, ymax) + self.d = list(d) + defaults = {'x': 20, 'y': 5, 'width': 75, 'height': 80, 'flipx': False, 'flipy': True, 'minusInfinity': -1000, 'xtitle': None, 'xticks': -10, 'xminiticks': True, 'xlabels': True, 'x2labels': None, 'xlogbase': None, 'ytitle': None, 'yticks': -10, 'yminiticks': True, 'ylabels': True, 'y2labels': None, 'ylogbase': None, 'text_attr': {}, 'axis_attr': {}} + defaults.update(kwds) + kwds = defaults + self.x = kwds['x'] + del kwds['x'] + self.y = kwds['y'] + del kwds['y'] + self.width = kwds['width'] + del kwds['width'] + self.height = kwds['height'] + del kwds['height'] + self.flipx = kwds['flipx'] + del kwds['flipx'] + self.flipy = kwds['flipy'] + del kwds['flipy'] + self.minusInfinity = kwds['minusInfinity'] + del kwds['minusInfinity'] +
self.xtitle = kwds['xtitle'] + del kwds['xtitle'] + self.xticks = kwds['xticks'] + del kwds['xticks'] + self.xminiticks = kwds['xminiticks'] + del kwds['xminiticks'] + self.xlabels = kwds['xlabels'] + del kwds['xlabels'] + self.x2labels = kwds['x2labels'] + del kwds['x2labels'] + self.xlogbase = kwds['xlogbase'] + del kwds['xlogbase'] + self.ytitle = kwds['ytitle'] + del kwds['ytitle'] + self.yticks = kwds['yticks'] + del kwds['yticks'] + self.yminiticks = kwds['yminiticks'] + del kwds['yminiticks'] + self.ylabels = kwds['ylabels'] + del kwds['ylabels'] + self.y2labels = kwds['y2labels'] + del kwds['y2labels'] + self.ylogbase = kwds['ylogbase'] + del kwds['ylogbase'] + self.text_attr = dict(self.text_defaults) + self.text_attr.update(kwds['text_attr']) + del kwds['text_attr'] + self.axis_attr = dict(self.axis_defaults) + self.axis_attr.update(kwds['axis_attr']) + del kwds['axis_attr'] + if len(kwds) != 0: + raise TypeError('Frame() got unexpected keyword arguments %s' % kwds.keys()) + + def SVG(self): + self.last_window = window(self.xmin, self.xmax, self.ymin, self.ymax, x=self.x, y=self.y, width=self.width, height=self.height, xlogbase=self.xlogbase, ylogbase=self.ylogbase, minusInfinity=self.minusInfinity, flipx=self.flipx, flipy=self.flipy) + left = YAxis(self.ymin, self.ymax, self.xmin, self.yticks, self.yminiticks, self.ylabels, self.ylogbase, None, None, None, self.text_attr, **self.axis_attr) + right = YAxis(self.ymin, self.ymax, self.xmax, self.yticks, self.yminiticks, self.y2labels, self.ylogbase, None, None, None, self.text_attr, **self.axis_attr) + bottom = XAxis(self.xmin, self.xmax, self.ymin, self.xticks, self.xminiticks, self.xlabels, self.xlogbase, None, None, None, self.text_attr, **self.axis_attr) + top = XAxis(self.xmin, self.xmax, self.ymax, self.xticks, self.xminiticks, self.x2labels, self.xlogbase, None, None, None, self.text_attr, **self.axis_attr) + left.tick_start = -self.tick_length + left.tick_end = 0 + left.minitick_start = -self.minitick_length + left.minitick_end = 0.0 + left.text_start = self.text_yaxis_offset + right.tick_start = 0.0 + right.tick_end = self.tick_length + right.minitick_start = 0.0 + right.minitick_end = self.minitick_length + right.text_start = -self.text_yaxis_offset + right.text_attr['text-anchor'] = 'start' + bottom.tick_start = 0.0 + bottom.tick_end = self.tick_length + bottom.minitick_start = 0.0 + bottom.minitick_end = self.minitick_length + bottom.text_start = -self.text_xaxis_offset + top.tick_start = -self.tick_length + top.tick_end = 0.0 + top.minitick_start = -self.minitick_length + top.minitick_end = 0.0 + top.text_start = self.text_xaxis_offset + top.text_attr['dominant-baseline'] = 'text-after-edge' + output = Fig(*self.d).SVG(self.last_window) + output.prepend(left.SVG(self.last_window)) + output.prepend(bottom.SVG(self.last_window)) + output.prepend(right.SVG(self.last_window)) + output.prepend(top.SVG(self.last_window)) + if self.xtitle is not None: + output.append(SVG('text', self.xtitle, transform='translate(%g, %g)' % (self.x + self.width / 2.0, self.y + self.height + self.text_xtitle_offset), dominant_baseline='text-before-edge', **self.text_attr)) + if self.ytitle is not None: + output.append(SVG('text', self.ytitle, transform='translate(%g, %g) rotate(-90)' % (self.x - self.text_ytitle_offset, self.y + self.height / 2.0), **self.text_attr)) + return output + +def pathtoPath(svg): + if not isinstance(svg, SVG) or svg.t != 'path': + raise TypeError('Only SVG objects can be converted into Paths') + attr = dict(svg.attr) 
+ d = attr['d'] + del attr['d'] + for key in attr.keys(): + if not isinstance(key, str): + value = attr[key] + del attr[key] + attr[str(key)] = value + return Path(d, **attr) + +class Path: + defaults = {} + + def __repr__(self): + return '<Path (%d nodes) %s>' % (len(self.d), self.attr) + + def __init__(self, d=[], **attr): + if isinstance(d, basestring): + self.d = self.parse(d) + else: + self.d = list(d) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def parse_whitespace(self, index, pathdata): + while index < len(pathdata) and pathdata[index] in (' ', '\t', '\r', '\n', ','): + index += 1 + return (index, pathdata) + + def parse_command(self, index, pathdata): + (index, pathdata) = self.parse_whitespace(index, pathdata) + if index >= len(pathdata): + return (None, index, pathdata) + command = pathdata[index] + if 'A' <= command <= 'Z' or 'a' <= command <= 'z': + index += 1 + return (command, index, pathdata) + else: + return (None, index, pathdata) + + def parse_number(self, index, pathdata): + (index, pathdata) = self.parse_whitespace(index, pathdata) + if index >= len(pathdata): + return (None, index, pathdata) + first_digit = pathdata[index] + if '0' <= first_digit <= '9' or first_digit in ('-', '+', '.'): + start = index + while index < len(pathdata) and ('0' <= pathdata[index] <= '9' or pathdata[index] in ('-', '+', '.', 'e', 'E')): + index += 1 + end = index + index = end + return (float(pathdata[start:end]), index, pathdata) + else: + return (None, index, pathdata) + + def parse_boolean(self, index, pathdata): + (index, pathdata) = self.parse_whitespace(index, pathdata) + if index >= len(pathdata): + return (None, index, pathdata) + first_digit = pathdata[index] + if first_digit in ('0', '1'): + index += 1 + return (int(first_digit), index, pathdata) + else: + return (None, index, pathdata) + + def parse(self, pathdata): + output = [] + index = 0 + while True: + (command, index, pathdata) = self.parse_command(index, pathdata) + (index, pathdata) = self.parse_whitespace(index, pathdata) + if command is None and index == len(pathdata): + break + if command in ('Z', 'z'): + output.append((command,)) + elif command in ('H', 'h', 'V', 'v'): + errstring = 'Path command "%s" requires a number at index %d' % (command, index) + (num1, index, pathdata) = self.parse_number(index, pathdata) + if num1 is None: + raise ValueError(errstring) + while num1 is not None: + output.append((command, num1)) + (num1, index, pathdata) = self.parse_number(index, pathdata) + elif command in ('M', 'm', 'L', 'l', 'T', 't'): + errstring = 'Path command "%s" requires an x,y pair at index %d' % (command, index) + (num1, index, pathdata) = self.parse_number(index, pathdata) + (num2, index, pathdata) = self.parse_number(index, pathdata) + if num1 is None: + raise ValueError(errstring) + while num1 is not None: + if num2 is None: + raise ValueError(errstring) + output.append((command, num1, num2, False)) + (num1, index, pathdata) = self.parse_number(index, pathdata) + (num2, index, pathdata) = self.parse_number(index, pathdata) + elif command in ('S', 's', 'Q', 'q'): + errstring = 'Path command "%s" requires a cx,cy,x,y quadruplet at index %d' % (command, index) + (num1, index, pathdata) = self.parse_number(index, pathdata) + (num2, index, pathdata) = self.parse_number(index, pathdata) + (num3, index, pathdata) = self.parse_number(index, pathdata) + (num4, index, pathdata) = self.parse_number(index, pathdata) + if num1 is None: + raise ValueError(errstring) + while num1 is not None: + if num2 is None or num3 is None
or num4 is None: + raise ValueError(errstring) + output.append((command, num1, num2, False, num3, num4, False)) + (num1, index, pathdata) = self.parse_number(index, pathdata) + (num2, index, pathdata) = self.parse_number(index, pathdata) + (num3, index, pathdata) = self.parse_number(index, pathdata) + (num4, index, pathdata) = self.parse_number(index, pathdata) + elif command in ('C', 'c'): + errstring = 'Path command "%s" requires a c1x,c1y,c2x,c2y,x,y sextuplet at index %d' % (command, index) + (num1, index, pathdata) = self.parse_number(index, pathdata) + (num2, index, pathdata) = self.parse_number(index, pathdata) + (num3, index, pathdata) = self.parse_number(index, pathdata) + (num4, index, pathdata) = self.parse_number(index, pathdata) + (num5, index, pathdata) = self.parse_number(index, pathdata) + (num6, index, pathdata) = self.parse_number(index, pathdata) + if num1 is None: + raise ValueError(errstring) + while num1 is not None: + if num2 is None or num3 is None or num4 is None or (num5 is None) or (num6 is None): + raise ValueError(errstring) + output.append((command, num1, num2, False, num3, num4, False, num5, num6, False)) + (num1, index, pathdata) = self.parse_number(index, pathdata) + (num2, index, pathdata) = self.parse_number(index, pathdata) + (num3, index, pathdata) = self.parse_number(index, pathdata) + (num4, index, pathdata) = self.parse_number(index, pathdata) + (num5, index, pathdata) = self.parse_number(index, pathdata) + (num6, index, pathdata) = self.parse_number(index, pathdata) + elif command in ('A', 'a'): + errstring = 'Path command "%s" requires a rx,ry,angle,large-arc-flag,sweep-flag,x,y septuplet at index %d' % (command, index) + (num1, index, pathdata) = self.parse_number(index, pathdata) + (num2, index, pathdata) = self.parse_number(index, pathdata) + (num3, index, pathdata) = self.parse_number(index, pathdata) + (num4, index, pathdata) = self.parse_boolean(index, pathdata) + (num5, index, pathdata) = self.parse_boolean(index, pathdata) + (num6, index, pathdata) = self.parse_number(index, pathdata) + (num7, index, pathdata) = self.parse_number(index, pathdata) + if num1 is None: + raise ValueError(errstring) + while num1 is not None: + if num2 is None or num3 is None or num4 is None or (num5 is None) or (num6 is None) or (num7 is None): + raise ValueError(errstring) + output.append((command, num1, num2, False, num3, num4, num5, num6, num7, False)) + (num1, index, pathdata) = self.parse_number(index, pathdata) + (num2, index, pathdata) = self.parse_number(index, pathdata) + (num3, index, pathdata) = self.parse_number(index, pathdata) + (num4, index, pathdata) = self.parse_boolean(index, pathdata) + (num5, index, pathdata) = self.parse_boolean(index, pathdata) + (num6, index, pathdata) = self.parse_number(index, pathdata) + (num7, index, pathdata) = self.parse_number(index, pathdata) + return output + + def SVG(self, trans=None): + if isinstance(trans, basestring): + trans = totrans(trans) + (x, y, X, Y) = (None, None, None, None) + output = [] + for datum in self.d: + if not isinstance(datum, (tuple, list)): + raise TypeError('pathdata elements must be tuples/lists') + command = datum[0] + if command in ('Z', 'z'): + (x, y, X, Y) = (None, None, None, None) + output.append('Z') + elif command in ('H', 'h', 'V', 'v'): + (command, num1) = datum + if command == 'H' or (command == 'h' and x is None): + x = num1 + elif command == 'h': + x += num1 + elif command == 'V' or (command == 'v' and y is None): + y = num1 + elif command == 'v': + y += num1 + if trans is 
None: + (X, Y) = (x, y) + else: + (X, Y) = trans(x, y) + output.append('L%g %g' % (X, Y)) + elif command in ('M', 'm', 'L', 'l', 'T', 't'): + (command, num1, num2, isglobal12) = datum + if trans is None or isglobal12: + if command.isupper() or X is None or Y is None: + (X, Y) = (num1, num2) + else: + X += num1 + Y += num2 + (x, y) = (X, Y) + else: + if command.isupper() or x is None or y is None: + (x, y) = (num1, num2) + else: + x += num1 + y += num2 + (X, Y) = trans(x, y) + COMMAND = command.capitalize() + output.append('%s%g %g' % (COMMAND, X, Y)) + elif command in ('S', 's', 'Q', 'q'): + (command, num1, num2, isglobal12, num3, num4, isglobal34) = datum + if trans is None or isglobal12: + if command.isupper() or X is None or Y is None: + (CX, CY) = (num1, num2) + else: + CX = X + num1 + CY = Y + num2 + else: + if command.isupper() or x is None or y is None: + (cx, cy) = (num1, num2) + else: + cx = x + num1 + cy = y + num2 + (CX, CY) = trans(cx, cy) + if trans is None or isglobal34: + if command.isupper() or X is None or Y is None: + (X, Y) = (num3, num4) + else: + X += num3 + Y += num4 + (x, y) = (X, Y) + else: + if command.isupper() or x is None or y is None: + (x, y) = (num3, num4) + else: + x += num3 + y += num4 + (X, Y) = trans(x, y) + COMMAND = command.capitalize() + output.append('%s%g %g %g %g' % (COMMAND, CX, CY, X, Y)) + elif command in ('C', 'c'): + (command, num1, num2, isglobal12, num3, num4, isglobal34, num5, num6, isglobal56) = datum + if trans is None or isglobal12: + if command.isupper() or X is None or Y is None: + (C1X, C1Y) = (num1, num2) + else: + C1X = X + num1 + C1Y = Y + num2 + else: + if command.isupper() or x is None or y is None: + (c1x, c1y) = (num1, num2) + else: + c1x = x + num1 + c1y = y + num2 + (C1X, C1Y) = trans(c1x, c1y) + if trans is None or isglobal34: + if command.isupper() or X is None or Y is None: + (C2X, C2Y) = (num3, num4) + else: + C2X = X + num3 + C2Y = Y + num4 + else: + if command.isupper() or x is None or y is None: + (c2x, c2y) = (num3, num4) + else: + c2x = x + num3 + c2y = y + num4 + (C2X, C2Y) = trans(c2x, c2y) + if trans is None or isglobal56: + if command.isupper() or X is None or Y is None: + (X, Y) = (num5, num6) + else: + X += num5 + Y += num6 + (x, y) = (X, Y) + else: + if command.isupper() or x is None or y is None: + (x, y) = (num5, num6) + else: + x += num5 + y += num6 + (X, Y) = trans(x, y) + COMMAND = command.capitalize() + output.append('%s%g %g %g %g %g %g' % (COMMAND, C1X, C1Y, C2X, C2Y, X, Y)) + elif command in ('A', 'a'): + (command, num1, num2, isglobal12, angle, large_arc_flag, sweep_flag, num3, num4, isglobal34) = datum + (oldx, oldy) = (x, y) + (OLDX, OLDY) = (X, Y) + if trans is None or isglobal34: + if command.isupper() or X is None or Y is None: + (X, Y) = (num3, num4) + else: + X += num3 + Y += num4 + (x, y) = (X, Y) + else: + if command.isupper() or x is None or y is None: + (x, y) = (num3, num4) + else: + x += num3 + y += num4 + (X, Y) = trans(x, y) + if x is not None and y is not None: + (centerx, centery) = ((x + oldx) / 2.0, (y + oldy) / 2.0) + (CENTERX, CENTERY) = ((X + OLDX) / 2.0, (Y + OLDY) / 2.0) + if trans is None or isglobal12: + RX = CENTERX + num1 + RY = CENTERY + num2 + else: + rx = centerx + num1 + ry = centery + num2 + (RX, RY) = trans(rx, ry) + COMMAND = command.capitalize() + output.append('%s%g %g %g %d %d %g %g' % (COMMAND, RX - CENTERX, RY - CENTERY, angle, large_arc_flag, sweep_flag, X, Y)) + elif command in (',', '.'): + (command, num1, num2, isglobal12, angle, num3, num4, isglobal34) = 
datum + if trans is None or isglobal34: + if command == '.' or X is None or Y is None: + (X, Y) = (num3, num4) + else: + X += num3 + Y += num4 + (x, y) = (None, None) + else: + if command == '.' or x is None or y is None: + (x, y) = (num3, num4) + else: + x += num3 + y += num4 + (X, Y) = trans(x, y) + if trans is None or isglobal12: + RX = X + num1 + RY = Y + num2 + else: + rx = x + num1 + ry = y + num2 + (RX, RY) = trans(rx, ry) + (RX, RY) = (RX - X, RY - Y) + (X1, Y1) = (X + RX * math.cos(angle * math.pi / 180.0), Y + RX * math.sin(angle * math.pi / 180.0)) + (X2, Y2) = (X + RY * math.sin(angle * math.pi / 180.0), Y - RY * math.cos(angle * math.pi / 180.0)) + (X3, Y3) = (X - RX * math.cos(angle * math.pi / 180.0), Y - RX * math.sin(angle * math.pi / 180.0)) + (X4, Y4) = (X - RY * math.sin(angle * math.pi / 180.0), Y + RY * math.cos(angle * math.pi / 180.0)) + output.append('M%g %gA%g %g %g 0 0 %g %gA%g %g %g 0 0 %g %gA%g %g %g 0 0 %g %gA%g %g %g 0 0 %g %g' % (X1, Y1, RX, RY, angle, X2, Y2, RX, RY, angle, X3, Y3, RX, RY, angle, X4, Y4, RX, RY, angle, X1, Y1)) + return SVG('path', d=''.join(output), **self.attr) + +def funcRtoC(expr, var='t', globals=None, locals=None): + if locals is None: + locals = {} + g = cmath.__dict__ + if globals is not None: + g.update(globals) + output = eval('lambda %s: (%s)' % (var, expr), g, locals) + split = lambda z: (z.real, z.imag) + output2 = lambda t: split(output(t)) + set_func_name(output2, '%s -> %s' % (var, expr)) + return output2 + +def funcRtoR2(expr, var='t', globals=None, locals=None): + if locals is None: + locals = {} + g = math.__dict__ + if globals is not None: + g.update(globals) + output = eval('lambda %s: (%s)' % (var, expr), g, locals) + set_func_name(output, '%s -> %s' % (var, expr)) + return output + +def funcRtoR(expr, var='x', globals=None, locals=None): + if locals is None: + locals = {} + g = math.__dict__ + if globals is not None: + g.update(globals) + output = eval('lambda %s: (%s, %s)' % (var, var, expr), g, locals) + set_func_name(output, '%s -> %s' % (var, expr)) + return output
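# --- Editor's note: adaptive-sampling sketch (not in the original svgfig
# source).  funcRtoR() above lifts a plain y(x) expression into the
# parametric (x, y) form that Curve (below) samples recursively until each
# segment looks straight; names and numbers here are illustrative.
def _curve_demo():
    f = funcRtoR('sin(x)')                          # x -> (x, sin(x))
    c = Curve(f, 0, 2 * math.pi, stroke='blue')
    Fig(c, trans=window(0, 2 * math.pi, -1, 1)).SVG().save('sine.svg')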
+class Curve: + defaults = {} + random_sampling = True + recursion_limit = 15 + linearity_limit = 0.05 + discontinuity_limit = 5.0 + + def __repr__(self): + return '<Curve %s [%g, %g] %s>' % (self.f, self.low, self.high, self.attr) + + def __init__(self, f, low, high, loop=False, **attr): + self.f = f + self.low = low + self.high = high + self.loop = loop + self.attr = dict(self.defaults) + self.attr.update(attr) + + class Sample: + + def __repr__(self): + (t, x, y, X, Y) = (self.t, self.x, self.y, self.X, self.Y) + if t is not None: + t = '%g' % t + if x is not None: + x = '%g' % x + if y is not None: + y = '%g' % y + if X is not None: + X = '%g' % X + if Y is not None: + Y = '%g' % Y + return '<Sample t=%s x=%s y=%s X=%s Y=%s>' % (t, x, y, X, Y) + + def __init__(self, t): + self.t = t + + def link(self, left, right): + (self.left, self.right) = (left, right) + + def evaluate(self, f, trans): + (self.x, self.y) = f(self.t) + if trans is None: + (self.X, self.Y) = (self.x, self.y) + else: + (self.X, self.Y) = trans(self.x, self.y) + + class Samples: + + def __repr__(self): + return '<Samples (%d samples)>' % len(self) + + def __init__(self, left, right): + (self.left, self.right) = (left, right) + + def __len__(self): + count = 0 + current = self.left + while current is not None: + count += 1 + current = current.right + return count + + def __iter__(self): + self.current = self.left + return self + + def next(self): + current = self.current + if current is None: + raise StopIteration + self.current = self.current.right + return current + + def sample(self, trans=None): + oldrecursionlimit = sys.getrecursionlimit() + sys.setrecursionlimit(self.recursion_limit + 100) + try: + if not self.low < self.high: + raise ValueError('low must be less than high') + (low, high) = (self.Sample(float(self.low)), self.Sample(float(self.high))) + low.link(None, high) + high.link(low, None) + low.evaluate(self.f, trans) + high.evaluate(self.f, trans) + self.subsample(low, high, 0, trans) + left = low + while left.right is not None: + mid = left.right + right = mid.right + if right is not None and left.X is not None and (left.Y is not None) and (mid.X is not None) and (mid.Y is not None) and (right.X is not None) and (right.Y is not None): + numer = left.X * (right.Y - mid.Y) + mid.X * (left.Y - right.Y) + right.X * (mid.Y - left.Y) + denom = math.sqrt((left.X - right.X) ** 2 + (left.Y - right.Y) ** 2) + if denom != 0.0 and abs(numer / denom) < self.linearity_limit: + left.right = right + right.left = left + else: + left = left.right + else: + left = left.right + self.last_samples = self.Samples(low, high) + finally: + sys.setrecursionlimit(oldrecursionlimit) + + def subsample(self, left, right, depth, trans=None): + if self.random_sampling: + mid = self.Sample(left.t + random.uniform(0.3, 0.7) * (right.t - left.t)) + else: + mid = self.Sample(left.t + 0.5 * (right.t - left.t)) + left.right = mid + right.left = mid + mid.link(left, right) + mid.evaluate(self.f, trans) + numer = left.X * (right.Y - mid.Y) + mid.X * (left.Y - right.Y) + right.X * (mid.Y - left.Y) + denom = math.sqrt((left.X - right.X) ** 2 + (left.Y - right.Y) ** 2) + if depth < 3 or (denom == 0 and left.t != right.t) or denom > self.discontinuity_limit or (denom != 0.0 and abs(numer / denom) > self.linearity_limit): + if depth < self.recursion_limit: + self.subsample(left, mid, depth + 1, trans) + self.subsample(mid, right, depth + 1, trans) + else: + mid.y = mid.Y = None + + def SVG(self, trans=None): + return self.Path(trans).SVG() + + def Path(self, trans=None, local=False): + if isinstance(trans, basestring): + trans = totrans(trans) + if isinstance(self.f, basestring): + self.f = funcRtoR2(self.f) + self.sample(trans) + output = [] + for s in self.last_samples: + if s.X is not None and s.Y is not None: + if s.left is None or s.left.Y is None: + command = 'M' + else: + command = 'L' + if local: + output.append((command, s.x, s.y, False)) + else: + output.append((command, s.X, s.Y, True)) + if self.loop: + output.append(('Z',)) + return Path(output, **self.attr) + +class Poly: + defaults = {} + + def __repr__(self): + return '<Poly (%d nodes) mode=%s loop=%s %s>' % (len(self.d), self.mode, repr(self.loop), self.attr) + + def __init__(self, d=[], mode='L', loop=False, **attr): + self.d = list(d) + self.mode = mode + self.loop = loop + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + return self.Path(trans).SVG() + + def Path(self, trans=None, local=False): + if isinstance(trans, basestring): + trans = totrans(trans) + if self.mode[0] == 'L' or self.mode[0] == 'l': + mode = 'L' + elif self.mode[0] == 'B' or self.mode[0] == 'b': + mode = 'B' + elif self.mode[0] == 'V' or self.mode[0] == 'v': + mode = 'V' + elif self.mode[0] == 'F' or self.mode[0] == 'f': + mode = 'F' + elif self.mode[0] == 'S' or self.mode[0] == 's': + mode = 'S' + (vx, vy) = ([0.0] * len(self.d), [0.0] * len(self.d)) + for i in range(len(self.d)): + inext = (i + 1) % len(self.d) + iprev = (i - 1) % len(self.d) + vx[i] = (self.d[inext][0] - self.d[iprev][0]) / 2.0 + vy[i] =
(self.d[inext][1] - self.d[iprev][1]) / 2.0 + if not self.loop and (i == 0 or i == len(self.d) - 1): + (vx[i], vy[i]) = (0.0, 0.0) + else: + raise ValueError('mode must be "lines", "bezier", "velocity", "foreback", "smooth", or an abbreviation') + d = [] + indexes = list(range(len(self.d))) + if self.loop and len(self.d) > 0: + indexes.append(0) + for i in indexes: + inext = (i + 1) % len(self.d) + iprev = (i - 1) % len(self.d) + (x, y) = (self.d[i][0], self.d[i][1]) + if trans is None: + (X, Y) = (x, y) + else: + (X, Y) = trans(x, y) + if d == []: + if local: + d.append(('M', x, y, False)) + else: + d.append(('M', X, Y, True)) + elif mode == 'L': + if local: + d.append(('L', x, y, False)) + else: + d.append(('L', X, Y, True)) + elif mode == 'B': + (c1x, c1y) = (self.d[i][2], self.d[i][3]) + if trans is None: + (C1X, C1Y) = (c1x, c1y) + else: + (C1X, C1Y) = trans(c1x, c1y) + (c2x, c2y) = (self.d[i][4], self.d[i][5]) + if trans is None: + (C2X, C2Y) = (c2x, c2y) + else: + (C2X, C2Y) = trans(c2x, c2y) + if local: + d.append(('C', c1x, c1y, False, c2x, c2y, False, x, y, False)) + else: + d.append(('C', C1X, C1Y, True, C2X, C2Y, True, X, Y, True)) + elif mode == 'V': + (c1x, c1y) = (self.d[iprev][2] / 3.0 + self.d[iprev][0], self.d[iprev][3] / 3.0 + self.d[iprev][1]) + (c2x, c2y) = (self.d[i][2] / -3.0 + x, self.d[i][3] / -3.0 + y) + if trans is None: + (C1X, C1Y) = (c1x, c1y) + else: + (C1X, C1Y) = trans(c1x, c1y) + if trans is None: + (C2X, C2Y) = (c2x, c2y) + else: + (C2X, C2Y) = trans(c2x, c2y) + if local: + d.append(('C', c1x, c1y, False, c2x, c2y, False, x, y, False)) + else: + d.append(('C', C1X, C1Y, True, C2X, C2Y, True, X, Y, True)) + elif mode == 'F': + (c1x, c1y) = (self.d[iprev][4] / 3.0 + self.d[iprev][0], self.d[iprev][5] / 3.0 + self.d[iprev][1]) + (c2x, c2y) = (self.d[i][2] / -3.0 + x, self.d[i][3] / -3.0 + y) + if trans is None: + (C1X, C1Y) = (c1x, c1y) + else: + (C1X, C1Y) = trans(c1x, c1y) + if trans is None: + (C2X, C2Y) = (c2x, c2y) + else: + (C2X, C2Y) = trans(c2x, c2y) + if local: + d.append(('C', c1x, c1y, False, c2x, c2y, False, x, y, False)) + else: + d.append(('C', C1X, C1Y, True, C2X, C2Y, True, X, Y, True)) + elif mode == 'S': + (c1x, c1y) = (vx[iprev] / 3.0 + self.d[iprev][0], vy[iprev] / 3.0 + self.d[iprev][1]) + (c2x, c2y) = (vx[i] / -3.0 + x, vy[i] / -3.0 + y) + if trans is None: + (C1X, C1Y) = (c1x, c1y) + else: + (C1X, C1Y) = trans(c1x, c1y) + if trans is None: + (C2X, C2Y) = (c2x, c2y) + else: + (C2X, C2Y) = trans(c2x, c2y) + if local: + d.append(('C', c1x, c1y, False, c2x, c2y, False, x, y, False)) + else: + d.append(('C', C1X, C1Y, True, C2X, C2Y, True, X, Y, True)) + if self.loop and len(self.d) > 0: + d.append(('Z',)) + return Path(d, **self.attr) + +class Text: + defaults = {'stroke': 'none', 'fill': 'black', 'font-size': 5} + + def __repr__(self): + return '<Text %s at (%g, %g) %s>' % (repr(self.d), self.x, self.y, self.attr) + + def __init__(self, x, y, d, **attr): + self.x = x + self.y = y + self.d = unicode(d) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + if isinstance(trans, basestring): + trans = totrans(trans) + (X, Y) = (self.x, self.y) + if trans is not None: + (X, Y) = trans(X, Y) + return SVG('text', self.d, x=X, y=Y, **self.attr) + +class TextGlobal: + defaults = {'stroke': 'none', 'fill': 'black', 'font-size': 5} + + def __repr__(self): + return '<TextGlobal %s at (%s, %s) %s>' % (repr(self.d), str(self.x), str(self.y), self.attr) + + def __init__(self, x, y, d, **attr): + self.x = x + self.y = y + self.d = unicode(d) + self.attr =
dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + return SVG('text', self.d, x=self.x, y=self.y, **self.attr) +_symbol_templates = {'dot': SVG('symbol', SVG('circle', cx=0, cy=0, r=1, stroke='none', fill='black'), viewBox='0 0 1 1', overflow='visible'), 'box': SVG('symbol', SVG('rect', x1=-1, y1=-1, x2=1, y2=1, stroke='none', fill='black'), viewBox='0 0 1 1', overflow='visible'), 'uptri': SVG('symbol', SVG('path', d='M -1 0.866 L 1 0.866 L 0 -0.866 Z', stroke='none', fill='black'), viewBox='0 0 1 1', overflow='visible'), 'downtri': SVG('symbol', SVG('path', d='M -1 -0.866 L 1 -0.866 L 0 0.866 Z', stroke='none', fill='black'), viewBox='0 0 1 1', overflow='visible')} + +def make_symbol(id, shape='dot', **attr): + output = copy.deepcopy(_symbol_templates[shape]) + for i in output.sub: + i.attr.update(attr_preprocess(attr)) + output['id'] = id + return output +_circular_dot = make_symbol('circular_dot') + +class Dots: + defaults = {} + + def __repr__(self): + return '<Dots (%d nodes) %s>' % (len(self.d), self.attr) + + def __init__(self, d=[], symbol=None, width=1.0, height=1.0, **attr): + self.d = list(d) + self.width = width + self.height = height + self.attr = dict(self.defaults) + self.attr.update(attr) + if symbol is None: + self.symbol = _circular_dot + elif isinstance(symbol, SVG): + self.symbol = symbol + else: + self.symbol = make_symbol(symbol) + + def SVG(self, trans=None): + if isinstance(trans, basestring): + trans = totrans(trans) + output = SVG('g', SVG('defs', self.symbol)) + id = '#%s' % self.symbol['id'] + for p in self.d: + (x, y) = (p[0], p[1]) + if trans is None: + (X, Y) = (x, y) + else: + (X, Y) = trans(x, y) + item = SVG('use', x=X, y=Y, xlink__href=id) + if self.width is not None: + item['width'] = self.width + if self.height is not None: + item['height'] = self.height + output.append(item) + return output +_marker_templates = {'arrow_start': SVG('marker', SVG('path', d='M 9 3.6 L 10.5 0 L 0 3.6 L 10.5 7.2 L 9 3.6 Z'), viewBox='0 0 10.5 7.2', refX='9', refY='3.6', markerWidth='10.5', markerHeight='7.2', markerUnits='strokeWidth', orient='auto', stroke='none', fill='black'), 'arrow_end': SVG('marker', SVG('path', d='M 1.5 3.6 L 0 0 L 10.5 3.6 L 0 7.2 L 1.5 3.6 Z'), viewBox='0 0 10.5 7.2', refX='1.5', refY='3.6', markerWidth='10.5', markerHeight='7.2', markerUnits='strokeWidth', orient='auto', stroke='none', fill='black')} + +def make_marker(id, shape, **attr): + output = copy.deepcopy(_marker_templates[shape]) + for i in output.sub: + i.attr.update(attr_preprocess(attr)) + output['id'] = id + return output
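# --- Editor's note: scatter sketch (not in the original svgfig source).
# Dots (above) stamps one <symbol> per point via <use> elements, and
# make_symbol clones a built-in template ('dot', 'box', 'uptri', 'downtri')
# with new attributes; the id and data here are illustrative.
def _dots_demo():
    sym = make_symbol('red_dots', shape='dot', fill='red')
    pts = Dots([(1, 1), (2, 4), (3, 9)], symbol=sym, width=2, height=2)
    Fig(pts, trans=window(0, 4, 0, 10)).SVG().save('dots.svg')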
else: + raise TypeError('arrow_start must be False/None or an id string for the new marker') + if self.arrow_end != False and self.arrow_end is not None: + if isinstance(self.arrow_end, SVG): + defs.append(self.arrow_end) + line.attr['marker-end'] = 'url(#%s)' % self.arrow_end['id'] + elif isinstance(self.arrow_end, basestring): + defs.append(make_marker(self.arrow_end, 'arrow_end')) + line.attr['marker-end'] = 'url(#%s)' % self.arrow_end + else: + raise TypeError('arrow_end must be False/None or an id string for the new marker') + return SVG('g', defs, line) + return line + + def Path(self, trans=None, local=False): + self.f = lambda t: (self.x1 + t * (self.x2 - self.x1), self.y1 + t * (self.y2 - self.y1)) + self.low = 0.0 + self.high = 1.0 + self.loop = False + if trans is None: + return Path([('M', self.x1, self.y1, not local), ('L', self.x2, self.y2, not local)], **self.attr) + else: + return Curve.Path(self, trans, local) + +class LineGlobal: + defaults = {} + + def __repr__(self): + (local1, local2) = ('', '') + if self.local1: + local1 = 'L' + if self.local2: + local2 = 'L' + return '' % (local1, str(self.x1), str(self.y1), local2, str(self.x2), str(self.y2), self.attr) + + def __init__(self, x1, y1, x2, y2, local1=False, local2=False, arrow_start=None, arrow_end=None, **attr): + (self.x1, self.y1, self.x2, self.y2) = (x1, y1, x2, y2) + (self.local1, self.local2) = (local1, local2) + (self.arrow_start, self.arrow_end) = (arrow_start, arrow_end) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + if isinstance(trans, basestring): + trans = totrans(trans) + (X1, Y1, X2, Y2) = (self.x1, self.y1, self.x2, self.y2) + if self.local1: + (X1, Y1) = trans(X1, Y1) + if self.local2: + (X2, Y2) = trans(X2, Y2) + line = SVG('path', d='M%s %s L%s %s' % (X1, Y1, X2, Y2), **self.attr) + if self.arrow_start != False and self.arrow_start is not None or (self.arrow_end != False and self.arrow_end is not None): + defs = SVG('defs') + if self.arrow_start != False and self.arrow_start is not None: + if isinstance(self.arrow_start, SVG): + defs.append(self.arrow_start) + line.attr['marker-start'] = 'url(#%s)' % self.arrow_start['id'] + elif isinstance(self.arrow_start, basestring): + defs.append(make_marker(self.arrow_start, 'arrow_start')) + line.attr['marker-start'] = 'url(#%s)' % self.arrow_start + else: + raise TypeError('arrow_start must be False/None or an id string for the new marker') + if self.arrow_end != False and self.arrow_end is not None: + if isinstance(self.arrow_end, SVG): + defs.append(self.arrow_end) + line.attr['marker-end'] = 'url(#%s)' % self.arrow_end['id'] + elif isinstance(self.arrow_end, basestring): + defs.append(make_marker(self.arrow_end, 'arrow_end')) + line.attr['marker-end'] = 'url(#%s)' % self.arrow_end + else: + raise TypeError('arrow_end must be False/None or an id string for the new marker') + return SVG('g', defs, line) + return line + +class VLine(Line): + defaults = {} + + def __repr__(self): + return '' % (self.y1, self.y2, self.x, self.attr) + + def __init__(self, y1, y2, x, **attr): + self.x = x + self.attr = dict(self.defaults) + self.attr.update(attr) + Line.__init__(self, x, y1, x, y2, **self.attr) + + def Path(self, trans=None, local=False): + self.x1 = self.x + self.x2 = self.x + return Line.Path(self, trans, local) + +class HLine(Line): + defaults = {} + + def __repr__(self): + return '' % (self.x1, self.x2, self.y, self.attr) + + def __init__(self, x1, x2, y, **attr): + self.y = y + self.attr = dict(self.defaults) + 
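+ # HLine, like VLine above, stores the shared coordinate separately and copies
+ # it back into both endpoints inside Path(), so the segment stays axis-aligned
+ # even if .y is reassigned after construction. Illustrative use:
+ #     baseline = HLine(0, 100, 20, stroke='gray')   # y = 20, from x = 0 to x = 100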
self.attr.update(attr) + Line.__init__(self, x1, y, x2, y, **self.attr) + + def Path(self, trans=None, local=False): + self.y1 = self.y + self.y2 = self.y + return Line.Path(self, trans, local) + +class Rect(Curve): + defaults = {} + + def __repr__(self): + return '' % (self.x1, self.y1, self.x2, self.y2, self.attr) + + def __init__(self, x1, y1, x2, y2, **attr): + (self.x1, self.y1, self.x2, self.y2) = (x1, y1, x2, y2) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + return self.Path(trans).SVG() + + def Path(self, trans=None, local=False): + if trans is None: + return Path([('M', self.x1, self.y1, not local), ('L', self.x2, self.y1, not local), ('L', self.x2, self.y2, not local), ('L', self.x1, self.y2, not local), ('Z',)], **self.attr) + else: + self.low = 0.0 + self.high = 1.0 + self.loop = False + self.f = lambda t: (self.x1 + t * (self.x2 - self.x1), self.y1) + d1 = Curve.Path(self, trans, local).d + self.f = lambda t: (self.x2, self.y1 + t * (self.y2 - self.y1)) + d2 = Curve.Path(self, trans, local).d + del d2[0] + self.f = lambda t: (self.x2 + t * (self.x1 - self.x2), self.y2) + d3 = Curve.Path(self, trans, local).d + del d3[0] + self.f = lambda t: (self.x1, self.y2 + t * (self.y1 - self.y2)) + d4 = Curve.Path(self, trans, local).d + del d4[0] + return Path(d=d1 + d2 + d3 + d4 + [('Z',)], **self.attr) + +class Ellipse(Curve): + defaults = {} + + def __repr__(self): + return '' % (self.x, self.y, self.ax, self.ay, self.b, self.attr) + + def __init__(self, x, y, ax, ay, b, **attr): + (self.x, self.y, self.ax, self.ay, self.b) = (x, y, ax, ay, b) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + return self.Path(trans).SVG() + + def Path(self, trans=None, local=False): + angle = math.atan2(self.ay, self.ax) + math.pi / 2.0 + bx = self.b * math.cos(angle) + by = self.b * math.sin(angle) + self.f = lambda t: (self.x + self.ax * math.cos(t) + bx * math.sin(t), self.y + self.ay * math.cos(t) + by * math.sin(t)) + self.low = -math.pi + self.high = math.pi + self.loop = True + return Curve.Path(self, trans, local) + +def unumber(x): + output = u'%g' % x + if output[0] == u'-': + output = u'–' + output[1:] + index = output.find(u'e') + if index != -1: + uniout = unicode(output[:index]) + u'×10' + saw_nonzero = False + for n in output[index + 1:]: + if n == u'+': + pass + elif n == u'-': + uniout += u'⁻' + elif n == u'0': + if saw_nonzero: + uniout += u'⁰' + elif n == u'1': + saw_nonzero = True + uniout += u'¹' + elif n == u'2': + saw_nonzero = True + uniout += u'²' + elif n == u'3': + saw_nonzero = True + uniout += u'³' + elif u'4' <= n <= u'9': + saw_nonzero = True + if saw_nonzero: + uniout += eval('u"\\u%x"' % (8304 + ord(n) - ord(u'0'))) + else: + uniout += n + if uniout[:2] == u'1×': + uniout = uniout[2:] + return uniout + return output + +class Ticks: + defaults = {'stroke-width': '0.25pt'} + text_defaults = {'stroke': 'none', 'fill': 'black', 'font-size': 5} + tick_start = -1.5 + tick_end = 1.5 + minitick_start = -0.75 + minitick_end = 0.75 + text_start = 2.5 + text_angle = 0.0 + + def __repr__(self): + return '' % (self.f, self.low, self.high, str(self.ticks), str(self.labels), self.attr) + + def __init__(self, f, low, high, ticks=-10, miniticks=True, labels=True, logbase=None, arrow_start=None, arrow_end=None, text_attr={}, **attr): + self.f = f + self.low = low + self.high = high + self.ticks = ticks + self.miniticks = miniticks + self.labels = labels + self.logbase = logbase + self.arrow_start = 
arrow_start + self.arrow_end = arrow_end + self.attr = dict(self.defaults) + self.attr.update(attr) + self.text_attr = dict(self.text_defaults) + self.text_attr.update(text_attr) + + def orient_tickmark(self, t, trans=None): + if isinstance(trans, basestring): + trans = totrans(trans) + if trans is None: + f = self.f + else: + f = lambda t: trans(*self.f(t)) + eps = _epsilon * abs(self.high - self.low) + (X, Y) = f(t) + (Xprime, Yprime) = f(t + eps) + (xhatx, xhaty) = ((Xprime - X) / eps, (Yprime - Y) / eps) + norm = math.sqrt(xhatx ** 2 + xhaty ** 2) + if norm != 0: + (xhatx, xhaty) = (xhatx / norm, xhaty / norm) + else: + (xhatx, xhaty) = (1.0, 0.0) + angle = math.atan2(xhaty, xhatx) + math.pi / 2.0 + (yhatx, yhaty) = (math.cos(angle), math.sin(angle)) + return ((X, Y), (xhatx, xhaty), (yhatx, yhaty), angle) + + def SVG(self, trans=None): + if isinstance(trans, basestring): + trans = totrans(trans) + (self.last_ticks, self.last_miniticks) = self.interpret() + tickmarks = Path([], **self.attr) + minitickmarks = Path([], **self.attr) + output = SVG('g') + if self.arrow_start != False and self.arrow_start is not None or (self.arrow_end != False and self.arrow_end is not None): + defs = SVG('defs') + if self.arrow_start != False and self.arrow_start is not None: + if isinstance(self.arrow_start, SVG): + defs.append(self.arrow_start) + elif isinstance(self.arrow_start, basestring): + defs.append(make_marker(self.arrow_start, 'arrow_start')) + else: + raise TypeError('arrow_start must be False/None or an id string for the new marker') + if self.arrow_end != False and self.arrow_end is not None: + if isinstance(self.arrow_end, SVG): + defs.append(self.arrow_end) + elif isinstance(self.arrow_end, basestring): + defs.append(make_marker(self.arrow_end, 'arrow_end')) + else: + raise TypeError('arrow_end must be False/None or an id string for the new marker') + output.append(defs) + eps = _epsilon * (self.high - self.low) + for (t, label) in self.last_ticks.items(): + ((X, Y), (xhatx, xhaty), (yhatx, yhaty), angle) = self.orient_tickmark(t, trans) + if (not self.arrow_start or abs(t - self.low) > eps) and (not self.arrow_end or abs(t - self.high) > eps): + tickmarks.d.append(('M', X - yhatx * self.tick_start, Y - yhaty * self.tick_start, True)) + tickmarks.d.append(('L', X - yhatx * self.tick_end, Y - yhaty * self.tick_end, True)) + angle = (angle - math.pi / 2.0) * 180.0 / math.pi + self.text_angle + if _hacks['inkscape-text-vertical-shift']: + if self.text_start > 0: + X += math.cos(angle * math.pi / 180.0 + math.pi / 2.0) * 2.0 + Y += math.sin(angle * math.pi / 180.0 + math.pi / 2.0) * 2.0 + else: + X += math.cos(angle * math.pi / 180.0 + math.pi / 2.0) * 2.0 * 2.5 + Y += math.sin(angle * math.pi / 180.0 + math.pi / 2.0) * 2.0 * 2.5 + if label != '': + output.append(SVG('text', label, transform='translate(%g, %g) rotate(%g)' % (X - yhatx * self.text_start, Y - yhaty * self.text_start, angle), **self.text_attr)) + for t in self.last_miniticks: + skip = False + for tt in self.last_ticks.keys(): + if abs(t - tt) < eps: + skip = True + break + if not skip: + ((X, Y), (xhatx, xhaty), (yhatx, yhaty), angle) = self.orient_tickmark(t, trans) + if (not self.arrow_start or abs(t - self.low) > eps) and (not self.arrow_end or abs(t - self.high) > eps): + minitickmarks.d.append(('M', X - yhatx * self.minitick_start, Y - yhaty * self.minitick_start, True)) + minitickmarks.d.append(('L', X - yhatx * self.minitick_end, Y - yhaty * self.minitick_end, True)) + output.prepend(tickmarks.SVG(trans)) + 
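+ # Drawing order: the tick labels were appended above, and the tick/minitick
+ # paths are prepended here, so the final group renders miniticks beneath
+ # major ticks beneath labels; miniticks that coincide with a major tick were
+ # already skipped in the loop above to avoid double-drawing.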
output.prepend(minitickmarks.SVG(trans)) + return output + + def interpret(self): + if self.labels is None or self.labels == False: + format = lambda x: '' + elif self.labels == True: + format = unumber + elif isinstance(self.labels, basestring): + format = lambda x: self.labels % x + elif callable(self.labels): + format = self.labels + else: + raise TypeError('labels must be None/False, True, a format string, or a number->string function') + ticks = self.ticks + if ticks is None or ticks == False: + return ({}, []) + elif isinstance(ticks, (int, long)): + if ticks == True: + ticks = -10 + if self.logbase is None: + ticks = self.compute_ticks(ticks, format) + else: + ticks = self.compute_logticks(self.logbase, ticks, format) + if self.miniticks == True: + if self.logbase is None: + return (ticks, self.compute_miniticks(ticks)) + else: + return (ticks, self.compute_logminiticks(self.logbase)) + elif isinstance(self.miniticks, (int, long)): + return (ticks, self.regular_miniticks(self.miniticks)) + elif getattr(self.miniticks, '__iter__', False): + return (ticks, self.miniticks) + elif self.miniticks == False or self.miniticks is None: + return (ticks, []) + else: + raise TypeError('miniticks must be None/False, True, a number of desired miniticks, or a list of numbers') + elif getattr(ticks, '__iter__', False): + if not isinstance(ticks, dict): + output = {} + eps = _epsilon * (self.high - self.low) + for x in ticks: + if format == unumber and abs(x) < eps: + output[x] = u'0' + else: + output[x] = format(x) + ticks = output + else: + pass + if self.miniticks == True: + if self.logbase is None: + return (ticks, self.compute_miniticks(ticks)) + else: + return (ticks, self.compute_logminiticks(self.logbase)) + elif isinstance(self.miniticks, (int, long)): + return (ticks, self.regular_miniticks(self.miniticks)) + elif getattr(self.miniticks, '__iter__', False): + return (ticks, self.miniticks) + elif self.miniticks == False or self.miniticks is None: + return (ticks, []) + else: + raise TypeError('miniticks must be None/False, True, a number of desired miniticks, or a list of numbers') + else: + raise TypeError('ticks must be None/False, a number of desired ticks, a list of numbers, or a dictionary of explicit markers') + + def compute_ticks(self, N, format): + if self.low >= self.high: + raise ValueError('low must be less than high') + if N == 1: + raise ValueError('N can be 0 or >1 to specify the exact number of ticks or negative to specify a maximum') + eps = _epsilon * (self.high - self.low) + if N >= 0: + output = {} + x = self.low + for i in xrange(N): + if format == unumber and abs(x) < eps: + label = u'0' + else: + label = format(x) + output[x] = label + x += (self.high - self.low) / (N - 1.0) + return output + N = -N + counter = 0 + granularity = 10 ** math.ceil(math.log10(max(abs(self.low), abs(self.high)))) + lowN = math.ceil(1.0 * self.low / granularity) + highN = math.floor(1.0 * self.high / granularity) + while lowN > highN: + countermod3 = counter % 3 + if countermod3 == 0: + granularity *= 0.5 + elif countermod3 == 1: + granularity *= 0.4 + else: + granularity *= 0.5 + counter += 1 + lowN = math.ceil(1.0 * self.low / granularity) + highN = math.floor(1.0 * self.high / granularity) + last_granularity = granularity + last_trial = None + while True: + trial = {} + for n in range(int(lowN), int(highN) + 1): + x = n * granularity + if format == unumber and abs(x) < eps: + label = u'0' + else: + label = format(x) + trial[x] = label + if int(highN) + 1 - int(lowN) >= N: + if 
last_trial is None: + (v1, v2) = (self.low, self.high) + return {v1: format(v1), v2: format(v2)} + else: + (low_in_ticks, high_in_ticks) = (False, False) + for t in last_trial.keys(): + if 1.0 * abs(t - self.low) / last_granularity < _epsilon: + low_in_ticks = True + if 1.0 * abs(t - self.high) / last_granularity < _epsilon: + high_in_ticks = True + lowN = 1.0 * self.low / last_granularity + highN = 1.0 * self.high / last_granularity + if abs(lowN - round(lowN)) < _epsilon and (not low_in_ticks): + last_trial[self.low] = format(self.low) + if abs(highN - round(highN)) < _epsilon and (not high_in_ticks): + last_trial[self.high] = format(self.high) + return last_trial + last_granularity = granularity + last_trial = trial + countermod3 = counter % 3 + if countermod3 == 0: + granularity *= 0.5 + elif countermod3 == 1: + granularity *= 0.4 + else: + granularity *= 0.5 + counter += 1 + lowN = math.ceil(1.0 * self.low / granularity) + highN = math.floor(1.0 * self.high / granularity) + + def regular_miniticks(self, N): + output = [] + x = self.low + for i in xrange(N): + output.append(x) + x += (self.high - self.low) / (N - 1.0) + return output + + def compute_miniticks(self, original_ticks): + if len(original_ticks) < 2: + original_ticks = ticks(self.low, self.high) + original_ticks = original_ticks.keys() + original_ticks.sort() + if self.low > original_ticks[0] + _epsilon or self.high < original_ticks[-1] - _epsilon: + raise ValueError('original_ticks {%g...%g} extend beyond [%g, %g]' % (original_ticks[0], original_ticks[-1], self.low, self.high)) + granularities = [] + for i in range(len(original_ticks) - 1): + granularities.append(original_ticks[i + 1] - original_ticks[i]) + spacing = 10 ** math.ceil(math.log10(min(granularities)) - 1) + output = [] + x = original_ticks[0] - math.ceil(1.0 * (original_ticks[0] - self.low) / spacing) * spacing + while x <= self.high: + if x >= self.low: + already_in_ticks = False + for t in original_ticks: + if abs(x - t) < _epsilon * (self.high - self.low): + already_in_ticks = True + if not already_in_ticks: + output.append(x) + x += spacing + return output + + def compute_logticks(self, base, N, format): + if self.low >= self.high: + raise ValueError('low must be less than high') + if N == 1: + raise ValueError('N can be 0 or >1 to specify the exact number of ticks or negative to specify a maximum') + eps = _epsilon * (self.high - self.low) + if N >= 0: + output = {} + x = self.low + for i in xrange(N): + if format == unumber and abs(x) < eps: + label = u'0' + else: + label = format(x) + output[x] = label + x += (self.high - self.low) / (N - 1.0) + return output + N = -N + lowN = math.floor(math.log(self.low, base)) + highN = math.ceil(math.log(self.high, base)) + output = {} + for n in range(int(lowN), int(highN) + 1): + x = base ** n + label = format(x) + if self.low <= x <= self.high: + output[x] = label + for i in range(1, len(output)): + keys = output.keys() + keys.sort() + keys = keys[::i] + values = map(lambda k: output[k], keys) + if len(values) <= N: + for k in output.keys(): + if k not in keys: + output[k] = '' + break + if len(output) <= 2: + output2 = self.compute_ticks(N=-int(math.ceil(N / 2.0)), format=format) + lowest = min(output2) + for k in output: + if k < lowest: + output2[k] = output[k] + output = output2 + return output + + def compute_logminiticks(self, base): + if self.low >= self.high: + raise ValueError('low must be less than high') + lowN = math.floor(math.log(self.low, base)) + highN = math.ceil(math.log(self.high, base)) + 
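+ # For each power of the base inside [low, high], emit the integer multiples
+ # m * base**n (m = 2 .. base-1) as minitick positions, e.g. the 2, 3, ... 9
+ # subdivisions of each decade for base 10; if two or fewer powers fall in
+ # the range, the method returns no miniticks at all.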
output = [] + num_ticks = 0 + for n in range(int(lowN), int(highN) + 1): + x = base ** n + if self.low <= x <= self.high: + num_ticks += 1 + for m in range(2, int(math.ceil(base))): + minix = m * x + if self.low <= minix <= self.high: + output.append(minix) + if num_ticks <= 2: + return [] + else: + return output + +class CurveAxis(Curve, Ticks): + defaults = {'stroke-width': '0.25pt'} + text_defaults = {'stroke': 'none', 'fill': 'black', 'font-size': 5} + + def __repr__(self): + return '' % (self.f, self.low, self.high, str(self.ticks), str(self.labels), self.attr) + + def __init__(self, f, low, high, ticks=-10, miniticks=True, labels=True, logbase=None, arrow_start=None, arrow_end=None, text_attr={}, **attr): + tattr = dict(self.text_defaults) + tattr.update(text_attr) + Curve.__init__(self, f, low, high) + Ticks.__init__(self, f, low, high, ticks, miniticks, labels, logbase, arrow_start, arrow_end, tattr, **attr) + + def SVG(self, trans=None): + func = Curve.SVG(self, trans) + ticks = Ticks.SVG(self, trans) + if self.arrow_start != False and self.arrow_start is not None: + if isinstance(self.arrow_start, basestring): + func.attr['marker-start'] = 'url(#%s)' % self.arrow_start + else: + func.attr['marker-start'] = 'url(#%s)' % self.arrow_start.id + if self.arrow_end != False and self.arrow_end is not None: + if isinstance(self.arrow_end, basestring): + func.attr['marker-end'] = 'url(#%s)' % self.arrow_end + else: + func.attr['marker-end'] = 'url(#%s)' % self.arrow_end.id + ticks.append(func) + return ticks + +class LineAxis(Line, Ticks): + defaults = {'stroke-width': '0.25pt'} + text_defaults = {'stroke': 'none', 'fill': 'black', 'font-size': 5} + + def __repr__(self): + return '' % (self.x1, self.y1, self.x2, self.y2, str(self.ticks), str(self.labels), self.attr) + + def __init__(self, x1, y1, x2, y2, start=0.0, end=1.0, ticks=-10, miniticks=True, labels=True, logbase=None, arrow_start=None, arrow_end=None, exclude=None, text_attr={}, **attr): + self.start = start + self.end = end + self.exclude = exclude + tattr = dict(self.text_defaults) + tattr.update(text_attr) + Line.__init__(self, x1, y1, x2, y2, **attr) + Ticks.__init__(self, None, None, None, ticks, miniticks, labels, logbase, arrow_start, arrow_end, tattr, **attr) + + def interpret(self): + if self.exclude is not None and (not (isinstance(self.exclude, (tuple, list)) and len(self.exclude) == 2 and isinstance(self.exclude[0], (int, long, float)) and isinstance(self.exclude[1], (int, long, float)))): + raise TypeError('exclude must either be None or (low, high)') + (ticks, miniticks) = Ticks.interpret(self) + if self.exclude is None: + return (ticks, miniticks) + ticks2 = {} + for (loc, label) in ticks.items(): + if self.exclude[0] <= loc <= self.exclude[1]: + ticks2[loc] = '' + else: + ticks2[loc] = label + return (ticks2, miniticks) + + def SVG(self, trans=None): + line = Line.SVG(self, trans) + f01 = self.f + self.f = lambda t: f01(1.0 * (t - self.start) / (self.end - self.start)) + self.low = self.start + self.high = self.end + if self.arrow_start != False and self.arrow_start is not None: + if isinstance(self.arrow_start, basestring): + line.attr['marker-start'] = 'url(#%s)' % self.arrow_start + else: + line.attr['marker-start'] = 'url(#%s)' % self.arrow_start.id + if self.arrow_end != False and self.arrow_end is not None: + if isinstance(self.arrow_end, basestring): + line.attr['marker-end'] = 'url(#%s)' % self.arrow_end + else: + line.attr['marker-end'] = 'url(#%s)' % self.arrow_end.id + ticks = Ticks.SVG(self, trans) + 
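+ # self.f was rebound above so that tick values in [start, end] map onto the
+ # [0, 1] parametrization of the underlying segment; the axis line is appended
+ # into the tick group below, so ticks and line are returned as one element.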
ticks.append(line) + return ticks + +class XAxis(LineAxis): + defaults = {'stroke-width': '0.25pt'} + text_defaults = {'stroke': 'none', 'fill': 'black', 'font-size': 5, 'dominant-baseline': 'text-before-edge'} + text_start = -1.0 + text_angle = 0.0 + + def __repr__(self): + return '' % (self.xmin, self.xmax, self.aty, str(self.ticks), str(self.labels), self.attr) + + def __init__(self, xmin, xmax, aty=0, ticks=-10, miniticks=True, labels=True, logbase=None, arrow_start=None, arrow_end=None, exclude=None, text_attr={}, **attr): + self.aty = aty + tattr = dict(self.text_defaults) + tattr.update(text_attr) + LineAxis.__init__(self, xmin, aty, xmax, aty, xmin, xmax, ticks, miniticks, labels, logbase, arrow_start, arrow_end, exclude, tattr, **attr) + + def SVG(self, trans=None): + self.y1 = self.aty + self.y2 = self.aty + return LineAxis.SVG(self, trans) + +class YAxis(LineAxis): + defaults = {'stroke-width': '0.25pt'} + text_defaults = {'stroke': 'none', 'fill': 'black', 'font-size': 5, 'text-anchor': 'end', 'dominant-baseline': 'middle'} + text_start = 2.5 + text_angle = 90.0 + + def __repr__(self): + return '' % (self.ymin, self.ymax, self.atx, str(self.ticks), str(self.labels), self.attr) + + def __init__(self, ymin, ymax, atx=0, ticks=-10, miniticks=True, labels=True, logbase=None, arrow_start=None, arrow_end=None, exclude=None, text_attr={}, **attr): + self.atx = atx + tattr = dict(self.text_defaults) + tattr.update(text_attr) + LineAxis.__init__(self, atx, ymin, atx, ymax, ymin, ymax, ticks, miniticks, labels, logbase, arrow_start, arrow_end, exclude, tattr, **attr) + + def SVG(self, trans=None): + self.x1 = self.atx + self.x2 = self.atx + return LineAxis.SVG(self, trans) + +class Axes: + defaults = {'stroke-width': '0.25pt'} + text_defaults = {'stroke': 'none', 'fill': 'black', 'font-size': 5} + + def __repr__(self): + return '' % (self.xmin, self.xmax, self.ymin, self.ymax, self.atx, self.aty, self.attr) + + def __init__(self, xmin, xmax, ymin, ymax, atx=0, aty=0, xticks=-10, xminiticks=True, xlabels=True, xlogbase=None, yticks=-10, yminiticks=True, ylabels=True, ylogbase=None, arrows=None, text_attr={}, **attr): + (self.xmin, self.xmax) = (xmin, xmax) + (self.ymin, self.ymax) = (ymin, ymax) + (self.atx, self.aty) = (atx, aty) + (self.xticks, self.xminiticks, self.xlabels, self.xlogbase) = (xticks, xminiticks, xlabels, xlogbase) + (self.yticks, self.yminiticks, self.ylabels, self.ylogbase) = (yticks, yminiticks, ylabels, ylogbase) + self.arrows = arrows + self.text_attr = dict(self.text_defaults) + self.text_attr.update(text_attr) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + (atx, aty) = (self.atx, self.aty) + if atx < self.xmin: + atx = self.xmin + if atx > self.xmax: + atx = self.xmax + if aty < self.ymin: + aty = self.ymin + if aty > self.ymax: + aty = self.ymax + xmargin = 0.1 * abs(self.ymin - self.ymax) + xexclude = (atx - xmargin, atx + xmargin) + ymargin = 0.1 * abs(self.xmin - self.xmax) + yexclude = (aty - ymargin, aty + ymargin) + if self.arrows is not None and self.arrows != False: + xarrow_start = self.arrows + '.xstart' + xarrow_end = self.arrows + '.xend' + yarrow_start = self.arrows + '.ystart' + yarrow_end = self.arrows + '.yend' + else: + xarrow_start = xarrow_end = yarrow_start = yarrow_end = None + xaxis = XAxis(self.xmin, self.xmax, aty, self.xticks, self.xminiticks, self.xlabels, self.xlogbase, xarrow_start, xarrow_end, exclude=xexclude, text_attr=self.text_attr, **self.attr).SVG(trans) + yaxis = YAxis(self.ymin, 
self.ymax, atx, self.yticks, self.yminiticks, self.ylabels, self.ylogbase, yarrow_start, yarrow_end, exclude=yexclude, text_attr=self.text_attr, **self.attr).SVG(trans) + return SVG('g', *xaxis.sub + yaxis.sub) + +class HGrid(Ticks): + defaults = {'stroke-width': '0.25pt', 'stroke': 'gray'} + mini_defaults = {'stroke-width': '0.25pt', 'stroke': 'lightgray', 'stroke-dasharray': '1,1'} + + def __repr__(self): + return '' % (self.xmin, self.xmax, self.low, self.high, str(self.ticks), str(self.miniticks), self.attr) + + def __init__(self, xmin, xmax, low, high, ticks=-10, miniticks=False, logbase=None, mini_attr={}, **attr): + (self.xmin, self.xmax) = (xmin, xmax) + self.mini_attr = dict(self.mini_defaults) + self.mini_attr.update(mini_attr) + Ticks.__init__(self, None, low, high, ticks, miniticks, None, logbase) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + (self.last_ticks, self.last_miniticks) = Ticks.interpret(self) + ticksd = [] + for t in self.last_ticks.keys(): + ticksd += Line(self.xmin, t, self.xmax, t).Path(trans).d + miniticksd = [] + for t in self.last_miniticks: + miniticksd += Line(self.xmin, t, self.xmax, t).Path(trans).d + return SVG('g', Path(d=ticksd, **self.attr).SVG(), Path(d=miniticksd, **self.mini_attr).SVG()) + +class VGrid(Ticks): + defaults = {'stroke-width': '0.25pt', 'stroke': 'gray'} + mini_defaults = {'stroke-width': '0.25pt', 'stroke': 'lightgray', 'stroke-dasharray': '1,1'} + + def __repr__(self): + return '' % (self.ymin, self.ymax, self.low, self.high, str(self.ticks), str(self.miniticks), self.attr) + + def __init__(self, ymin, ymax, low, high, ticks=-10, miniticks=False, logbase=None, mini_attr={}, **attr): + (self.ymin, self.ymax) = (ymin, ymax) + self.mini_attr = dict(self.mini_defaults) + self.mini_attr.update(mini_attr) + Ticks.__init__(self, None, low, high, ticks, miniticks, None, logbase) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + (self.last_ticks, self.last_miniticks) = Ticks.interpret(self) + ticksd = [] + for t in self.last_ticks.keys(): + ticksd += Line(t, self.ymin, t, self.ymax).Path(trans).d + miniticksd = [] + for t in self.last_miniticks: + miniticksd += Line(t, self.ymin, t, self.ymax).Path(trans).d + return SVG('g', Path(d=ticksd, **self.attr).SVG(), Path(d=miniticksd, **self.mini_attr).SVG()) + +class Grid(Ticks): + defaults = {'stroke-width': '0.25pt', 'stroke': 'gray'} + mini_defaults = {'stroke-width': '0.25pt', 'stroke': 'lightgray', 'stroke-dasharray': '1,1'} + + def __repr__(self): + return '' % (self.xmin, self.xmax, self.ymin, self.ymax, str(self.ticks), str(self.miniticks), self.attr) + + def __init__(self, xmin, xmax, ymin, ymax, ticks=-10, miniticks=False, logbase=None, mini_attr={}, **attr): + (self.xmin, self.xmax) = (xmin, xmax) + (self.ymin, self.ymax) = (ymin, ymax) + self.mini_attr = dict(self.mini_defaults) + self.mini_attr.update(mini_attr) + Ticks.__init__(self, None, None, None, ticks, miniticks, None, logbase) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + (self.low, self.high) = (self.xmin, self.xmax) + (self.last_xticks, self.last_xminiticks) = Ticks.interpret(self) + (self.low, self.high) = (self.ymin, self.ymax) + (self.last_yticks, self.last_yminiticks) = Ticks.interpret(self) + ticksd = [] + for t in self.last_xticks.keys(): + ticksd += Line(t, self.ymin, t, self.ymax).Path(trans).d + for t in self.last_yticks.keys(): + ticksd += Line(self.xmin, t, self.xmax, t).Path(trans).d 
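+ # Grid runs Ticks.interpret() twice, temporarily pointing (low, high) at the
+ # x-range and then at the y-range, so vertical and horizontal grid lines obey
+ # the same tick-placement rules as the axes. Illustrative construction:
+ #     grid = Grid(0, 10, 0, 5, ticks=-5, miniticks=True)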
+ miniticksd = [] + for t in self.last_xminiticks: + miniticksd += Line(t, self.ymin, t, self.ymax).Path(trans).d + for t in self.last_yminiticks: + miniticksd += Line(self.xmin, t, self.xmax, t).Path(trans).d + return SVG('g', Path(d=ticksd, **self.attr).SVG(), Path(d=miniticksd, **self.mini_attr).SVG()) + +class XErrorBars: + defaults = {'stroke-width': '0.25pt'} + + def __repr__(self): + return '<XErrorBars (%d nodes)>' % len(self.d) + + def __init__(self, d=[], **attr): + self.d = list(d) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + if isinstance(trans, basestring): + trans = totrans(trans) + output = SVG('g') + for p in self.d: + (x, y) = (p[0], p[1]) + if len(p) == 3: + bars = [x - p[2], x + p[2]] + else: + bars = [x + pi for pi in p[2:]] + (start, end) = (min(bars), max(bars)) + output.append(LineAxis(start, y, end, y, start, end, bars, False, False, **self.attr).SVG(trans)) + return output + +class YErrorBars: + defaults = {'stroke-width': '0.25pt'} + + def __repr__(self): + return '<YErrorBars (%d nodes)>' % len(self.d) + + def __init__(self, d=[], **attr): + self.d = list(d) + self.attr = dict(self.defaults) + self.attr.update(attr) + + def SVG(self, trans=None): + if isinstance(trans, basestring): + trans = totrans(trans) + output = SVG('g') + for p in self.d: + (x, y) = (p[0], p[1]) + if len(p) == 3: + bars = [y - p[2], y + p[2]] + else: + bars = [y + pi for pi in p[2:]] + (start, end) = (min(bars), max(bars)) + output.append(LineAxis(x, start, x, end, start, end, bars, False, False, **self.attr).SVG(trans)) + return output + +# File: opencv-master/doc/tools/add_signatures.py +"""""" +from __future__ import print_function +import sys +sys.dont_write_bytecode = True +import os +from pprint import pprint +import re +import logging +import json +import html_functions +import doxygen_scan +loglevel = os.environ.get('LOGLEVEL', None) +if loglevel: + logging.basicConfig(level=loglevel) +ROOT_DIR = sys.argv[1] +PYTHON_SIGNATURES_FILE = sys.argv[2] +JAVA_OR_PYTHON = sys.argv[3] +ADD_JAVA = False +ADD_PYTHON = False +if JAVA_OR_PYTHON == 'python': + ADD_PYTHON = True +python_signatures = dict() +with open(PYTHON_SIGNATURES_FILE, 'rt') as f: + python_signatures = json.load(f) + print('Loaded Python signatures: %d' % len(python_signatures)) +import xml.etree.ElementTree as ET +root = ET.parse(ROOT_DIR + 'opencv.tag') +files_dict = {} +namespaces = root.findall("./compound[@kind='namespace']") +for ns in namespaces: + ns_name = ns.find('./name').text + doxygen_scan.scan_namespace_constants(ns, ns_name, files_dict) + doxygen_scan.scan_namespace_functions(ns, ns_name, files_dict) +classes = root.findall("./compound[@kind='class']") +for c in classes: + c_name = c.find('./name').text + file = c.find('./filename').text + doxygen_scan.scan_class_methods(c, c_name, files_dict) +print('Doxygen files to scan: %s' % len(files_dict)) +files_processed = 0 +files_skipped = 0 +symbols_processed = 0 +for file in files_dict: + anchor_list = files_dict[file] + active_anchors = [a for a in anchor_list if a.cppname in python_signatures] + if len(active_anchors) == 0: + files_skipped = files_skipped + 1 + continue + active_anchors_dict = {a.anchor: a for a in active_anchors} + if len(active_anchors_dict) != len(active_anchors): + logging.info('Duplicate entries detected: %s -> %s (%s)' % (len(active_anchors), len(active_anchors_dict), file)) + files_processed = files_processed + 1 + symbols_processed = symbols_processed + len(active_anchors_dict) + logging.info('File: %r' % file) + 
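+ # Each page listed in opencv.tag is patched in place: anchors whose C++ name
+ # appears in the signatures JSON get a Python signature table injected into
+ # the generated HTML. Typical invocation (paths are illustrative; the root
+ # directory must end with a slash, since 'opencv.tag' is appended to it):
+ #     python add_signatures.py build/doc/doxygen/html/ pyopencv_signatures.json python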
html_functions.insert_python_signatures(python_signatures, active_anchors_dict, ROOT_DIR + file) +print('Done (processed files %d, symbols %d, skipped %d files)' % (files_processed, symbols_processed, files_skipped)) + +# File: opencv-master/doc/tools/doxygen_scan.py +import traceback + +class Symbol(object): + + def __init__(self, anchor, type, cppname): + self.anchor = anchor + self.type = type + self.cppname = cppname + + def __repr__(self): + return '%s:%s@%s' % (self.type, self.cppname, self.anchor) + +def add_to_file(files_dict, file, anchor): + anchors = files_dict.setdefault(file, []) + anchors.append(anchor) + +def scan_namespace_constants(ns, ns_name, files_dict): + constants = ns.findall("./member[@kind='enumvalue']") + for c in constants: + c_name = c.find('./name').text + name = ns_name + '::' + c_name + file = c.find('./anchorfile').text + anchor = c.find('./anchor').text + add_to_file(files_dict, file, Symbol(anchor, 'const', name)) + +def scan_namespace_functions(ns, ns_name, files_dict): + functions = ns.findall("./member[@kind='function']") + for f in functions: + f_name = f.find('./name').text + name = ns_name + '::' + f_name + file = f.find('./anchorfile').text + anchor = f.find('./anchor').text + add_to_file(files_dict, file, Symbol(anchor, 'fn', name)) + +def scan_class_methods(c, c_name, files_dict): + methods = c.findall("./member[@kind='function']") + for m in methods: + m_name = m.find('./name').text + name = c_name + '::' + m_name + file = m.find('./anchorfile').text + anchor = m.find('./anchor').text + add_to_file(files_dict, file, Symbol(anchor, 'method', name)) + +# File: opencv-master/doc/tools/html_functions.py +from __future__ import print_function +import sys +import logging +import os +import re +from pprint import pprint +import traceback +try: + import bs4 + from bs4 import BeautifulSoup +except ImportError: + raise ImportError('Error: Install BeautifulSoup (bs4) for adding Python & Java signatures documentation') + +def load_html_file(file_dir): + with open(file_dir, 'rb') as fp: + data = fp.read() + if os.name == 'nt' or sys.version_info[0] == 3: + data = data.decode(encoding='utf-8', errors='strict') + data = re.sub('(\\>)([ ]+)', lambda match: match.group(1) + '!space!' * len(match.group(2)), data) + data = re.sub('([ ]+)(\\<)', lambda match: '!space!' 
* len(match.group(1)) + match.group(2), data) + if os.name == 'nt' or sys.version_info[0] == 3: + data = data.encode('utf-8', 'ignore') + soup = BeautifulSoup(data, 'html.parser') + return soup + +def update_html(file, soup): + s = str(soup) + s = s.replace('!space!', ' ') + if os.name == 'nt' or sys.version_info[0] == 3: + s = s.encode('utf-8', 'ignore') + with open(file, 'wb') as f: + f.write(s) + +def insert_python_signatures(python_signatures, symbols_dict, filepath): + soup = load_html_file(filepath) + entries = soup.find_all(lambda tag: tag.name == 'a' and tag.has_attr('id')) + for e in entries: + anchor = e['id'] + if anchor in symbols_dict: + s = symbols_dict[anchor] + logging.info('Process: %r' % s) + if s.type == 'fn' or s.type == 'method': + process_fn(soup, e, python_signatures[s.cppname], s) + elif s.type == 'const': + process_const(soup, e, python_signatures[s.cppname], s) + else: + logging.error('unsupported type: %s' % s) + update_html(filepath, soup) + +def process_fn(soup, anchor, python_signature, symbol): + try: + r = anchor.find_next_sibling(class_='memitem').find(class_='memproto').find('table') + insert_python_fn_signature(soup, r, python_signature, symbol) + except: + logging.error("Can't process: %s" % symbol) + traceback.print_exc() + pprint(anchor) + +def process_const(soup, anchor, python_signature, symbol): + try: + description = append(soup.new_tag('div', **{'class': ['python_language']}), 'Python: ' + python_signature[0]['name']) + old = anchor.find_next_sibling('div', class_='python_language') + if old is None: + anchor.parent.append(description) + else: + old.replace_with(description) + except: + logging.error("Can't process: %s" % symbol) + traceback.print_exc() + pprint(anchor) + +def insert_python_fn_signature(soup, table, variants, symbol): + description = create_python_fn_description(soup, variants) + description['class'] = 'python_language' + soup = insert_or_replace(table, description, 'table', 'python_language') + return soup + +def create_python_fn_description(soup, variants): + language = 'Python:' + table = soup.new_tag('table') + heading_row = soup.new_tag('th') + table.append(append(soup.new_tag('tr'), append(soup.new_tag('th', colspan=999, style='text-align:left'), language))) + for v in variants: + add_signature_to_table(soup, table, v, language, type) + return table + +def add_signature_to_table(soup, table, signature, language, type): + row = soup.new_tag('tr') + row.append(soup.new_tag('td', style='width: 20px;')) + row.append(append(soup.new_tag('td'), signature['name'] + '(')) + row.append(append(soup.new_tag('td', **{'class': 'paramname'}), signature['arg'])) + row.append(append(soup.new_tag('td'), ') -> ')) + row.append(append(soup.new_tag('td'), signature['ret'])) + table.append(row) + +def append(target, obj): + target.append(obj) + return target + +def insert_or_replace(element_before, new_element, tag, tag_class): + old = element_before.find_next_sibling(tag, class_=tag_class) + if old is None: + element_before.insert_after(new_element) + else: + old.replace_with(new_element) + +# File: opencv-master/doc/tools/scan_tutorials.py +from pathlib import Path +import re + +class Tutorial(object): + + def __init__(self, path): + self.path = path + self.title = None + self.children = [] + self.prev = None + self.next = None + with open(path, 'rt') as f: + self.parse(f) + + def parse(self, f): + rx_title = re.compile('\\{#(\\w+)\\}') + rx_subpage = re.compile('@subpage\\s+(\\w+)') + rx_prev = re.compile('@prev_tutorial\\{(\\w+)\\}') + 
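+ # The regexes defined here match the Doxygen tutorial markup, for example
+ # (identifiers are illustrative):
+ #     # Some Title {#tutorial_some_id}
+ #     @prev_tutorial{tutorial_prev_id}
+ #     @next_tutorial{tutorial_next_id}
+ #     @subpage tutorial_child_id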
rx_next = re.compile('@next_tutorial\\{(\\w+)\\}') + for line in f: + if self.title is None: + m = rx_title.search(line) + if m: + self.title = m.group(1) + continue + if self.prev is None: + m = rx_prev.search(line) + if m: + self.prev = m.group(1) + continue + if self.next is None: + m = rx_next.search(line) + if m: + self.next = m.group(1) + continue + m = rx_subpage.search(line) + if m: + self.children.append(m.group(1)) + continue + + def verify_prev_next(self, storage): + res = True + if self.title is None: + print('[W] No title') + res = False + prev = None + for one in self.children: + c = storage[one] + if c.prev is not None and c.prev != prev: + print('[W] Wrong prev_tutorial: expected {} / actual {}'.format(c.prev, prev)) + res = False + prev = c.title + next = None + for one in reversed(self.children): + c = storage[one] + if c.next is not None and c.next != next: + print('[W] Wrong next_tutorial: expected {} / actual {}'.format(c.next, next)) + res = False + next = c.title + if len(self.children) == 0 and self.prev is None and (self.next is None): + print('[W] No prev and next tutorials') + res = False + return res +if __name__ == '__main__': + p = Path('tutorials') + print("Looking for tutorials in: '{}'".format(p)) + all_tutorials = dict() + for f in p.glob('**/*'): + if f.suffix.lower() in ('.markdown', '.md'): + t = Tutorial(f) + all_tutorials[t.title] = t + res = 0 + print('Found: {}'.format(len(all_tutorials))) + print('------') + for (title, t) in all_tutorials.items(): + if not t.verify_prev_next(all_tutorials): + print('[E] Verification failed: {}'.format(t.path)) + print('------') + res = 1 + exit(res) + +# File: opencv-master/modules/core/misc/python/package/mat_wrapper/__init__.py +__all__ = [] +import numpy as np +import cv2 as cv +from typing import TYPE_CHECKING, Any +if TYPE_CHECKING: + _NumPyArrayNumeric = np.ndarray[Any, np.dtype[np.integer[Any] | np.floating[Any]]] +else: + _NumPyArrayNumeric = np.ndarray + +class Mat(_NumPyArrayNumeric): + + def __new__(cls, arr, **kwargs): + obj = arr.view(Mat) + return obj + + def __init__(self, arr, **kwargs): + self.wrap_channels = kwargs.pop('wrap_channels', getattr(arr, 'wrap_channels', False)) + if len(kwargs) > 0: + raise TypeError('Unknown parameters: {}'.format(repr(kwargs))) + + def __array_finalize__(self, obj): + if obj is None: + return + self.wrap_channels = getattr(obj, 'wrap_channels', None) +Mat.__module__ = cv.__name__ +cv.Mat = Mat +cv._registerMatType(Mat) + +# File: opencv-master/modules/core/src/opencl/runtime/generator/common.py +from __future__ import print_function +import sys, os, re + +def remove_comments(s): + + def replacer(match): + s = match.group(0) + if s.startswith('/'): + return '' + else: + return s + pattern = re.compile('//.*?$|/\\*.*?\\*/|\\\'(?:\\\\.|[^\\\\\\\'])*\\\'|"(?:\\\\.|[^\\\\"])*"', re.DOTALL | re.MULTILINE) + return re.sub(pattern, replacer, s) + +def getTokens(s): + return re.findall('[a-z_A-Z0-9_]+|[^[a-z_A-Z0-9_ \\n\\r\\t]', s) + +def getParameter(pos, tokens): + deep = 0 + p = [] + while True: + if pos >= len(tokens): + break + if (tokens[pos] == ')' or tokens[pos] == ',') and deep == 0: + if tokens[pos] == ')': + pos = len(tokens) + else: + pos += 1 + break + if tokens[pos] == '(': + deep += 1 + if tokens[pos] == ')': + deep -= 1 + p.append(tokens[pos]) + pos += 1 + return (' '.join(p), pos) + +def getParameters(i, tokens): + assert tokens[i] == '(' + i += 1 + params = [] + while True: + if i >= len(tokens) or tokens[i] == ')': + break + (param, i) = getParameter(i, 
tokens) + if len(param) > 0: + params.append(param) + else: + assert False + break + if len(params) > 0 and params[0] == 'void': + del params[0] + return params + +def postProcessParameters(fns): + fns.sort(key=lambda x: x['name']) + for fn in fns: + fn['params_full'] = list(fn['params']) + for i in range(len(fn['params'])): + p = fn['params'][i] + if p.find('(') != -1: + p = re.sub('\\* *([a-zA-Z0-9_]*) ?\\)', '*)', p, 1) + fn['params'][i] = p + continue + parts = re.findall('[a-z_A-Z0-9]+|\\*', p) + if len(parts) > 1: + if parts[-1].find('*') == -1: + del parts[-1] + fn['params'][i] = ' '.join(parts) + +def readFunctionFilter(fns, fileName): + try: + f = open(fileName, 'r') + except: + print("ERROR: Can't open filter file: %s" % fileName) + return 0 + count = 0 + while f: + line = f.readline() + if not line: + break + assert isinstance(line, str) + if line.startswith('#') or line.startswith('//'): + continue + line = line.replace('\n', '') + if len(line) == 0: + continue + found = False + for fn in fns: + if fn['name'] == line: + found = True + fn['enabled'] = True + if not found: + sys.exit('FATAL ERROR: Unknown function: %s' % line) + count = count + 1 + f.close() + return count + +def outputToString(f): + + def wrapped(*args, **kwargs): + from io import StringIO + old_stdout = sys.stdout + sys.stdout = str_stdout = StringIO() + res = f(*args, **kwargs) + assert res is None + sys.stdout = old_stdout + result = str_stdout.getvalue() + result = re.sub('([^\\n /]) [ ]+', '\\1 ', result) + result = re.sub(' ,', ',', result) + result = re.sub(' \\*', '*', result) + result = re.sub('\\( ', '(', result) + result = re.sub(' \\)', ')', result) + return result + return wrapped + +@outputToString +def generateFilterNames(fns): + for fn in fns: + print('%s%s' % ('' if 'enabled' in fn else '//', fn['name'])) + print('#total %d' % len(fns)) +callback_check = re.compile('([^\\(]*\\(.*)(\\* *)(\\).*\\(.*\\))') + +def getTypeWithParam(t, p): + if callback_check.match(t): + return callback_check.sub('\\1 *' + p + '\\3', t) + return t + ' ' + p + +@outputToString +def generateStructDefinitions(fns, lprefix='opencl_fn', enumprefix='OPENCL_FN'): + print('// generated by %s' % os.path.basename(sys.argv[0])) + for fn in fns: + commentStr = '' if 'enabled' in fn else '//' + decl_args = [] + for (i, t) in enumerate(fn['params']): + decl_args.append(getTypeWithParam(t, 'p%d' % (i + 1))) + decl_args_str = '(' + ', '.join(decl_args) + ')' + print('%s%s%d(%s_%s, %s, %s)' % (commentStr, lprefix, len(fn['params']), enumprefix, fn['name'], ' '.join(fn['ret']), decl_args_str)) + print(commentStr + '%s%s (%s *%s)(%s) =\n%s %s_%s_switch_fn;' % (' '.join(fn['modifiers'] + ' ') if len(fn['modifiers']) > 0 else '', ' '.join(fn['ret']), ' '.join(fn['calling']), fn['name'], ', '.join(fn['params']), commentStr, enumprefix, fn['name'])) + print(commentStr + 'static const struct DynamicFnEntry %s_definition = { "%s", (void**)&%s};' % (fn['name'], fn['name'], fn['name'])) + print() + +@outputToString +def generateStaticDefinitions(fns): + print('// generated by %s' % os.path.basename(sys.argv[0])) + for fn in fns: + commentStr = '' if 'enabled' in fn else '//' + decl_args = [] + for (i, t) in enumerate(fn['params']): + decl_args.append(getTypeWithParam(t, 'p%d' % (i + 1))) + decl_args_str = '(' + ', '.join(decl_args) + ')' + print(commentStr + 'CL_RUNTIME_EXPORT %s%s (%s *%s_pfn)(%s) = %s;' % (' '.join(fn['modifiers'] + ' ') if len(fn['modifiers']) > 0 else '', ' '.join(fn['ret']), ' '.join(fn['calling']), fn['name'], ', 
'.join(fn['params']), fn['name'])) + +@outputToString +def generateListOfDefinitions(fns, name='opencl_fn_list'): + print('// generated by %s' % os.path.basename(sys.argv[0])) + print('static const struct DynamicFnEntry* %s[] = {' % name) + for fn in fns: + commentStr = '' if 'enabled' in fn else '//' + if 'enabled' in fn: + print(' &%s_definition,' % fn['name']) + else: + print(' NULL/*&%s_definition*/,' % fn['name']) + first = False + print('};') + +@outputToString +def generateEnums(fns, prefix='OPENCL_FN'): + print('// generated by %s' % os.path.basename(sys.argv[0])) + print('enum %s_ID {' % prefix) + for (i, fn) in enumerate(fns): + commentStr = '' if 'enabled' in fn else '//' + print(commentStr + ' %s_%s = %d,' % (prefix, fn['name'], i)) + print('};') + +@outputToString +def generateRemapOrigin(fns): + print('// generated by %s' % os.path.basename(sys.argv[0])) + for fn in fns: + print('#define %s %s_' % (fn['name'], fn['name'])) + +@outputToString +def generateRemapDynamic(fns): + print('// generated by %s' % os.path.basename(sys.argv[0])) + for fn in fns: + print('#undef %s' % fn['name']) + commentStr = '' if 'enabled' in fn else '//' + print(commentStr + '#define %s %s_pfn' % (fn['name'], fn['name'])) + +@outputToString +def generateFnDeclaration(fns): + print('// generated by %s' % os.path.basename(sys.argv[0])) + for fn in fns: + commentStr = '' if 'enabled' in fn else '//' + print(commentStr + 'extern CL_RUNTIME_EXPORT %s %s (%s *%s)(%s);' % (' '.join(fn['modifiers']), ' '.join(fn['ret']), ' '.join(fn['calling']), fn['name'], ', '.join(fn['params'] if 'params_full' not in fn else fn['params_full']))) + +@outputToString +def generateTemplates(total, lprefix, switch_name, calling_convention=''): + print('// generated by %s' % os.path.basename(sys.argv[0])) + for sz in range(total): + template_params = ['ID', '_R', 'decl_args'] + params = ['p%d' % (i + 1) for i in range(0, sz)] + print('#define %s%d(%s) \\' % (lprefix, sz, ', '.join(template_params))) + print(' typedef _R (%s *ID##FN)decl_args; \\' % calling_convention) + print(' static _R %s ID##_switch_fn decl_args \\' % calling_convention) + print(' { return ((ID##FN)%s(ID))(%s); } \\' % (switch_name, ', '.join(params))) + print('') + +@outputToString +def generateInlineWrappers(fns): + print('// generated by %s' % os.path.basename(sys.argv[0])) + for fn in fns: + commentStr = '' if 'enabled' in fn else '//' + print('#undef %s' % fn['name']) + print(commentStr + '#define %s %s_fn' % (fn['name'], fn['name'])) + params = [] + call_params = [] + for i in range(0, len(fn['params'])): + t = fn['params'][i] + if t.find('*)') >= 0: + p = re.sub('\\*\\)', ' *p%d)' % i, t, 1) + params.append(p) + else: + params.append('%s p%d' % (t, i)) + call_params.append('p%d' % i) + if len(fn['ret']) == 1 and fn['ret'][0] == 'void': + print(commentStr + 'inline void %s(%s) { %s_pfn(%s); }' % (fn['name'], ', '.join(params), fn['name'], ', '.join(call_params))) + else: + print(commentStr + 'inline %s %s(%s) { return %s_pfn(%s); }' % (' '.join(fn['ret']), fn['name'], ', '.join(params), fn['name'], ', '.join(call_params))) + +def ProcessTemplate(inputFile, ctx, noteLine='//\n// AUTOGENERATED, DO NOT EDIT\n//'): + f = open(inputFile, 'r') + if noteLine: + print(noteLine) + for line in f: + if line.startswith('@'): + assert line[-1] == '\n' + line = line[:-1] + assert line[-1] == '@' + name = line[1:-1] + assert name in ctx, name + line = ctx[name] + ('\n' if len(ctx[name]) > 0 and ctx[name][-1] != '\n' else '') + sys.stdout.write(line) + f.close() + +# 
File: opencv-master/modules/core/src/opencl/runtime/generator/parser_cl.py +from __future__ import print_function +import sys, re +from common import remove_comments, getTokens, getParameters, postProcessParameters +try: + if len(sys.argv) > 1: + module_name = sys.argv[1] + outfile = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/%s.hpp' % module_name, 'w') + outfile_impl = open('../autogenerated/%s_impl.hpp' % module_name, 'w') + outfile_static_impl = open('../autogenerated/%s_static_impl.hpp' % module_name, 'w') + outfile_wrappers = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/%s_wrappers.hpp' % module_name, 'w') + if len(sys.argv) > 2: + f = open(sys.argv[2], 'r') + else: + f = sys.stdin + else: + sys.exit('ERROR. Specify output file') +except: + sys.exit("ERROR. Can't open input/output file, check parameters") +fns = [] +while True: + line = f.readline() + if len(line) == 0: + break + assert isinstance(line, str) + parts = line.split() + if line.startswith('extern') and line.find('CL_API_CALL') != -1: + while True: + nl = f.readline() + nl = nl.strip() + nl = re.sub('\\n', '', nl) + if len(nl) == 0: + break + line += ' ' + nl + line = remove_comments(line) + parts = getTokens(line) + fn = {} + modifiers = [] + ret = [] + calling = [] + i = 1 + while i < len(parts): + if parts[i].startswith('CL_'): + modifiers.append(parts[i]) + else: + break + i += 1 + while i < len(parts): + if not parts[i].startswith('CL_'): + ret.append(parts[i]) + else: + break + i += 1 + while i < len(parts): + calling.append(parts[i]) + i += 1 + if parts[i - 1] == 'CL_API_CALL': + break + fn['modifiers'] = [] + fn['ret'] = ret + fn['calling'] = calling + name = parts[i] + i += 1 + fn['name'] = name + print('name=' + name) + params = getParameters(i, parts) + fn['params'] = params + fns.append(fn) +f.close() +print('Found %d functions' % len(fns)) +postProcessParameters(fns) +from pprint import pprint +pprint(fns) +from common import * +filterFileName = './filter/%s_functions.list' % module_name +numEnabled = readFunctionFilter(fns, filterFileName) +functionsFilter = generateFilterNames(fns) +filter_file = open(filterFileName, 'w') +filter_file.write(functionsFilter) +ctx = {} +ctx['CL_REMAP_ORIGIN'] = generateRemapOrigin(fns) +ctx['CL_REMAP_DYNAMIC'] = generateRemapDynamic(fns) +ctx['CL_FN_DECLARATIONS'] = generateFnDeclaration(fns) +sys.stdout = outfile +ProcessTemplate('template/%s.hpp.in' % module_name, ctx) +ctx['CL_FN_INLINE_WRAPPERS'] = generateInlineWrappers(fns) +sys.stdout = outfile_wrappers +ProcessTemplate('template/%s_wrappers.hpp.in' % module_name, ctx) +if module_name == 'opencl_core': + ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns) + ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns) + ctx['CL_FN_ENUMS'] = generateEnums(fns) + ctx['CL_FN_SWITCH'] = generateTemplates(15, 'opencl_fn', 'opencl_check_fn', 'CL_API_CALL') +else: + lprefix = module_name + '_fn' + enumprefix = module_name.upper() + '_FN' + fn_list_name = module_name + '_fn_list' + ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns, lprefix=lprefix, enumprefix=enumprefix) + ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns, fn_list_name) + ctx['CL_FN_ENUMS'] = generateEnums(fns, prefix=enumprefix) + ctx['CL_FN_SWITCH'] = generateTemplates(15, lprefix, '%s_check_fn' % module_name, 'CL_API_CALL') +ctx['CL_NUMBER_OF_ENABLED_FUNCTIONS'] = '// number of enabled functions: %d' % numEnabled +sys.stdout = outfile_impl +ProcessTemplate('template/%s_impl.hpp.in' % 
module_name, ctx) +sys.stdout = outfile_static_impl +ProcessTemplate('template/static_impl.hpp.in', dict(CL_STATIC_DEFINITIONS=generateStaticDefinitions(fns))) + +# File: opencv-master/modules/core/src/opencl/runtime/generator/parser_clblas.py +from __future__ import print_function +import sys, re +from common import remove_comments, getTokens, getParameters, postProcessParameters +try: + if len(sys.argv) > 1: + f = open(sys.argv[1], 'r') + else: + f = sys.stdin +except: + sys.exit("ERROR. Can't open input file") +fns = [] +while True: + line = f.readline() + if len(line) == 0: + break + assert isinstance(line, str) + line = line.strip() + parts = line.split() + if (line.startswith('clblas') or line.startswith('cl_') or line == 'void') and len(line.split()) == 1 and (line.find('(') == -1): + fn = {} + modifiers = [] + ret = [] + calling = [] + i = 0 + while i < len(parts): + if parts[i].startswith('CL_'): + modifiers.append(parts[i]) + else: + break + i += 1 + while i < len(parts): + if not parts[i].startswith('CL_'): + ret.append(parts[i]) + else: + break + i += 1 + while i < len(parts): + calling.append(parts[i]) + i += 1 + fn['modifiers'] = [] + fn['ret'] = ret + fn['calling'] = calling + line = f.readline() + while True: + nl = f.readline() + nl = nl.strip() + nl = re.sub('\\n', '', nl) + if len(nl) == 0: + break + line += ' ' + nl + line = remove_comments(line) + parts = getTokens(line) + i = 0 + name = parts[i] + i += 1 + fn['name'] = name + print('name=' + name) + params = getParameters(i, parts) + fn['params'] = params + fns.append(fn) +f.close() +print('Found %d functions' % len(fns)) +postProcessParameters(fns) +from pprint import pprint +pprint(fns) +from common import * +filterFileName = './filter/opencl_clblas_functions.list' +numEnabled = readFunctionFilter(fns, filterFileName) +functionsFilter = generateFilterNames(fns) +filter_file = open(filterFileName, 'w') +filter_file.write(functionsFilter) +ctx = {} +ctx['CLAMDBLAS_REMAP_ORIGIN'] = generateRemapOrigin(fns) +ctx['CLAMDBLAS_REMAP_DYNAMIC'] = generateRemapDynamic(fns) +ctx['CLAMDBLAS_FN_DECLARATIONS'] = generateFnDeclaration(fns) +sys.stdout = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/opencl_clblas.hpp', 'w') +ProcessTemplate('template/opencl_clblas.hpp.in', ctx) +ctx['CL_FN_ENUMS'] = generateEnums(fns, 'OPENCLAMDBLAS_FN') +ctx['CL_FN_SWITCH'] = generateTemplates(23, 'openclamdblas_fn', 'openclamdblas_check_fn', '') +ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns, 'openclamdblas_fn', 'OPENCLAMDBLAS_FN') +ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns, 'openclamdblas_fn') +ctx['CL_NUMBER_OF_ENABLED_FUNCTIONS'] = '// number of enabled functions: %d' % numEnabled +sys.stdout = open('../autogenerated/opencl_clblas_impl.hpp', 'w') +ProcessTemplate('template/opencl_clblas_impl.hpp.in', ctx) + +# File: opencv-master/modules/core/src/opencl/runtime/generator/parser_clfft.py +from __future__ import print_function +import sys, re +from common import remove_comments, getTokens, getParameters, postProcessParameters +try: + if len(sys.argv) > 1: + f = open(sys.argv[1], 'r') + else: + f = sys.stdin +except: + sys.exit("ERROR. 
Can't open input file") +fns = [] +while True: + line = f.readline() + if len(line) == 0: + break + assert isinstance(line, str) + line = line.strip() + if line.startswith('CLFFTAPI'): + line = re.sub('\\n', '', line) + while True: + nl = f.readline() + nl = nl.strip() + nl = re.sub('\\n', '', nl) + if len(nl) == 0: + break + line += ' ' + nl + line = remove_comments(line) + parts = getTokens(line) + fn = {} + modifiers = [] + ret = [] + calling = [] + i = 0 + while True: + if parts[i] == 'CLFFTAPI': + modifiers.append(parts[i]) + else: + break + i += 1 + while i < len(parts): + if not parts[i] == '(': + ret.append(parts[i]) + else: + del ret[-1] + i -= 1 + break + i += 1 + fn['modifiers'] = [] + fn['ret'] = ret + fn['calling'] = calling + name = parts[i] + i += 1 + fn['name'] = name + print('name=' + name) + params = getParameters(i, parts) + if len(params) > 0 and params[0] == 'void': + del params[0] + fn['params'] = params + fns.append(fn) +f.close() +print('Found %d functions' % len(fns)) +postProcessParameters(fns) +from pprint import pprint +pprint(fns) +from common import * +filterFileName = './filter/opencl_clfft_functions.list' +numEnabled = readFunctionFilter(fns, filterFileName) +functionsFilter = generateFilterNames(fns) +filter_file = open(filterFileName, 'w') +filter_file.write(functionsFilter) +ctx = {} +ctx['CLAMDFFT_REMAP_ORIGIN'] = generateRemapOrigin(fns) +ctx['CLAMDFFT_REMAP_DYNAMIC'] = generateRemapDynamic(fns) +ctx['CLAMDFFT_FN_DECLARATIONS'] = generateFnDeclaration(fns) +sys.stdout = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/opencl_clfft.hpp', 'w') +ProcessTemplate('template/opencl_clfft.hpp.in', ctx) +ctx['CL_FN_ENUMS'] = generateEnums(fns, 'OPENCLAMDFFT_FN') +ctx['CL_FN_SWITCH'] = generateTemplates(23, 'openclamdfft_fn', 'openclamdfft_check_fn', '') +ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns, 'openclamdfft_fn', 'OPENCLAMDFFT_FN') +ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns, 'openclamdfft_fn') +ctx['CL_NUMBER_OF_ENABLED_FUNCTIONS'] = '// number of enabled functions: %d' % numEnabled +sys.stdout = open('../autogenerated/opencl_clfft_impl.hpp', 'w') +ProcessTemplate('template/opencl_clfft_impl.hpp.in', ctx) + +# File: opencv-master/modules/dnn/misc/face_detector_accuracy.py +import os +import json +from fnmatch import fnmatch +from math import pi +import cv2 as cv +import argparse +import os +import sys +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval +parser = argparse.ArgumentParser(description='Evaluate OpenCV face detection algorithms using COCO evaluation tool, http://cocodataset.org/#detections-eval') +parser.add_argument('--proto', help='Path to .prototxt of Caffe model or .pbtxt of TensorFlow graph') +parser.add_argument('--model', help='Path to .caffemodel trained in Caffe or .pb from TensorFlow') +parser.add_argument('--cascade', help='Optional path to trained Haar cascade as an additional model for evaluation') +parser.add_argument('--ann', help='Path to text file with ground truth annotations') +parser.add_argument('--pics', help='Path to images root directory') +parser.add_argument('--fddb', help='Evaluate FDDB dataset, http://vis-www.cs.umass.edu/fddb/', action='store_true') +parser.add_argument('--wider', help='Evaluate WIDER FACE dataset, http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/', action='store_true') +args = parser.parse_args() +dataset = {} +dataset['images'] = [] +dataset['categories'] = [{'id': 0, 'name': 'face'}] +dataset['annotations'] = [] + +def 
ellipse2Rect(params): + rad_x = params[0] + rad_y = params[1] + angle = params[2] * 180.0 / pi + center_x = params[3] + center_y = params[4] + pts = cv.ellipse2Poly((int(center_x), int(center_y)), (int(rad_x), int(rad_y)), int(angle), 0, 360, 10) + rect = cv.boundingRect(pts) + left = rect[0] + top = rect[1] + right = rect[0] + rect[2] + bottom = rect[1] + rect[3] + return (left, top, right, bottom) + +def addImage(imagePath): + assert 'images' in dataset + imageId = len(dataset['images']) + dataset['images'].append({'id': int(imageId), 'file_name': imagePath}) + return imageId + +def addBBox(imageId, left, top, width, height): + assert 'annotations' in dataset + dataset['annotations'].append({'id': len(dataset['annotations']), 'image_id': int(imageId), 'category_id': 0, 'bbox': [int(left), int(top), int(width), int(height)], 'iscrowd': 0, 'area': float(width * height)}) + +def addDetection(detections, imageId, left, top, width, height, score): + detections.append({'image_id': int(imageId), 'category_id': 0, 'bbox': [int(left), int(top), int(width), int(height)], 'score': float(score)}) + +def fddb_dataset(annotations, images): + for d in os.listdir(annotations): + if fnmatch(d, 'FDDB-fold-*-ellipseList.txt'): + with open(os.path.join(annotations, d), 'rt') as f: + lines = [line.rstrip('\n') for line in f] + lineId = 0 + while lineId < len(lines): + imgPath = lines[lineId] + lineId += 1 + imageId = addImage(os.path.join(images, imgPath) + '.jpg') + img = cv.imread(os.path.join(images, imgPath) + '.jpg') + numFaces = int(lines[lineId]) + lineId += 1 + for i in range(numFaces): + params = [float(v) for v in lines[lineId].split()] + lineId += 1 + (left, top, right, bottom) = ellipse2Rect(params) + addBBox(imageId, left, top, width=right - left + 1, height=bottom - top + 1) + +def wider_dataset(annotations, images): + with open(annotations, 'rt') as f: + lines = [line.rstrip('\n') for line in f] + lineId = 0 + while lineId < len(lines): + imgPath = lines[lineId] + lineId += 1 + imageId = addImage(os.path.join(images, imgPath)) + numFaces = int(lines[lineId]) + lineId += 1 + for i in range(numFaces): + params = [int(v) for v in lines[lineId].split()] + lineId += 1 + (left, top, width, height) = (params[0], params[1], params[2], params[3]) + addBBox(imageId, left, top, width, height) + +def evaluate(): + cocoGt = COCO('annotations.json') + cocoDt = cocoGt.loadRes('detections.json') + cocoEval = COCOeval(cocoGt, cocoDt, 'bbox') + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() +assert args.fddb or args.wider +if args.fddb: + fddb_dataset(args.ann, args.pics) +elif args.wider: + wider_dataset(args.ann, args.pics) +with open('annotations.json', 'wt') as f: + json.dump(dataset, f) +detections = [] +if args.proto and args.model: + net = cv.dnn.readNet(args.proto, args.model) + + def detect(img, imageId): + imgWidth = img.shape[1] + imgHeight = img.shape[0] + net.setInput(cv.dnn.blobFromImage(img, 1.0, (300, 300), (104.0, 177.0, 123.0), False, False)) + out = net.forward() + for i in range(out.shape[2]): + confidence = out[0, 0, i, 2] + left = int(out[0, 0, i, 3] * img.shape[1]) + top = int(out[0, 0, i, 4] * img.shape[0]) + right = int(out[0, 0, i, 5] * img.shape[1]) + bottom = int(out[0, 0, i, 6] * img.shape[0]) + x = max(0, min(left, img.shape[1] - 1)) + y = max(0, min(top, img.shape[0] - 1)) + w = max(0, min(right - x + 1, img.shape[1] - x)) + h = max(0, min(bottom - y + 1, img.shape[0] - y)) + addDetection(detections, imageId, x, y, w, h, score=confidence) +elif args.cascade: + 
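+# (the elif branch above continues below) A tiny standalone recap, with hypothetical values, of the SSD decoding in detect(): the network outputs normalized [0..1] corners, which are scaled to pixels and clamped to the image before being stored as [x, y, w, h].
+imgW, imgH = 640, 480
+l, t, r, b = (int(0.1 * imgW), int(0.2 * imgH), int(0.5 * imgW), int(0.9 * imgH))
+x = max(0, min(l, imgW - 1))
+y = max(0, min(t, imgH - 1))
+w = max(0, min(r - x + 1, imgW - x))
+h = max(0, min(b - y + 1, imgH - y))
+assert (x, y, w, h) == (64, 96, 257, 337)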
cascade = cv.CascadeClassifier(args.cascade) + + def detect(img, imageId): + srcImgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + faces = cascade.detectMultiScale(srcImgGray) + for rect in faces: + (left, top, width, height) = (rect[0], rect[1], rect[2], rect[3]) + addDetection(detections, imageId, left, top, width, height, score=1.0) +for i in range(len(dataset['images'])): + sys.stdout.write('\r%d / %d' % (i + 1, len(dataset['images']))) + sys.stdout.flush() + img = cv.imread(dataset['images'][i]['file_name']) + imageId = int(dataset['images'][i]['id']) + detect(img, imageId) +with open('detections.json', 'wt') as f: + json.dump(detections, f) +evaluate() + +def rm(f): + if os.path.exists(f): + os.remove(f) +rm('annotations.json') +rm('detections.json') + +# File: opencv-master/modules/dnn/misc/quantize_face_detector.py +from __future__ import print_function +import sys +import argparse +import cv2 as cv +import tensorflow as tf +import numpy as np +import struct +if sys.version_info > (3,): + long = int +from tensorflow.python.tools import optimize_for_inference_lib +from tensorflow.tools.graph_transforms import TransformGraph +from tensorflow.core.framework.node_def_pb2 import NodeDef +from google.protobuf import text_format +parser = argparse.ArgumentParser(description="Use this script to create TensorFlow graph with weights from OpenCV's face detection network. Only backbone part of SSD model is converted this way. Look for .pbtxt configuration file at https://github.com/opencv/opencv_extra/tree/4.x/testdata/dnn/opencv_face_detector.pbtxt") +parser.add_argument('--model', help='Path to .caffemodel weights', required=True) +parser.add_argument('--proto', help='Path to .prototxt Caffe model definition', required=True) +parser.add_argument('--pb', help='Path to output .pb TensorFlow model', required=True) +parser.add_argument('--pbtxt', help='Path to output .pbxt TensorFlow graph', required=True) +parser.add_argument('--quantize', help='Quantize weights to uint8', action='store_true') +parser.add_argument('--fp16', help='Convert weights to half precision floats', action='store_true') +args = parser.parse_args() +assert not args.quantize or not args.fp16 +dtype = tf.float16 if args.fp16 else tf.float32 +cvNet = cv.dnn.readNetFromCaffe(args.proto, args.model) + +def dnnLayer(name): + return cvNet.getLayer(long(cvNet.getLayerId(name))) + +def scale(x, name): + with tf.variable_scope(name): + layer = dnnLayer(name) + w = tf.Variable(layer.blobs[0].flatten(), dtype=dtype, name='mul') + if len(layer.blobs) > 1: + b = tf.Variable(layer.blobs[1].flatten(), dtype=dtype, name='add') + return tf.nn.bias_add(tf.multiply(x, w), b) + else: + return tf.multiply(x, w, name) + +def conv(x, name, stride=1, pad='SAME', dilation=1, activ=None): + with tf.variable_scope(name): + layer = dnnLayer(name) + w = tf.Variable(layer.blobs[0].transpose(2, 3, 1, 0), dtype=dtype, name='weights') + if dilation == 1: + conv = tf.nn.conv2d(x, filter=w, strides=(1, stride, stride, 1), padding=pad) + else: + assert stride == 1 + conv = tf.nn.atrous_conv2d(x, w, rate=dilation, padding=pad) + if len(layer.blobs) > 1: + b = tf.Variable(layer.blobs[1].flatten(), dtype=dtype, name='bias') + conv = tf.nn.bias_add(conv, b) + return activ(conv) if activ else conv + +def batch_norm(x, name): + with tf.variable_scope(name): + if x.dtype != tf.float32: + x = tf.cast(x, tf.float32) + layer = dnnLayer(name) + assert len(layer.blobs) >= 3 + mean = layer.blobs[0].flatten() + std = layer.blobs[1].flatten() + scale = layer.blobs[2].flatten() 
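+# (the batch_norm body above continues below) A short check of the layout change conv() performs: Caffe stores convolution weights as (out, in, h, w) while tf.nn.conv2d expects (h, w, in, out), hence blobs[0].transpose(2, 3, 1, 0). Shapes are hypothetical.
+_w_caffe = np.zeros((64, 3, 7, 7))  # (out, in, h, w)
+assert _w_caffe.transpose(2, 3, 1, 0).shape == (7, 7, 3, 64)  # (h, w, in, out)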
+ eps = 1e-05 + hasBias = len(layer.blobs) > 3 + hasWeights = scale.shape != (1,) + if not hasWeights and (not hasBias): + mean /= scale[0] + std /= scale[0] + mean = tf.Variable(mean, dtype=tf.float32, name='mean') + std = tf.Variable(std, dtype=tf.float32, name='std') + gamma = tf.Variable(scale if hasWeights else np.ones(mean.shape), dtype=tf.float32, name='gamma') + beta = tf.Variable(layer.blobs[3].flatten() if hasBias else np.zeros(mean.shape), dtype=tf.float32, name='beta') + bn = tf.nn.fused_batch_norm(x, gamma, beta, mean, std, eps, is_training=False)[0] + if bn.dtype != dtype: + bn = tf.cast(bn, dtype) + return bn + +def l2norm(x, name): + with tf.variable_scope(name): + layer = dnnLayer(name) + w = tf.Variable(layer.blobs[0].flatten(), dtype=dtype, name='mul') + return tf.nn.l2_normalize(x, 3, epsilon=1e-10) * w +inp = tf.placeholder(dtype, [1, 300, 300, 3], 'data') +data_bn = batch_norm(inp, 'data_bn') +data_scale = scale(data_bn, 'data_scale') +data_scale = tf.space_to_batch_nd(data_scale, [1, 1], [[3, 3], [3, 3]], name='Pad') +conv1_h = conv(data_scale, stride=2, pad='VALID', name='conv1_h') +conv1_bn_h = batch_norm(conv1_h, 'conv1_bn_h') +conv1_scale_h = scale(conv1_bn_h, 'conv1_scale_h') +conv1_relu = tf.nn.relu(conv1_scale_h) +conv1_pool = tf.layers.max_pooling2d(conv1_relu, pool_size=(3, 3), strides=(2, 2), padding='SAME', name='conv1_pool') +layer_64_1_conv1_h = conv(conv1_pool, 'layer_64_1_conv1_h') +layer_64_1_bn2_h = batch_norm(layer_64_1_conv1_h, 'layer_64_1_bn2_h') +layer_64_1_scale2_h = scale(layer_64_1_bn2_h, 'layer_64_1_scale2_h') +layer_64_1_relu2 = tf.nn.relu(layer_64_1_scale2_h) +layer_64_1_conv2_h = conv(layer_64_1_relu2, 'layer_64_1_conv2_h') +layer_64_1_sum = layer_64_1_conv2_h + conv1_pool +layer_128_1_bn1_h = batch_norm(layer_64_1_sum, 'layer_128_1_bn1_h') +layer_128_1_scale1_h = scale(layer_128_1_bn1_h, 'layer_128_1_scale1_h') +layer_128_1_relu1 = tf.nn.relu(layer_128_1_scale1_h) +layer_128_1_conv1_h = conv(layer_128_1_relu1, stride=2, name='layer_128_1_conv1_h') +layer_128_1_bn2 = batch_norm(layer_128_1_conv1_h, 'layer_128_1_bn2') +layer_128_1_scale2 = scale(layer_128_1_bn2, 'layer_128_1_scale2') +layer_128_1_relu2 = tf.nn.relu(layer_128_1_scale2) +layer_128_1_conv2 = conv(layer_128_1_relu2, 'layer_128_1_conv2') +layer_128_1_conv_expand_h = conv(layer_128_1_relu1, stride=2, name='layer_128_1_conv_expand_h') +layer_128_1_sum = layer_128_1_conv2 + layer_128_1_conv_expand_h +layer_256_1_bn1 = batch_norm(layer_128_1_sum, 'layer_256_1_bn1') +layer_256_1_scale1 = scale(layer_256_1_bn1, 'layer_256_1_scale1') +layer_256_1_relu1 = tf.nn.relu(layer_256_1_scale1) +layer_256_1_conv1 = tf.space_to_batch_nd(layer_256_1_relu1, [1, 1], [[1, 1], [1, 1]], name='Pad_1') +layer_256_1_conv1 = conv(layer_256_1_conv1, stride=2, pad='VALID', name='layer_256_1_conv1') +layer_256_1_bn2 = batch_norm(layer_256_1_conv1, 'layer_256_1_bn2') +layer_256_1_scale2 = scale(layer_256_1_bn2, 'layer_256_1_scale2') +layer_256_1_relu2 = tf.nn.relu(layer_256_1_scale2) +layer_256_1_conv2 = conv(layer_256_1_relu2, 'layer_256_1_conv2') +layer_256_1_conv_expand = conv(layer_256_1_relu1, stride=2, name='layer_256_1_conv_expand') +layer_256_1_sum = layer_256_1_conv2 + layer_256_1_conv_expand +layer_512_1_bn1 = batch_norm(layer_256_1_sum, 'layer_512_1_bn1') +layer_512_1_scale1 = scale(layer_512_1_bn1, 'layer_512_1_scale1') +layer_512_1_relu1 = tf.nn.relu(layer_512_1_scale1) +layer_512_1_conv1_h = conv(layer_512_1_relu1, 'layer_512_1_conv1_h') +layer_512_1_bn2_h = batch_norm(layer_512_1_conv1_h, 
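+# (the batch_norm call above continues below) In inference mode the fused_batch_norm call reduces to y = gamma * (x - mean) / sqrt(var + eps) + beta; note that Caffe's second BatchNorm blob appears to hold the variance, although the code names it 'std'. A scalar sanity check with hypothetical values:
+_y = 0.5 * (2.0 - 1.0) / np.sqrt(4.0 + 1e-05) + 0.1
+assert round(_y, 5) == 0.35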
'layer_512_1_bn2_h') +layer_512_1_scale2_h = scale(layer_512_1_bn2_h, 'layer_512_1_scale2_h') +layer_512_1_relu2 = tf.nn.relu(layer_512_1_scale2_h) +layer_512_1_conv2_h = conv(layer_512_1_relu2, dilation=2, name='layer_512_1_conv2_h') +layer_512_1_conv_expand_h = conv(layer_512_1_relu1, 'layer_512_1_conv_expand_h') +layer_512_1_sum = layer_512_1_conv2_h + layer_512_1_conv_expand_h +last_bn_h = batch_norm(layer_512_1_sum, 'last_bn_h') +last_scale_h = scale(last_bn_h, 'last_scale_h') +fc7 = tf.nn.relu(last_scale_h, name='last_relu') +conv6_1_h = conv(fc7, 'conv6_1_h', activ=tf.nn.relu) +conv6_2_h = conv(conv6_1_h, stride=2, name='conv6_2_h', activ=tf.nn.relu) +conv7_1_h = conv(conv6_2_h, 'conv7_1_h', activ=tf.nn.relu) +conv7_2_h = tf.space_to_batch_nd(conv7_1_h, [1, 1], [[1, 1], [1, 1]], name='Pad_2') +conv7_2_h = conv(conv7_2_h, stride=2, pad='VALID', name='conv7_2_h', activ=tf.nn.relu) +conv8_1_h = conv(conv7_2_h, pad='SAME', name='conv8_1_h', activ=tf.nn.relu) +conv8_2_h = conv(conv8_1_h, pad='VALID', name='conv8_2_h', activ=tf.nn.relu) +conv9_1_h = conv(conv8_2_h, 'conv9_1_h', activ=tf.nn.relu) +conv9_2_h = conv(conv9_1_h, pad='VALID', name='conv9_2_h', activ=tf.nn.relu) +conv4_3_norm = l2norm(layer_256_1_relu1, 'conv4_3_norm') +locations = [] +confidences = [] +flattenLayersNames = [] +for (top, suffix) in zip([locations, confidences], ['_mbox_loc', '_mbox_conf']): + for (bottom, name) in zip([conv4_3_norm, fc7, conv6_2_h, conv7_2_h, conv8_2_h, conv9_2_h], ['conv4_3_norm', 'fc7', 'conv6_2', 'conv7_2', 'conv8_2', 'conv9_2']): + name += suffix + flat = tf.layers.flatten(conv(bottom, name)) + flattenLayersNames.append(flat.name[:flat.name.find(':')]) + top.append(flat) +mbox_loc = tf.concat(locations, axis=-1, name='mbox_loc') +mbox_conf = tf.concat(confidences, axis=-1, name='mbox_conf') +total = int(np.prod(mbox_conf.shape[1:])) +mbox_conf_reshape = tf.reshape(mbox_conf, [-1, 2], name='mbox_conf_reshape') +mbox_conf_softmax = tf.nn.softmax(mbox_conf_reshape, name='mbox_conf_softmax') +mbox_conf_flatten = tf.reshape(mbox_conf_softmax, [-1, total], name='mbox_conf_flatten') +flattenLayersNames.append('mbox_conf_flatten') +with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + out_nodes = ['mbox_loc', 'mbox_conf_flatten'] + inp_nodes = [inp.name[:inp.name.find(':')]] + np.random.seed(2701) + inputData = np.random.standard_normal([1, 3, 300, 300]).astype(np.float32) + cvNet.setInput(inputData) + cvNet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV) + outDNN = cvNet.forward(out_nodes) + outTF = sess.run([mbox_loc, mbox_conf_flatten], feed_dict={inp: inputData.transpose(0, 2, 3, 1)}) + print('Max diff @ locations: %e' % np.max(np.abs(outDNN[0] - outTF[0]))) + print('Max diff @ confidence: %e' % np.max(np.abs(outDNN[1] - outTF[1]))) + graph_def = sess.graph.as_graph_def() + graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, out_nodes) + graph_def = optimize_for_inference_lib.optimize_for_inference(graph_def, inp_nodes, out_nodes, dtype.as_datatype_enum) + transforms = ['fold_constants(ignore_errors=True)'] + if args.quantize: + transforms += ['quantize_weights(minimum_size=0)'] + transforms += ['sort_by_execution_order'] + graph_def = TransformGraph(graph_def, inp_nodes, out_nodes, transforms) + for node in graph_def.node: + if 'value' in node.attr: + halfs = node.attr['value'].tensor.half_val + if not node.attr['value'].tensor.tensor_content and halfs: + node.attr['value'].tensor.tensor_content = struct.pack('H' * len(halfs), *halfs) + 
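+# (the loop body above continues below) A standalone recap of the fp16 repacking done above: TensorFlow may keep half-precision weights in the repeated half_val field, and struct.pack('H' * n, ...) serializes those 16-bit payloads into tensor_content bytes (shown for a little-endian host; values are hypothetical).
+_halfs = [15360, 16384]  # IEEE fp16 bit patterns of 1.0 and 2.0
+assert struct.pack('H' * len(_halfs), *_halfs) == b'\x00<\x00@'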
node.attr['value'].tensor.ClearField('half_val') + with tf.gfile.FastGFile(args.pb, 'wb') as f: + f.write(graph_def.SerializeToString()) + +def tensorMsg(values): + msg = 'tensor { dtype: DT_FLOAT tensor_shape { dim { size: %d } }' % len(values) + for value in values: + msg += 'float_val: %f ' % value + return msg + '}' +for i in reversed(range(len(graph_def.node))): + if graph_def.node[i].op in ['Const', 'Dequantize']: + del graph_def.node[i] + for attr in ['T', 'data_format', 'Tshape', 'N', 'Tidx', 'Tdim', 'use_cudnn_on_gpu', 'Index', 'Tperm', 'is_training', 'Tpaddings', 'Tblock_shape', 'Tcrops']: + if attr in graph_def.node[i].attr: + del graph_def.node[i].attr[attr] +min_sizes = [30, 60, 111, 162, 213, 264] +max_sizes = [60, 111, 162, 213, 264, 315] +steps = [8, 16, 32, 64, 100, 300] +aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]] +layers = [conv4_3_norm, fc7, conv6_2_h, conv7_2_h, conv8_2_h, conv9_2_h] +for i in range(6): + priorBox = NodeDef() + priorBox.name = 'PriorBox_%d' % i + priorBox.op = 'PriorBox' + priorBox.input.append(layers[i].name[:layers[i].name.find(':')]) + priorBox.input.append(inp_nodes[0]) + text_format.Merge('i: %d' % min_sizes[i], priorBox.attr['min_size']) + text_format.Merge('i: %d' % max_sizes[i], priorBox.attr['max_size']) + text_format.Merge('b: true', priorBox.attr['flip']) + text_format.Merge('b: false', priorBox.attr['clip']) + text_format.Merge(tensorMsg(aspect_ratios[i]), priorBox.attr['aspect_ratio']) + text_format.Merge(tensorMsg([0.1, 0.1, 0.2, 0.2]), priorBox.attr['variance']) + text_format.Merge('f: %f' % steps[i], priorBox.attr['step']) + text_format.Merge('f: 0.5', priorBox.attr['offset']) + graph_def.node.extend([priorBox]) +concat = NodeDef() +concat.name = 'mbox_priorbox' +concat.op = 'ConcatV2' +for i in range(6): + concat.input.append('PriorBox_%d' % i) +concat.input.append('mbox_loc/axis') +graph_def.node.extend([concat]) +detectionOut = NodeDef() +detectionOut.name = 'detection_out' +detectionOut.op = 'DetectionOutput' +detectionOut.input.append('mbox_loc') +detectionOut.input.append('mbox_conf_flatten') +detectionOut.input.append('mbox_priorbox') +text_format.Merge('i: 2', detectionOut.attr['num_classes']) +text_format.Merge('b: true', detectionOut.attr['share_location']) +text_format.Merge('i: 0', detectionOut.attr['background_label_id']) +text_format.Merge('f: 0.45', detectionOut.attr['nms_threshold']) +text_format.Merge('i: 400', detectionOut.attr['top_k']) +text_format.Merge('s: "CENTER_SIZE"', detectionOut.attr['code_type']) +text_format.Merge('i: 200', detectionOut.attr['keep_top_k']) +text_format.Merge('f: 0.01', detectionOut.attr['confidence_threshold']) +graph_def.node.extend([detectionOut]) +for i in reversed(range(len(graph_def.node))): + if graph_def.node[i].name in ['conv4_3_norm/l2_normalize/Square', 'conv4_3_norm/l2_normalize/Sum', 'conv4_3_norm/l2_normalize/Maximum', 'conv4_3_norm/l2_normalize/Rsqrt']: + del graph_def.node[i] +for node in graph_def.node: + if node.name == 'conv4_3_norm/l2_normalize': + node.op = 'L2Normalize' + node.input.pop() + node.input.pop() + node.input.append(layer_256_1_relu1.name) + node.input.append('conv4_3_norm/l2_normalize/Sum/reduction_indices') + break +softmaxShape = NodeDef() +softmaxShape.name = 'reshape_before_softmax' +softmaxShape.op = 'Const' +text_format.Merge('tensor { dtype: DT_INT32 tensor_shape { dim { size: 3 } } int_val: 0 int_val: -1 int_val: 2}', softmaxShape.attr['value']) +graph_def.node.extend([softmaxShape]) +for node in graph_def.node: + if node.name == 
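+# (the comparison above continues below) The surrounding block splices OpenCV-specific ops into the GraphDef by hand: a NodeDef gets its name/op/inputs set directly, and attrs are filled by parsing textual protos, e.g. text_format.Merge('f: 0.5', node.attr['offset']). Plain TensorFlow cannot run PriorBox/DetectionOutput; they are understood by OpenCV's TensorFlow importer when it reads the resulting .pbtxt.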
'mbox_conf_reshape': + node.input[1] = softmaxShape.name + elif node.name == 'mbox_conf_softmax': + text_format.Merge('i: 2', node.attr['axis']) + elif node.name in flattenLayersNames: + node.op = 'Flatten' + inpName = node.input[0] + node.input.pop() + node.input.pop() + node.input.append(inpName) +tf.train.write_graph(graph_def, '', args.pbtxt, as_text=True) + +# File: opencv-master/modules/dnn/src/vkcom/shader/spirv_generator.py +import re +import os +import sys +dir = './' +license_decl = '// This file is part of OpenCV project.\n// It is subject to the license terms in the LICENSE file found in the top-level directory\n// of this distribution and at http://opencv.org/license.html.\n\n' +precomp = '#include "../../precomp.hpp"\n' +ns_head = '\nnamespace cv { namespace dnn { namespace vkcom {\n\n' +ns_tail = '\n}}} // namespace cv::dnn::vkcom\n' +headfile = open('spv_shader.hpp', 'w') +headfile.write(license_decl) +headfile.write('#ifndef OPENCV_DNN_SPV_SHADER_HPP\n') +headfile.write('#define OPENCV_DNN_SPV_SHADER_HPP\n\n') +headfile.write(ns_head) +cppfile = open('spv_shader.cpp', 'w') +cppfile.write(license_decl) +cppfile.write(precomp) +cppfile.write('#include "spv_shader.hpp"\n') +cppfile.write(ns_head) +cmd_remove = '' +null_out = '' +if sys.platform.find('win32') != -1: + cmd_remove = 'del' + null_out = ' >>nul 2>nul' +elif sys.platform.find('linux') != -1: + cmd_remove = 'rm' + null_out = ' > /dev/null 2>&1' +else: + cmd_remove = 'rm' +insertList = [] +externList = [] +list = os.listdir(dir) +for i in range(0, len(list)): + if os.path.splitext(list[i])[-1] != '.comp': + continue + prefix = os.path.splitext(list[i])[0] + path = os.path.join(dir, list[i]) + bin_file = prefix + '.tmp' + cmd = ' glslangValidator -V ' + path + ' -S comp -o ' + bin_file + print('Run cmd = ', cmd) + if os.system(cmd) != 0: + continue + size = os.path.getsize(bin_file) + spv_txt_file = prefix + '.spv' + cmd = 'glslangValidator -V ' + path + ' -S comp -o ' + spv_txt_file + ' -x' + os.system(cmd) + infile_name = spv_txt_file + outfile_name = prefix + '_spv.cpp' + array_name = prefix + '_spv' + infile = open(infile_name, 'r') + outfile = open(outfile_name, 'w') + outfile.write(license_decl) + outfile.write(precomp) + outfile.write(ns_head) + fmt = 'extern const unsigned int %s[%d] = {\n' % (array_name, size / 4) + outfile.write(fmt) + for eachLine in infile: + if re.match('^.*\\/\\/', eachLine): + continue + newline = ' ' + eachLine.replace('\t', '') + outfile.write(newline) + infile.close() + outfile.write('};\n') + outfile.write(ns_tail) + fmt = 'extern const unsigned int %s[%d];\n' % (array_name, size / 4) + externList.append(fmt) + fmt = ' SPVMaps.insert(std::make_pair("%s", std::make_pair(%s, %d)));\n' % (array_name, array_name, size / 4) + insertList.append(fmt) + os.system(cmd_remove + ' ' + bin_file) + os.system(cmd_remove + ' ' + spv_txt_file) +for fmt in externList: + headfile.write(fmt) +headfile.write('\n') +headfile.write('extern std::map<std::string, std::pair<const unsigned int *, size_t> > SPVMaps;\n\n') +headfile.write('void initSPVMaps();\n') +headfile.write(ns_tail) +headfile.write('\n#endif /* OPENCV_DNN_SPV_SHADER_HPP */\n') +headfile.close() +cppfile.write('std::map<std::string, std::pair<const unsigned int *, size_t> > SPVMaps;\n\n') +cppfile.write('void initSPVMaps()\n{\n') +for fmt in insertList: + cppfile.write(fmt) +cppfile.write('}\n') +cppfile.write(ns_tail) +cppfile.close() + +# File: opencv-master/modules/gapi/misc/python/package/gapi/__init__.py +__all__ = ['op', 'kernel'] +import sys +import cv2 as cv + +def register(mname): + + def parameterized(func): + 
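+# (the decorator body continues below) register(mname) is a decorator factory: the inner 'parameterized' wrapper installs the decorated function into the module named mname via sys.modules, which is how the helpers below surface as cv.gapi.networks(), cv.GIn(), cv.gin(), etc. A hypothetical use:
+#
+#   @register('cv2.gapi')
+#   def my_helper(*args):      # becomes callable as cv2.gapi.my_helper(...)
+#       return list(args)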
sys.modules[mname].__dict__[func.__name__] = func + return func + return parameterized + +@register('cv2.gapi') +def networks(*args): + return cv.gapi_GNetPackage(list(map(cv.detail.strip, args))) + +@register('cv2.gapi') +def compile_args(*args): + return list(map(cv.GCompileArg, args)) + +@register('cv2') +def GIn(*args): + return [*args] + +@register('cv2') +def GOut(*args): + return [*args] + +@register('cv2') +def gin(*args): + return [*args] + +@register('cv2.gapi') +def descr_of(*args): + return [*args] + +@register('cv2') +class GOpaque: + + def __new__(cls, argtype): + return cv.GOpaqueT(argtype) + + class Bool: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_BOOL) + + class Int: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_INT) + + class Int64: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_INT64) + + class UInt64: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_UINT64) + + class Double: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_DOUBLE) + + class Float: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_FLOAT) + + class String: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_STRING) + + class Point: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_POINT) + + class Point2f: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_POINT2F) + + class Point3f: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_POINT3F) + + class Size: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_SIZE) + + class Rect: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_RECT) + + class Prim: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_DRAW_PRIM) + + class Any: + + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_ANY) + +@register('cv2') +class GArray: + + def __new__(cls, argtype): + return cv.GArrayT(argtype) + + class Bool: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_BOOL) + + class Int: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_INT) + + class Int64: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_INT64) + + class UInt64: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_UINT64) + + class Double: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_DOUBLE) + + class Float: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_FLOAT) + + class String: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_STRING) + + class Point: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_POINT) + + class Point2f: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_POINT2F) + + class Point3f: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_POINT3F) + + class Size: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_SIZE) + + class Rect: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_RECT) + + class Scalar: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_SCALAR) + + class Mat: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_MAT) + + class GMat: + + def __new__(self): + return cv.GArrayT(cv.gapi.CV_GMAT) + + class Prim: + + def __new__(self): + return cv.GArray(cv.gapi.CV_DRAW_PRIM) + + class Any: + + def __new__(self): + return cv.GArray(cv.gapi.CV_ANY) + +def op(op_id, in_types, out_types): + garray_types = {cv.GArray.Bool: cv.gapi.CV_BOOL, cv.GArray.Int: cv.gapi.CV_INT, cv.GArray.Int64: cv.gapi.CV_INT64, cv.GArray.UInt64: cv.gapi.CV_UINT64, cv.GArray.Double: cv.gapi.CV_DOUBLE, cv.GArray.Float: cv.gapi.CV_FLOAT, cv.GArray.String: cv.gapi.CV_STRING, cv.GArray.Point: cv.gapi.CV_POINT, cv.GArray.Point2f: cv.gapi.CV_POINT2F, cv.GArray.Point3f: cv.gapi.CV_POINT3F, 
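+# (the garray_types mapping continues below) The wrapper classes defined above create strongly typed graph placeholders, e.g. cv.GArray.Int() yields a GArrayT tagged cv.gapi.CV_INT and cv.GOpaque.Size() a GOpaqueT tagged cv.gapi.CV_SIZE. The dicts being built here let op() map those wrapper classes back to their CV_* tags when it validates arguments and constructs outputs.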
cv.GArray.Size: cv.gapi.CV_SIZE, cv.GArray.Rect: cv.gapi.CV_RECT, cv.GArray.Scalar: cv.gapi.CV_SCALAR, cv.GArray.Mat: cv.gapi.CV_MAT, cv.GArray.GMat: cv.gapi.CV_GMAT, cv.GArray.Prim: cv.gapi.CV_DRAW_PRIM, cv.GArray.Any: cv.gapi.CV_ANY} + gopaque_types = {cv.GOpaque.Size: cv.gapi.CV_SIZE, cv.GOpaque.Rect: cv.gapi.CV_RECT, cv.GOpaque.Bool: cv.gapi.CV_BOOL, cv.GOpaque.Int: cv.gapi.CV_INT, cv.GOpaque.Int64: cv.gapi.CV_INT64, cv.GOpaque.UInt64: cv.gapi.CV_UINT64, cv.GOpaque.Double: cv.gapi.CV_DOUBLE, cv.GOpaque.Float: cv.gapi.CV_FLOAT, cv.GOpaque.String: cv.gapi.CV_STRING, cv.GOpaque.Point: cv.gapi.CV_POINT, cv.GOpaque.Point2f: cv.gapi.CV_POINT2F, cv.GOpaque.Point3f: cv.gapi.CV_POINT3F, cv.GOpaque.Size: cv.gapi.CV_SIZE, cv.GOpaque.Rect: cv.gapi.CV_RECT, cv.GOpaque.Prim: cv.gapi.CV_DRAW_PRIM, cv.GOpaque.Any: cv.gapi.CV_ANY} + type2str = {cv.gapi.CV_BOOL: 'cv.gapi.CV_BOOL', cv.gapi.CV_INT: 'cv.gapi.CV_INT', cv.gapi.CV_INT64: 'cv.gapi.CV_INT64', cv.gapi.CV_UINT64: 'cv.gapi.CV_UINT64', cv.gapi.CV_DOUBLE: 'cv.gapi.CV_DOUBLE', cv.gapi.CV_FLOAT: 'cv.gapi.CV_FLOAT', cv.gapi.CV_STRING: 'cv.gapi.CV_STRING', cv.gapi.CV_POINT: 'cv.gapi.CV_POINT', cv.gapi.CV_POINT2F: 'cv.gapi.CV_POINT2F', cv.gapi.CV_POINT3F: 'cv.gapi.CV_POINT3F', cv.gapi.CV_SIZE: 'cv.gapi.CV_SIZE', cv.gapi.CV_RECT: 'cv.gapi.CV_RECT', cv.gapi.CV_SCALAR: 'cv.gapi.CV_SCALAR', cv.gapi.CV_MAT: 'cv.gapi.CV_MAT', cv.gapi.CV_GMAT: 'cv.gapi.CV_GMAT', cv.gapi.CV_DRAW_PRIM: 'cv.gapi.CV_DRAW_PRIM'} + + def op_with_params(cls): + if not in_types: + raise Exception('{} operation should have at least one input!'.format(cls.__name__)) + if not out_types: + raise Exception('{} operation should have at least one output!'.format(cls.__name__)) + for (i, t) in enumerate(out_types): + if t not in [cv.GMat, cv.GScalar, *garray_types, *gopaque_types]: + raise Exception('{} unsupported output type: {} in position: {}'.format(cls.__name__, t.__name__, i)) + + def on(*args): + if len(in_types) != len(args): + raise Exception('Invalid number of input elements!\nExpected: {}, Actual: {}'.format(len(in_types), len(args))) + for (i, (t, a)) in enumerate(zip(in_types, args)): + if t in garray_types: + if not isinstance(a, cv.GArrayT): + raise Exception('{} invalid type for argument {}.\nExpected: {}, Actual: {}'.format(cls.__name__, i, cv.GArrayT.__name__, type(a).__name__)) + elif a.type() != garray_types[t]: + raise Exception('{} invalid GArrayT type for argument {}.\nExpected: {}, Actual: {}'.format(cls.__name__, i, type2str[garray_types[t]], type2str[a.type()])) + elif t in gopaque_types: + if not isinstance(a, cv.GOpaqueT): + raise Exception('{} invalid type for argument {}.\nExpected: {}, Actual: {}'.format(cls.__name__, i, cv.GOpaqueT.__name__, type(a).__name__)) + elif a.type() != gopaque_types[t]: + raise Exception('{} invalid GOpaque type for argument {}.\nExpected: {}, Actual: {}'.format(cls.__name__, i, type2str[gopaque_types[t]], type2str[a.type()])) + elif t != type(a): + raise Exception('{} invalid input type for argument {}.\nExpected: {}, Actual: {}'.format(cls.__name__, i, t.__name__, type(a).__name__)) + op = cv.gapi.__op(op_id, cls.outMeta, *args) + out_protos = [] + for (i, out_type) in enumerate(out_types): + if out_type == cv.GMat: + out_protos.append(op.getGMat()) + elif out_type == cv.GScalar: + out_protos.append(op.getGScalar()) + elif out_type in gopaque_types: + out_protos.append(op.getGOpaque(gopaque_types[out_type])) + elif out_type in garray_types: + out_protos.append(op.getGArray(garray_types[out_type])) + else: + raise Exception("In {}: 
G-API operation can't produce the output with type: {} in position: {}".format(cls.__name__, out_type.__name__, i)) + return tuple(out_protos) if len(out_protos) != 1 else out_protos[0] + cls.id = op_id + cls.on = staticmethod(on) + return cls + return op_with_params + +def kernel(op_cls): + + def kernel_with_params(cls): + cls.id = op_cls.id + cls.outMeta = op_cls.outMeta + return cls + return kernel_with_params +cv.gapi.wip.GStreamerPipeline = cv.gapi_wip_gst_GStreamerPipeline + +# File: opencv-master/modules/gapi/misc/python/samples/gaze_estimation.py +import argparse +import time +import numpy as np +import cv2 as cv + +def weight_path(model_path): + assert model_path.endswith('.xml'), 'Wrong topology path was provided' + return model_path[:-3] + 'bin' + +def build_argparser(): + parser = argparse.ArgumentParser(description='This is an OpenCV-based version of Gaze Estimation example') + parser.add_argument('--input', help='Path to the input video file or camera device number') + parser.add_argument('--out', help='Path to the output video file') + parser.add_argument('--facem', default='face-detection-retail-0005.xml', help='Path to OpenVINO face detection model (.xml)') + parser.add_argument('--faced', default='CPU', help='Target device for the face detection ' + '(e.g. CPU, GPU, VPU, ...)') + parser.add_argument('--headm', default='head-pose-estimation-adas-0001.xml', help='Path to OpenVINO head pose estimation model (.xml)') + parser.add_argument('--headd', default='CPU', help='Target device for the head pose estimation inference ' + '(e.g. CPU, GPU, VPU, ...)') + parser.add_argument('--landm', default='facial-landmarks-35-adas-0002.xml', help='Path to OpenVINO landmarks detector model (.xml)') + parser.add_argument('--landd', default='CPU', help='Target device for the landmarks detector (e.g. CPU, GPU, VPU, ...)') + parser.add_argument('--gazem', default='gaze-estimation-adas-0002.xml', help='Path to OpenVINO gaze vector estimation model (.xml)') + parser.add_argument('--gazed', default='CPU', help='Target device for the gaze vector estimation inference ' + '(e.g. CPU, GPU, VPU, ...)') + parser.add_argument('--eyem', default='open-closed-eye-0001.xml', help='Path to OpenVINO open closed eye model (.xml)') + parser.add_argument('--eyed', default='CPU', help='Target device for the eyes state inference (e.g. 
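+# (the help string above continues below) A hypothetical invocation of this sample, with the OpenVINO model files expected next to the script:
+#   python gaze_estimation.py --input 0 --facem face-detection-retail-0005.xml \
+#       --headm head-pose-estimation-adas-0001.xml --landm facial-landmarks-35-adas-0002.xml \
+#       --gazem gaze-estimation-adas-0002.xml --eyem open-closed-eye-0001.xml
+# Each --*d option selects the inference device for one network (CPU by default).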
CPU, GPU, VPU, ...)') + return parser + +def intersection(surface, rect): + l_x = max(surface[0], rect[0]) + l_y = max(surface[1], rect[1]) + width = min(surface[0] + surface[2], rect[0] + rect[2]) - l_x + height = min(surface[1] + surface[3], rect[1] + rect[3]) - l_y + if width < 0 or height < 0: + return (0, 0, 0, 0) + return (l_x, l_y, width, height) + +def process_landmarks(r_x, r_y, r_w, r_h, landmarks): + lmrks = landmarks[0] + raw_x = lmrks[::2] * r_w + r_x + raw_y = lmrks[1::2] * r_h + r_y + return np.array([[int(x), int(y)] for (x, y) in zip(raw_x, raw_y)]) + +def eye_box(p_1, p_2, scale=1.8): + size = np.linalg.norm(p_1 - p_2) + midpoint = (p_1 + p_2) / 2 + width = scale * size + height = width + p_x = midpoint[0] - width / 2 + p_y = midpoint[1] - height / 2 + return ((int(p_x), int(p_y), int(width), int(height)), list(map(int, midpoint))) + +@cv.gapi.op('custom.GProcessPoses', in_types=[cv.GArray.GMat, cv.GArray.GMat, cv.GArray.GMat], out_types=[cv.GArray.GMat]) +class GProcessPoses: + + @staticmethod + def outMeta(arr_desc0, arr_desc1, arr_desc2): + return cv.empty_array_desc() + +@cv.gapi.op('custom.GParseEyes', in_types=[cv.GArray.GMat, cv.GArray.Rect, cv.GOpaque.Size], out_types=[cv.GArray.Rect, cv.GArray.Rect, cv.GArray.Point, cv.GArray.Point]) +class GParseEyes: + + @staticmethod + def outMeta(arr_desc0, arr_desc1, arr_desc2): + return (cv.empty_array_desc(), cv.empty_array_desc(), cv.empty_array_desc(), cv.empty_array_desc()) + +@cv.gapi.op('custom.GGetStates', in_types=[cv.GArray.GMat, cv.GArray.GMat], out_types=[cv.GArray.Int, cv.GArray.Int]) +class GGetStates: + + @staticmethod + def outMeta(arr_desc0, arr_desc1): + return (cv.empty_array_desc(), cv.empty_array_desc()) + +@cv.gapi.kernel(GProcessPoses) +class GProcessPosesImpl: + + @staticmethod + def run(in_ys, in_ps, in_rs): + return [np.array([ys[0], ps[0], rs[0]]).T for (ys, ps, rs) in zip(in_ys, in_ps, in_rs)] + +@cv.gapi.kernel(GParseEyes) +class GParseEyesImpl: + + @staticmethod + def run(in_landm_per_face, in_face_rcs, frame_size): + left_eyes = [] + right_eyes = [] + midpoints = [] + lmarks = [] + surface = (0, 0, *frame_size) + for (landm_face, rect) in zip(in_landm_per_face, in_face_rcs): + points = process_landmarks(*rect, landm_face) + lmarks.extend(points) + (rect, midpoint_l) = eye_box(points[0], points[1]) + left_eyes.append(intersection(surface, rect)) + (rect, midpoint_r) = eye_box(points[2], points[3]) + right_eyes.append(intersection(surface, rect)) + midpoints.append(midpoint_l) + midpoints.append(midpoint_r) + return (left_eyes, right_eyes, midpoints, lmarks) + +@cv.gapi.kernel(GGetStates) +class GGetStatesImpl: + + @staticmethod + def run(eyesl, eyesr): + out_l_st = [int(st) for eye_l in eyesl for st in (eye_l[:, 0] < eye_l[:, 1]).ravel()] + out_r_st = [int(st) for eye_r in eyesr for st in (eye_r[:, 0] < eye_r[:, 1]).ravel()] + return (out_l_st, out_r_st) +if __name__ == '__main__': + ARGUMENTS = build_argparser().parse_args() + g_in = cv.GMat() + face_inputs = cv.GInferInputs() + face_inputs.setInput('data', g_in) + face_outputs = cv.gapi.infer('face-detection', face_inputs) + faces = face_outputs.at('detection_out') + sz = cv.gapi.streaming.size(g_in) + faces_rc = cv.gapi.parseSSD(faces, sz, 0.5, False, False) + head_inputs = cv.GInferInputs() + head_inputs.setInput('data', g_in) + face_outputs = cv.gapi.infer('head-pose', faces_rc, head_inputs) + angles_y = face_outputs.at('angle_y_fc') + angles_p = face_outputs.at('angle_p_fc') + angles_r = face_outputs.at('angle_r_fc') + heads_pos = 
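+# (the assignment above continues below) The custom ops above follow G-API's two-part pattern: @cv.gapi.op declares the interface (an op id, in_types, out_types) plus an outMeta that describes output metadata, and @cv.gapi.kernel binds a Python run() to that interface. A minimal hypothetical pair:
+#
+#   @cv.gapi.op('custom.Add1', in_types=[cv.GArray.Int], out_types=[cv.GArray.Int])
+#   class GAdd1:
+#       @staticmethod
+#       def outMeta(desc):
+#           return cv.empty_array_desc()
+#
+#   @cv.gapi.kernel(GAdd1)
+#   class GAdd1Impl:
+#       @staticmethod
+#       def run(xs):
+#           return [x + 1 for x in xs]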
GProcessPoses.on(angles_y, angles_p, angles_r) + landmark_inputs = cv.GInferInputs() + landmark_inputs.setInput('data', g_in) + landmark_outputs = cv.gapi.infer('facial-landmarks', faces_rc, landmark_inputs) + landmark = landmark_outputs.at('align_fc3') + (left_eyes, right_eyes, mids, lmarks) = GParseEyes.on(landmark, faces_rc, sz) + eyes_inputs = cv.GInferInputs() + eyes_inputs.setInput('input.1', g_in) + eyesl_outputs = cv.gapi.infer('open-closed-eye', left_eyes, eyes_inputs) + eyesr_outputs = cv.gapi.infer('open-closed-eye', right_eyes, eyes_inputs) + eyesl = eyesl_outputs.at('19') + eyesr = eyesr_outputs.at('19') + (l_eye_st, r_eye_st) = GGetStates.on(eyesl, eyesr) + gaze_inputs = cv.GInferListInputs() + gaze_inputs.setInput('left_eye_image', left_eyes) + gaze_inputs.setInput('right_eye_image', right_eyes) + gaze_inputs.setInput('head_pose_angles', heads_pos) + gaze_outputs = cv.gapi.infer2('gaze-estimation', g_in, gaze_inputs) + gaze_vectors = gaze_outputs.at('gaze_vector') + out = cv.gapi.copy(g_in) + comp = cv.GComputation(cv.GIn(g_in), cv.GOut(out, faces_rc, left_eyes, right_eyes, gaze_vectors, angles_y, angles_p, angles_r, l_eye_st, r_eye_st, mids, lmarks)) + face_net = cv.gapi.ie.params('face-detection', ARGUMENTS.facem, weight_path(ARGUMENTS.facem), ARGUMENTS.faced) + head_pose_net = cv.gapi.ie.params('head-pose', ARGUMENTS.headm, weight_path(ARGUMENTS.headm), ARGUMENTS.headd) + landmarks_net = cv.gapi.ie.params('facial-landmarks', ARGUMENTS.landm, weight_path(ARGUMENTS.landm), ARGUMENTS.landd) + gaze_net = cv.gapi.ie.params('gaze-estimation', ARGUMENTS.gazem, weight_path(ARGUMENTS.gazem), ARGUMENTS.gazed) + eye_net = cv.gapi.ie.params('open-closed-eye', ARGUMENTS.eyem, weight_path(ARGUMENTS.eyem), ARGUMENTS.eyed) + nets = cv.gapi.networks(face_net, head_pose_net, landmarks_net, gaze_net, eye_net) + kernels = cv.gapi.kernels(GParseEyesImpl, GProcessPosesImpl, GGetStatesImpl) + ccomp = comp.compileStreaming(args=cv.gapi.compile_args(kernels, nets)) + if ARGUMENTS.input.isdigit(): + source = cv.gapi.wip.make_capture_src(int(ARGUMENTS.input)) + else: + source = cv.gapi.wip.make_capture_src(ARGUMENTS.input) + ccomp.setSource(cv.gin(source)) + ccomp.start() + frames = 0 + fps = 0 + print('Processing') + START_TIME = time.time() + while True: + start_time_cycle = time.time() + (has_frame, (oimg, outr, l_eyes, r_eyes, outg, out_y, out_p, out_r, out_st_l, out_st_r, out_mids, outl)) = ccomp.pull() + if not has_frame: + break + GREEN = (0, 255, 0) + RED = (0, 0, 255) + WHITE = (255, 255, 255) + BLUE = (255, 0, 0) + PINK = (255, 0, 255) + YELLOW = (0, 255, 255) + M_PI_180 = np.pi / 180 + M_PI_2 = np.pi / 2 + M_PI = np.pi + FACES_SIZE = len(outr) + for (i, out_rect) in enumerate(outr): + cv.rectangle(oimg, out_rect, WHITE, 1) + (rx, ry, rwidth, rheight) = out_rect + lm_radius = int(0.01 * rwidth + 1) + lmsize = int(len(outl) / FACES_SIZE) + for j in range(lmsize): + cv.circle(oimg, outl[j + i * lmsize], lm_radius, YELLOW, -1) + yaw = out_y[i] + pitch = out_p[i] + roll = out_r[i] + sin_y = np.sin(yaw[:] * M_PI_180) + sin_p = np.sin(pitch[:] * M_PI_180) + sin_r = np.sin(roll[:] * M_PI_180) + cos_y = np.cos(yaw[:] * M_PI_180) + cos_p = np.cos(pitch[:] * M_PI_180) + cos_r = np.cos(roll[:] * M_PI_180) + axis_length = 0.4 * rwidth + x_center = int(rx + rwidth / 2) + y_center = int(ry + rheight / 2) + cv.line(oimg, [x_center, y_center], [int(x_center + axis_length * (cos_r * cos_y + sin_y * sin_p * sin_r)), int(y_center + axis_length * cos_p * sin_r)], RED, 2) + cv.line(oimg, [x_center, y_center], 
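+# (the cv.line call above continues below) The three cv.line calls render the rotated head axes: yaw/pitch/roll are converted to radians and the unit axes are projected into the image with the usual Tait-Bryan products, e.g. the first axis endpoint is offset by axis_length * (cos_r*cos_y + sin_y*sin_p*sin_r, cos_p*sin_r), where axis_length is 0.4 of the face-rectangle width.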
[int(x_center + axis_length * (cos_r * sin_y * sin_p + cos_y * sin_r)), int(y_center - axis_length * cos_p * cos_r)], GREEN, 2) + cv.line(oimg, [x_center, y_center], [int(x_center + axis_length * sin_y * cos_p), int(y_center + axis_length * sin_p)], PINK, 2) + scale_box = 0.002 * rwidth + cv.putText(oimg, 'head pose: (y=%0.0f, p=%0.0f, r=%0.0f)' % (np.round(yaw), np.round(pitch), np.round(roll)), [int(rx), int(ry + rheight + 5 * rwidth / 100)], cv.FONT_HERSHEY_PLAIN, scale_box * 2, WHITE, 1) + color_l = GREEN if out_st_l[i] else RED + cv.rectangle(oimg, l_eyes[i], color_l, 1) + color_r = GREEN if out_st_r[i] else RED + cv.rectangle(oimg, r_eyes[i], color_r, 1) + norm_gazes = np.linalg.norm(outg[i][0]) + gaze_vector = outg[i][0] / norm_gazes + arrow_length = 0.4 * rwidth + gaze_arrow = [arrow_length * gaze_vector[0], -arrow_length * gaze_vector[1]] + left_arrow = [int(a + b) for (a, b) in zip(out_mids[0 + i * 2], gaze_arrow)] + right_arrow = [int(a + b) for (a, b) in zip(out_mids[1 + i * 2], gaze_arrow)] + if out_st_l[i]: + cv.arrowedLine(oimg, out_mids[0 + i * 2], left_arrow, BLUE, 2) + if out_st_r[i]: + cv.arrowedLine(oimg, out_mids[1 + i * 2], right_arrow, BLUE, 2) + (v0, v1, v2) = outg[i][0] + gaze_angles = [180 / M_PI * (M_PI_2 + np.arctan2(v2, v0)), 180 / M_PI * (M_PI_2 - np.arccos(v1 / norm_gazes))] + cv.putText(oimg, 'gaze angles: (h=%0.0f, v=%0.0f)' % (np.round(gaze_angles[0]), np.round(gaze_angles[1])), [int(rx), int(ry + rheight + 12 * rwidth / 100)], cv.FONT_HERSHEY_PLAIN, scale_box * 2, WHITE, 1) + cv.putText(oimg, 'FPS: %0i' % fps, [int(20), int(40)], cv.FONT_HERSHEY_PLAIN, 2, RED, 2) + cv.imshow('Gaze Estimation', oimg) + cv.waitKey(1) + fps = int(1.0 / (time.time() - start_time_cycle)) + frames += 1 + EXECUTION_TIME = time.time() - START_TIME + print('Execution successful') + print('Mean FPS is ', int(frames / EXECUTION_TIME)) + +# File: opencv-master/modules/java/generator/gen_java.py +import sys, re, os.path, errno, fnmatch +import json +import logging +import codecs +from shutil import copyfile +from pprint import pformat +from string import Template +if sys.version_info[0] >= 3: + from io import StringIO +else: + import io + + class StringIO(io.StringIO): + + def write(self, s): + if isinstance(s, str): + s = unicode(s) + return super(StringIO, self).write(s) +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +config = None +ROOT_DIR = None +FILES_REMAP = {} + +def checkFileRemap(path): + path = os.path.realpath(path) + if path in FILES_REMAP: + return FILES_REMAP[path] + assert path[-3:] != '.in', path + return path +total_files = 0 +updated_files = 0 +module_imports = [] +module_j_code = None +module_jn_code = None +class_ignore_list = [] +const_ignore_list = [] +const_private_list = [] +missing_consts = {} +type_dict = {'': {'j_type': '', 'jn_type': 'long', 'jni_type': 'jlong'}, 'void': {'j_type': 'void', 'jn_type': 'void', 'jni_type': 'void'}, 'env': {'j_type': '', 'jn_type': '', 'jni_type': 'JNIEnv*'}, 'cls': {'j_type': '', 'jn_type': '', 'jni_type': 'jclass'}, 'bool': {'j_type': 'boolean', 'jn_type': 'boolean', 'jni_type': 'jboolean', 'suffix': 'Z'}, 'char': {'j_type': 'char', 'jn_type': 'char', 'jni_type': 'jchar', 'suffix': 'C'}, 'int': {'j_type': 'int', 'jn_type': 'int', 'jni_type': 'jint', 'suffix': 'I'}, 'long': {'j_type': 'int', 'jn_type': 'int', 'jni_type': 'jint', 'suffix': 'I'}, 'float': {'j_type': 'float', 'jn_type': 'float', 'jni_type': 'jfloat', 'suffix': 'F'}, 'double': {'j_type': 'double', 'jn_type': 'double', 'jni_type': 'jdouble', 'suffix': 
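+# (the type_dict entry continues below) Each type_dict record maps one C++ type to the Java-facing type (j_type), the type used in the native-method declaration (jn_type), the JNI-level type (jni_type) and the JNI signature suffix used to mangle native names, e.g. suffix 'I' for int as in Java_org_opencv_core_Core_methodName__I. Optional 'jni_var' templates declare the local C++ variable an argument unpacks into.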
'D'}, 'size_t': {'j_type': 'long', 'jn_type': 'long', 'jni_type': 'jlong', 'suffix': 'J'}, '__int64': {'j_type': 'long', 'jn_type': 'long', 'jni_type': 'jlong', 'suffix': 'J'}, 'int64': {'j_type': 'long', 'jn_type': 'long', 'jni_type': 'jlong', 'suffix': 'J'}, 'double[]': {'j_type': 'double[]', 'jn_type': 'double[]', 'jni_type': 'jdoubleArray', 'suffix': '_3D'}, 'string': {'j_type': 'String', 'jn_type': 'String', 'jni_name': 'n_%(n)s', 'jni_type': 'jstring', 'jni_var': 'const char* utf_%(n)s = env->GetStringUTFChars(%(n)s, 0); std::string n_%(n)s( utf_%(n)s ? utf_%(n)s : "" ); env->ReleaseStringUTFChars(%(n)s, utf_%(n)s)', 'suffix': 'Ljava_lang_String_2', 'j_import': 'java.lang.String'}, 'vector_string': {'j_type': 'List<String>', 'jn_type': 'List<String>', 'jni_type': 'jobject', 'jni_var': 'std::vector< std::string > %(n)s', 'suffix': 'Ljava_util_List', 'v_type': 'string', 'j_import': 'java.lang.String'}} +namespaces_dict = {} +ManualFuncs = {} +func_arg_fix = {} + +def read_contents(fname): + with open(fname, 'r') as f: + data = f.read() + return data + +def mkdir_p(path): + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST and os.path.isdir(path): + pass + else: + raise +T_JAVA_START_INHERITED = read_contents(os.path.join(SCRIPT_DIR, 'templates/java_class_inherited.prolog')) +T_JAVA_START_ORPHAN = read_contents(os.path.join(SCRIPT_DIR, 'templates/java_class.prolog')) +T_JAVA_START_MODULE = read_contents(os.path.join(SCRIPT_DIR, 'templates/java_module.prolog')) +T_CPP_MODULE = Template(read_contents(os.path.join(SCRIPT_DIR, 'templates/cpp_module.template'))) + +class GeneralInfo: + + def __init__(self, type, decl, namespaces): + (self.symbol_id, self.parent_id, self.namespace, self.classpath, self.classname, self.name) = self.parseName(decl[0], namespaces) + self.cname = get_cname(self.symbol_id) + self.params = {} + self.annotation = [] + if type == 'class': + docstring = '// C++: class ' + self.name + '\n' + else: + docstring = '' + if len(decl) > 5 and decl[5]: + doc = decl[5] + if re.search('(@|\\\\)deprecated', doc): + self.annotation.append('@Deprecated') + docstring += sanitize_java_documentation_string(doc, type) + self.docstring = docstring + + def parseName(self, name, namespaces): + name = name[name.find(' ') + 1:].strip() + parent = name[:name.rfind('.')].strip() + if len(parent) == 0: + parent = None + spaceName = '' + localName = name + for namespace in sorted(namespaces, key=len, reverse=True): + if name.startswith(namespace + '.'): + spaceName = namespace + localName = name.replace(namespace + '.', '') + break + pieces = localName.split('.') + if len(pieces) > 2: + return (name, parent, spaceName, '.'.join(pieces[:-1]), pieces[-2], pieces[-1]) + elif len(pieces) == 2: + return (name, parent, spaceName, pieces[0], pieces[0], pieces[1]) + elif len(pieces) == 1: + return (name, parent, spaceName, '', '', pieces[0]) + else: + return (name, parent, spaceName, '', '', name) + + def fullNameOrigin(self): + result = self.symbol_id + return result + + def fullNameJAVA(self): + result = '.'.join([self.fullParentNameJAVA(), self.jname]) + return result + + def fullNameCPP(self): + result = self.cname + return result + + def fullParentNameJAVA(self): + result = '.'.join([f for f in [self.namespace] + self.classpath.split('.') if len(f) > 0]) + return result + + def fullParentNameCPP(self): + result = get_cname(self.parent_id) + return result + +class ConstInfo(GeneralInfo): + + def __init__(self, decl, addedManually=False, namespaces=[], enumType=None): + 
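+# (the constructor body continues below) To make GeneralInfo.parseName concrete, a hypothetical trace: for name 'cv.ml.SVM.create' with namespaces ['cv', 'cv.ml'], the longest matching namespace 'cv.ml' is stripped, localName becomes 'SVM.create', and the method returns (symbol_id='cv.ml.SVM.create', parent_id='cv.ml.SVM', namespace='cv.ml', classpath='SVM', classname='SVM', name='create').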
GeneralInfo.__init__(self, 'const', decl, namespaces) + self.value = decl[1] + self.enumType = enumType + self.addedManually = addedManually + if self.namespace in namespaces_dict: + prefix = namespaces_dict[self.namespace] + if prefix: + self.name = '%s_%s' % (prefix, self.name) + + def __repr__(self): + return Template('CONST $name=$value$manual').substitute(name=self.name, value=self.value, manual='(manual)' if self.addedManually else '') + + def isIgnored(self): + for c in const_ignore_list: + if re.match(c, self.name): + return True + return False + +def normalize_field_name(name): + return name.replace('.', '_').replace('[', '').replace(']', '').replace('_getNativeObjAddr()', '_nativeObj') + +def normalize_class_name(name): + return re.sub('^cv\\.', '', name).replace('.', '_') + +def get_cname(name): + return name.replace('.', '::') + +def cast_from(t): + if t in type_dict and 'cast_from' in type_dict[t]: + return type_dict[t]['cast_from'] + return t + +def cast_to(t): + if t in type_dict and 'cast_to' in type_dict[t]: + return type_dict[t]['cast_to'] + return t + +class ClassPropInfo: + + def __init__(self, decl): + self.ctype = decl[0] + self.name = decl[1] + self.rw = '/RW' in decl[3] + + def __repr__(self): + return Template('PROP $ctype $name').substitute(ctype=self.ctype, name=self.name) + +class ClassInfo(GeneralInfo): + + def __init__(self, decl, namespaces=[]): + GeneralInfo.__init__(self, 'class', decl, namespaces) + self.methods = [] + self.methods_suffixes = {} + self.consts = [] + self.private_consts = [] + self.imports = set() + self.props = [] + self.jname = self.name + self.smart = None + self.j_code = None + self.jn_code = None + self.cpp_code = None + for m in decl[2]: + if m.startswith('='): + self.jname = m[1:] + if m == '/Simple': + self.smart = False + if self.classpath: + prefix = self.classpath.replace('.', '_') + self.name = '%s_%s' % (prefix, self.name) + self.jname = '%s_%s' % (prefix, self.jname) + if self.namespace in namespaces_dict: + prefix = namespaces_dict[self.namespace] + if prefix: + self.name = '%s_%s' % (prefix, self.name) + self.jname = '%s_%s' % (prefix, self.jname) + self.base = '' + if decl[1]: + base_class = re.sub('^: ', '', decl[1]) + base_class = re.sub('^cv::', '', base_class) + base_class = base_class.replace('::', '.') + base_info = ClassInfo(('class {}'.format(base_class), '', [], [], None, None), [self.namespace]) + base_type_name = base_info.name + if not base_type_name in type_dict: + base_type_name = re.sub('^.*:', '', decl[1].split(',')[0]).strip().replace(self.jname, '') + self.base = base_type_name + self.addImports(self.base) + + def __repr__(self): + return Template('CLASS $namespace::$classpath.$name : $base').substitute(**self.__dict__) + + def getAllImports(self, module): + return ['import %s;' % c for c in sorted(self.imports) if not c.startswith('org.opencv.' 
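+# (the expression continues below) getAllImports emits the Java import block for a generated class, skipping imports from the module's own org.opencv package and top-level java.lang types such as java.lang.String, which need no import.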
+ module) and (not c.startswith('java.lang.') or c.count('.') != 2)] + + def addImports(self, ctype): + if ctype in type_dict: + if 'j_import' in type_dict[ctype]: + self.imports.add(type_dict[ctype]['j_import']) + if 'v_type' in type_dict[ctype]: + self.imports.add('java.util.List') + self.imports.add('java.util.ArrayList') + self.imports.add('org.opencv.utils.Converters') + if type_dict[ctype]['v_type'] in ('Mat', 'vector_Mat'): + self.imports.add('org.opencv.core.Mat') + + def getAllMethods(self): + result = [] + result += [fi for fi in self.methods if fi.isconstructor] + result += [fi for fi in self.methods if not fi.isconstructor] + return result + + def addMethod(self, fi): + self.methods.append(fi) + + def getConst(self, name): + for cand in self.consts + self.private_consts: + if cand.name == name: + return cand + return None + + def addConst(self, constinfo): + consts = self.consts + for c in const_private_list: + if re.match(c, constinfo.name): + consts = self.private_consts + break + consts.append(constinfo) + + def initCodeStreams(self, Module): + self.j_code = StringIO() + self.jn_code = StringIO() + self.cpp_code = StringIO() + if self.base: + self.j_code.write(T_JAVA_START_INHERITED) + elif self.name != Module: + self.j_code.write(T_JAVA_START_ORPHAN) + else: + self.j_code.write(T_JAVA_START_MODULE) + if self.name == Module: + for i in module_imports or []: + self.imports.add(i) + if module_j_code: + self.j_code.write(module_j_code) + if module_jn_code: + self.jn_code.write(module_jn_code) + + def cleanupCodeStreams(self): + self.j_code.close() + self.jn_code.close() + self.cpp_code.close() + + def generateJavaCode(self, m, M): + return Template(self.j_code.getvalue() + '\n\n' + self.jn_code.getvalue() + '\n}\n').substitute(module=m, name=self.name, jname=self.jname, imports='\n'.join(self.getAllImports(M)), docs=self.docstring, annotation='\n' + '\n'.join(self.annotation) if self.annotation else '', base=self.base) + + def generateCppCode(self): + return self.cpp_code.getvalue() + +class ArgInfo: + + def __init__(self, arg_tuple): + self.pointer = False + ctype = arg_tuple[0] + if ctype.endswith('*'): + ctype = ctype[:-1] + self.pointer = True + self.ctype = ctype + self.name = arg_tuple[1] + self.defval = arg_tuple[2] + self.out = '' + if '/O' in arg_tuple[3]: + self.out = 'O' + if '/IO' in arg_tuple[3]: + self.out = 'IO' + + def __repr__(self): + return Template('ARG $ctype$p $name=$defval').substitute(ctype=self.ctype, p=' *' if self.pointer else '', name=self.name, defval=self.defval) + +class FuncInfo(GeneralInfo): + + def __init__(self, decl, namespaces=[]): + GeneralInfo.__init__(self, 'func', decl, namespaces) + self.cname = get_cname(decl[0]) + self.jname = self.name + self.isconstructor = self.name == self.classname + if '[' in self.name: + self.jname = 'getelem' + for m in decl[2]: + if m.startswith('='): + self.jname = m[1:] + if self.classpath and self.classname != self.classpath: + prefix = self.classpath.replace('.', '_') + self.classname = prefix + if self.isconstructor: + self.name = prefix + self.jname = prefix + if self.namespace in namespaces_dict: + prefix = namespaces_dict[self.namespace] + if prefix: + if self.classname: + self.classname = '%s_%s' % (prefix, self.classname) + if self.isconstructor: + self.jname = '%s_%s' % (prefix, self.jname) + else: + self.jname = '%s_%s' % (prefix, self.jname) + self.static = ['', 'static']['/S' in decl[2]] + self.ctype = re.sub('^CvTermCriteria', 'TermCriteria', decl[1] or '') + self.args = [] + func_fix_map = 
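+# (the assignment continues below) ArgInfo above digests one header-parser argument tuple. A hypothetical example: ('Mat*', 'dst', '', ['/O']) becomes ctype='Mat' with pointer=True, name='dst', defval='' and out='O', while ('double', 'alpha', '1.0', []) is a plain input argument carrying a default value.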
func_arg_fix.get(self.jname, {}) + for a in decl[3]: + arg = a[:] + arg_fix_map = func_fix_map.get(arg[1], {}) + arg[0] = arg_fix_map.get('ctype', arg[0]) + arg[3] = arg_fix_map.get('attrib', arg[3]) + self.args.append(ArgInfo(arg)) + + def fullClassJAVA(self): + return self.fullParentNameJAVA() + + def fullClassCPP(self): + return self.fullParentNameCPP() + + def __repr__(self): + return Template('FUNC <$ctype $namespace.$classpath.$name $args>').substitute(**self.__dict__) + + def __lt__(self, other): + return self.__repr__() < other.__repr__() + +class JavaWrapperGenerator(object): + + def __init__(self): + self.cpp_files = [] + self.clear() + + def clear(self): + self.namespaces = ['cv'] + classinfo_Mat = ClassInfo(['class cv.Mat', '', ['/Simple'], []], self.namespaces) + self.classes = {'Mat': classinfo_Mat} + self.module = '' + self.Module = '' + self.ported_func_list = [] + self.skipped_func_list = [] + self.def_args_hist = {} + + def add_class(self, decl): + classinfo = ClassInfo(decl, namespaces=self.namespaces) + if classinfo.name in class_ignore_list: + logging.info('ignored: %s', classinfo) + return + name = classinfo.name + if self.isWrapped(name) and (not classinfo.base): + logging.warning('duplicated: %s', classinfo) + return + self.classes[name] = classinfo + if name in type_dict and (not classinfo.base): + logging.warning('duplicated: %s', classinfo) + return + if self.isSmartClass(classinfo): + jni_name = '*((*(Ptr<' + classinfo.fullNameCPP() + '>*)%(n)s_nativeObj).get())' + else: + jni_name = '(*(' + classinfo.fullNameCPP() + '*)%(n)s_nativeObj)' + type_dict.setdefault(name, {}).update({'j_type': classinfo.jname, 'jn_type': 'long', 'jn_args': (('__int64', '.nativeObj'),), 'jni_name': jni_name, 'jni_type': 'jlong', 'suffix': 'J', 'j_import': 'org.opencv.%s.%s' % (self.module, classinfo.jname)}) + type_dict.setdefault(name + '*', {}).update({'j_type': classinfo.jname, 'jn_type': 'long', 'jn_args': (('__int64', '.nativeObj'),), 'jni_name': '&(' + jni_name + ')', 'jni_type': 'jlong', 'suffix': 'J', 'j_import': 'org.opencv.%s.%s' % (self.module, classinfo.jname)}) + if name in missing_consts: + if 'private' in missing_consts[name]: + for (n, val) in missing_consts[name]['private']: + classinfo.private_consts.append(ConstInfo([n, val], addedManually=True)) + if 'public' in missing_consts[name]: + for (n, val) in missing_consts[name]['public']: + classinfo.consts.append(ConstInfo([n, val], addedManually=True)) + for p in decl[3]: + if True: + classinfo.props.append(ClassPropInfo(p)) + else: + logging.warning('Skipped property: [%s]' % name, p) + if classinfo.base: + classinfo.addImports(classinfo.base) + type_dict.setdefault('Ptr_' + name, {}).update({'j_type': classinfo.jname, 'jn_type': 'long', 'jn_args': (('__int64', '.getNativeObjAddr()'),), 'jni_name': '*((Ptr<' + classinfo.fullNameCPP() + '>*)%(n)s_nativeObj)', 'jni_type': 'jlong', 'suffix': 'J', 'j_import': 'org.opencv.%s.%s' % (self.module, classinfo.jname)}) + logging.info('ok: class %s, name: %s, base: %s', classinfo, name, classinfo.base) + + def add_const(self, decl, enumType=None): + constinfo = ConstInfo(decl, namespaces=self.namespaces, enumType=enumType) + if constinfo.isIgnored(): + logging.info('ignored: %s', constinfo) + else: + if not self.isWrapped(constinfo.classname): + logging.info('class not found: %s', constinfo) + constinfo.name = constinfo.classname + '_' + constinfo.name + constinfo.classname = '' + ci = self.getClass(constinfo.classname) + duplicate = ci.getConst(constinfo.name) + if duplicate: + 
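+# (the branch continues below) add_class above is where a parsed C++ class becomes wrappable: it registers type_dict entries for Name, Name* and Ptr_Name, all passed through JNI as a jlong holding the native address; smart (Ptr-held) classes dereference through the stored cv::Ptr while plain ones cast the address directly.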
if duplicate.addedManually: + logging.info('manual: %s', constinfo) + else: + logging.warning('duplicated: %s', constinfo) + else: + ci.addConst(constinfo) + logging.info('ok: %s', constinfo) + + def add_enum(self, decl): + enumType = decl[0].rsplit(' ', 1)[1] + if enumType.endswith('<unnamed>'): + enumType = None + else: + ctype = normalize_class_name(enumType) + type_dict[ctype] = {'cast_from': 'int', 'cast_to': get_cname(enumType), 'j_type': 'int', 'jn_type': 'int', 'jni_type': 'jint', 'suffix': 'I'} + const_decls = decl[3] + for decl in const_decls: + self.add_const(decl, enumType) + + def add_func(self, decl): + fi = FuncInfo(decl, namespaces=self.namespaces) + classname = fi.classname or self.Module + class_symbol_id = classname if self.isWrapped(classname) else fi.classpath.replace('.', '_') + if classname in class_ignore_list: + logging.info('ignored: %s', fi) + elif classname in ManualFuncs and fi.jname in ManualFuncs[classname]: + logging.info('manual: %s', fi) + elif not self.isWrapped(class_symbol_id): + logging.warning('not found: %s', fi) + else: + self.getClass(class_symbol_id).addMethod(fi) + logging.info('ok: %s', fi) + cnt = len([a for a in fi.args if a.defval]) + self.def_args_hist[cnt] = self.def_args_hist.get(cnt, 0) + 1 + + def save(self, path, buf): + global total_files, updated_files + total_files += 1 + if os.path.exists(path): + with open(path, 'rt') as f: + content = f.read() + if content == buf: + return + with codecs.open(path, 'w', 'utf-8') as f: + f.write(buf) + updated_files += 1 + + def gen(self, srcfiles, module, output_path, output_jni_path, output_java_path, common_headers): + self.clear() + self.module = module + self.Module = module.capitalize() + parser = hdr_parser.CppHeaderParser(generate_umat_decls=False) + self.add_class(['class cv.' + self.Module, '', [], []]) + includes = [] + for hdr in common_headers: + logging.info('\n===== Common header : %s =====', hdr) + includes.append('#include "' + hdr + '"') + for hdr in srcfiles: + decls = parser.parse(hdr) + self.namespaces = sorted(parser.namespaces) + logging.info('\n\n===== Header: %s =====', hdr) + logging.info('Namespaces: %s', sorted(parser.namespaces)) + if decls: + includes.append('#include "' + hdr + '"') + else: + logging.info('Ignore header: %s', hdr) + for decl in decls: + logging.info('\n--- Incoming ---\n%s', pformat(decl[:5], 4)) + name = decl[0] + if name.startswith('struct') or name.startswith('class'): + self.add_class(decl) + elif name.startswith('const'): + self.add_const(decl) + elif name.startswith('enum'): + self.add_enum(decl) + else: + self.add_func(decl) + logging.info('\n\n===== Generating... 
=====') + moduleCppCode = StringIO() + package_path = os.path.join(output_java_path, module) + mkdir_p(package_path) + for ci in sorted(self.classes.values(), key=lambda x: x.symbol_id): + if ci.name == 'Mat': + continue + ci.initCodeStreams(self.Module) + self.gen_class(ci) + classJavaCode = ci.generateJavaCode(self.module, self.Module) + self.save('%s/%s/%s.java' % (output_java_path, module, ci.jname), classJavaCode) + moduleCppCode.write(ci.generateCppCode()) + ci.cleanupCodeStreams() + cpp_file = os.path.abspath(os.path.join(output_jni_path, module + '.inl.hpp')) + self.cpp_files.append(cpp_file) + self.save(cpp_file, T_CPP_MODULE.substitute(m=module, M=module.upper(), code=moduleCppCode.getvalue(), includes='\n'.join(includes))) + self.save(os.path.join(output_path, module + '.txt'), self.makeReport()) + + def makeReport(self): + report = StringIO() + total_count = len(self.ported_func_list) + len(self.skipped_func_list) + report.write('PORTED FUNCs LIST (%i of %i):\n\n' % (len(self.ported_func_list), total_count)) + report.write('\n'.join(self.ported_func_list)) + report.write('\n\nSKIPPED FUNCs LIST (%i of %i):\n\n' % (len(self.skipped_func_list), total_count)) + report.write(''.join(self.skipped_func_list)) + for i in sorted(self.def_args_hist.keys()): + report.write('\n%i def args - %i funcs' % (i, self.def_args_hist[i])) + return report.getvalue() + + def fullTypeNameCPP(self, t): + if self.isWrapped(t): + return self.getClass(t).fullNameCPP() + else: + return cast_from(t) + + def gen_func(self, ci, fi, prop_name=''): + logging.info('%s', fi) + j_code = ci.j_code + jn_code = ci.jn_code + cpp_code = ci.cpp_code + if prop_name: + c_decl = '%s %s::%s' % (fi.ctype, fi.classname, prop_name) + else: + decl_args = [] + for a in fi.args: + s = a.ctype or ' _hidden_ ' + if a.pointer: + s += '*' + elif a.out: + s += '&' + s += ' ' + a.name + if a.defval: + s += ' = ' + a.defval + decl_args.append(s) + c_decl = '%s %s %s(%s)' % (fi.static, fi.ctype, fi.cname, ', '.join(decl_args)) + j_code.write('\n //\n // C++: %s\n //\n\n' % c_decl) + if fi.ctype not in type_dict: + msg = "// Return type '%s' is not supported, skipping the function\n\n" % fi.ctype + self.skipped_func_list.append(c_decl + '\n' + msg) + j_code.write(' ' * 4 + msg) + logging.info('SKIP:' + c_decl.strip() + '\t due to RET type ' + fi.ctype) + return + for a in fi.args: + if a.ctype not in type_dict: + if not a.defval and a.ctype.endswith('*'): + a.defval = 0 + if a.defval: + a.ctype = '' + continue + msg = "// Unknown type '%s' (%s), skipping the function\n\n" % (a.ctype, a.out or 'I') + self.skipped_func_list.append(c_decl + '\n' + msg) + j_code.write(' ' * 4 + msg) + logging.info('SKIP:' + c_decl.strip() + '\t due to ARG type ' + a.ctype + '/' + (a.out or 'I')) + return + self.ported_func_list.append(c_decl) + jn_code.write('\n // C++: %s\n' % c_decl) + cpp_code.write('\n//\n// %s\n//\n' % c_decl) + args = fi.args[:] + j_signatures = [] + suffix_counter = int(ci.methods_suffixes.get(fi.jname, -1)) + while True: + suffix_counter += 1 + ci.methods_suffixes[fi.jname] = suffix_counter + jn_args = [] + jni_args = [ArgInfo(['env', 'env', '', [], '']), ArgInfo(['cls', '', '', [], ''])] + j_prologue = [] + j_epilogue = [] + c_prologue = [] + c_epilogue = [] + if type_dict[fi.ctype]['jni_type'] == 'jdoubleArray': + fields = type_dict[fi.ctype]['jn_args'] + c_epilogue.append(('jdoubleArray _da_retval_ = env->NewDoubleArray(%(cnt)i); ' + 'jdouble _tmp_retval_[%(cnt)i] = {%(args)s}; ' + 'env->SetDoubleArrayRegion(_da_retval_, 0, 
%(cnt)i, _tmp_retval_);') % {'cnt': len(fields), 'args': ', '.join(['(jdouble)_retval_' + f[1] for f in fields])}) + if fi.classname and fi.ctype and (not fi.static): + jn_args.append(ArgInfo(['__int64', 'nativeObj', '', [], ''])) + jni_args.append(ArgInfo(['__int64', 'self', '', [], ''])) + ci.addImports(fi.ctype) + for a in args: + if not a.ctype: + continue + ci.addImports(a.ctype) + if 'v_type' in type_dict[a.ctype]: + if type_dict[a.ctype]['v_type'] in ('Mat', 'vector_Mat'): + jn_args.append(ArgInfo(['__int64', '%s_mat.nativeObj' % a.name, '', [], ''])) + jni_args.append(ArgInfo(['__int64', '%s_mat_nativeObj' % a.name, '', [], ''])) + c_prologue.append(type_dict[a.ctype]['jni_var'] % {'n': a.name} + ';') + c_prologue.append('Mat& %(n)s_mat = *((Mat*)%(n)s_mat_nativeObj)' % {'n': a.name} + ';') + if 'I' in a.out or not a.out: + if type_dict[a.ctype]['v_type'] == 'vector_Mat': + j_prologue.append('List<Mat> %(n)s_tmplm = new ArrayList<Mat>((%(n)s != null) ? %(n)s.size() : 0);' % {'n': a.name}) + j_prologue.append('Mat %(n)s_mat = Converters.%(t)s_to_Mat(%(n)s, %(n)s_tmplm);' % {'n': a.name, 't': a.ctype}) + elif not type_dict[a.ctype]['j_type'].startswith('MatOf'): + j_prologue.append('Mat %(n)s_mat = Converters.%(t)s_to_Mat(%(n)s);' % {'n': a.name, 't': a.ctype}) + else: + j_prologue.append('Mat %s_mat = %s;' % (a.name, a.name)) + c_prologue.append('Mat_to_%(t)s( %(n)s_mat, %(n)s );' % {'n': a.name, 't': a.ctype}) + elif not type_dict[a.ctype]['j_type'].startswith('MatOf'): + j_prologue.append('Mat %s_mat = new Mat();' % a.name) + else: + j_prologue.append('Mat %s_mat = %s;' % (a.name, a.name)) + if 'O' in a.out: + if not type_dict[a.ctype]['j_type'].startswith('MatOf'): + j_epilogue.append('Converters.Mat_to_%(t)s(%(n)s_mat, %(n)s);' % {'t': a.ctype, 'n': a.name}) + j_epilogue.append('%s_mat.release();' % a.name) + c_epilogue.append('%(t)s_to_Mat( %(n)s, %(n)s_mat );' % {'n': a.name, 't': a.ctype}) + else: + jn_args.append(ArgInfo([a.ctype, a.name, '', [], ''])) + jni_args.append(ArgInfo([a.ctype, '%s_list' % a.name, '', [], ''])) + c_prologue.append(type_dict[a.ctype]['jni_var'] % {'n': a.name} + ';') + if 'I' in a.out or not a.out: + c_prologue.append('%(n)s = List_to_%(t)s(env, %(n)s_list);' % {'n': a.name, 't': a.ctype}) + if 'O' in a.out: + c_epilogue.append('Copy_%s_to_List(env,%s,%s_list);' % (a.ctype, a.name, a.name)) + else: + fields = type_dict[a.ctype].get('jn_args', ((a.ctype, ''),)) + if 'I' in a.out or not a.out or self.isWrapped(a.ctype): + for f in fields: + jn_args.append(ArgInfo([f[0], a.name + f[1], '', [], ''])) + jni_args.append(ArgInfo([f[0], a.name + normalize_field_name(f[1]), '', [], ''])) + if 'O' in a.out and (not self.isWrapped(a.ctype)): + jn_args.append(ArgInfo(['double[]', '%s_out' % a.name, '', [], ''])) + jni_args.append(ArgInfo(['double[]', '%s_out' % a.name, '', [], ''])) + j_prologue.append('double[] %s_out = new double[%i];' % (a.name, len(fields))) + c_epilogue.append('jdouble tmp_%(n)s[%(cnt)i] = {%(args)s}; env->SetDoubleArrayRegion(%(n)s_out, 0, %(cnt)i, tmp_%(n)s);' % {'n': a.name, 'cnt': len(fields), 'args': ', '.join(['(jdouble)' + a.name + f[1] for f in fields])}) + if type_dict[a.ctype]['j_type'] in ('bool', 'int', 'long', 'float', 'double'): + j_epilogue.append('if(%(n)s!=null) %(n)s[0] = (%(t)s)%(n)s_out[0];' % {'n': a.name, 't': type_dict[a.ctype]['j_type']}) + else: + set_vals = [] + i = 0 + for f in fields: + set_vals.append('%(n)s%(f)s = %(t)s%(n)s_out[%(i)i]' % {'n': a.name, 't': ('(' + type_dict[f[0]]['j_type'] + ')', '')[f[0] == 'double'], 
'f': f[1], 'i': i}) + i += 1 + j_epilogue.append('if(' + a.name + '!=null){ ' + '; '.join(set_vals) + '; } ') + j_args = [] + for a in args: + if not a.ctype: + continue + jt = type_dict[a.ctype]['j_type'] + if a.out and jt in ('bool', 'int', 'long', 'float', 'double'): + jt += '[]' + j_args.append(jt + ' ' + a.name) + j_signature = type_dict[fi.ctype]['j_type'] + ' ' + fi.jname + '(' + ', '.join(j_args) + ')' + logging.info('java: ' + j_signature) + if j_signature in j_signatures: + if args: + args.pop() + continue + else: + break + jn_code.write(Template(' private static native $type $name($args);\n').substitute(type=type_dict[fi.ctype].get('jn_type', 'double[]'), name=fi.jname + '_' + str(suffix_counter), args=', '.join(['%s %s' % (type_dict[a.ctype]['jn_type'], normalize_field_name(a.name)) for a in jn_args]))) + if fi.docstring: + lines = fi.docstring.splitlines() + returnTag = False + javadocParams = [] + toWrite = [] + inCode = False + for (index, line) in enumerate(lines): + p0 = line.find('@param') + if p0 != -1: + p0 += 7 + p1 = line.find(' ', p0) + p1 = len(line) if p1 == -1 else p1 + name = line[p0:p1] + javadocParams.append(name) + for arg in j_args: + if arg.endswith(' ' + name): + toWrite.append(line) + break + else: + if '<code>' in line: + inCode = True + if '</code>' in line: + inCode = False + line = line.replace('@result ', '@return ') + if '@return ' in line: + returnTag = True + if not inCode and toWrite and (not toWrite[-1]) and line and (not line.startswith('\\')) and (not line.startswith('<ul>')) and (not line.startswith('@param')): + toWrite.append('<p>') + if index == len(lines) - 1: + for arg in j_args: + name = arg[arg.rfind(' ') + 1:] + if not name in javadocParams: + toWrite.append(' * @param ' + name + ' automatically generated') + if type_dict[fi.ctype]['j_type'] and (not returnTag) and (fi.ctype != 'void'): + toWrite.append(' * @return automatically generated') + toWrite.append(line) + for line in toWrite: + j_code.write(' ' * 4 + line + '\n') + if fi.annotation: + j_code.write(' ' * 4 + '\n'.join(fi.annotation) + '\n') + ret_type = fi.ctype + if fi.ctype.endswith('*'): + ret_type = ret_type[:-1] + ret_val = type_dict[ret_type]['j_type'] + ' retVal = ' if j_epilogue else 'return ' + tail = '' + ret = 'return retVal;' if j_epilogue else '' + if 'v_type' in type_dict[ret_type]: + j_type = type_dict[ret_type]['j_type'] + if type_dict[ret_type]['v_type'] in ('Mat', 'vector_Mat'): + tail = ')' + if j_type.startswith('MatOf'): + ret_val += j_type + '.fromNativeAddr(' + else: + ret_val = 'Mat retValMat = new Mat(' + j_prologue.append(j_type + ' retVal = new Array' + j_type + '();') + j_epilogue.append('Converters.Mat_to_' + ret_type + '(retValMat, retVal);') + ret = 'return retVal;' + elif ret_type.startswith('Ptr_'): + constructor = type_dict[ret_type]['j_type'] + '.__fromPtr__(' + if j_epilogue: + ret_val = type_dict[fi.ctype]['j_type'] + ' retVal = ' + constructor + else: + ret_val = 'return ' + constructor + tail = ')' + elif ret_type == 'void': + ret_val = '' + ret = '' + elif ret_type == '': + if fi.classname and ci.base: + ret_val = 'super(' + tail = ')' + else: + ret_val = 'nativeObj = ' + ret = '' + elif self.isWrapped(ret_type): + constructor = self.getClass(ret_type).jname + '(' + if j_epilogue: + ret_val = type_dict[ret_type]['j_type'] + ' retVal = new ' + constructor + else: + ret_val = 'return new ' + constructor + tail = ')' + elif 'jn_type' not in type_dict[ret_type]: + constructor = type_dict[ret_type]['j_type'] + '(' + if j_epilogue: + ret_val = type_dict[fi.ctype]['j_type'] + ' retVal = new ' + constructor + else: + ret_val = 'return new ' + constructor + tail = ')' + static = 'static' + if fi.classname: + static = fi.static + j_code.write(Template(' public $static$j_type$j_name($j_args) {$prologue\n $ret_val$jn_name($jn_args_call)$tail;$epilogue$ret\n }\n\n').substitute(ret='\n ' + ret if ret else '', ret_val=ret_val, tail=tail, prologue='\n ' + '\n '.join(j_prologue) if j_prologue else '', epilogue='\n ' + '\n '.join(j_epilogue) if j_epilogue else '', static=static + ' ' if static else '', j_type=type_dict[fi.ctype]['j_type'] + ' ' if type_dict[fi.ctype]['j_type'] else '', j_name=fi.jname, j_args=', '.join(j_args), jn_name=fi.jname + '_' + str(suffix_counter), jn_args_call=', '.join([a.name for a in jn_args]))) + ret = 'return _retval_;' if c_epilogue else '' + default = 'return 0;' + if fi.ctype == 'void': + ret = '' + default = '' + elif not fi.ctype: + if self.isSmartClass(ci): + ret = 'return (jlong)(new Ptr<%(ctype)s>(_retval_));' % {'ctype': fi.fullClassCPP()} + else: + ret = 'return (jlong) _retval_;' + elif 'v_type' in type_dict[fi.ctype]: + if type_dict[fi.ctype]['v_type'] in ('Mat', 'vector_Mat'): + ret = 'return (jlong) _retval_;' + elif fi.ctype in ['String', 'string']: + ret = 'return env->NewStringUTF(_retval_.c_str());' + default = 'return env->NewStringUTF("");' + elif self.isWrapped(fi.ctype): + ret = None + if fi.ctype in self.classes: + ret_ci = self.classes[fi.ctype] + if self.isSmartClass(ret_ci): + ret = 'return (jlong)(new Ptr<%(ctype)s>(new %(ctype)s(_retval_)));' % {'ctype': 
ret_ci.fullNameCPP()} + if ret is None: + ret = 'return (jlong) new %s(_retval_);' % self.fullTypeNameCPP(fi.ctype) + elif fi.ctype.startswith('Ptr_'): + c_prologue.append('typedef Ptr<%s> %s;' % (self.fullTypeNameCPP(fi.ctype[4:]), fi.ctype)) + ret = 'return (jlong)(new %(ctype)s(_retval_));' % {'ctype': fi.ctype} + elif self.isWrapped(ret_type): + ret = 'return (jlong) _retval_;' + elif type_dict[fi.ctype]['jni_type'] == 'jdoubleArray': + ret = 'return _da_retval_;' + name = fi.name + if prop_name: + if args: + name = prop_name + ' = ' + else: + name = prop_name + ';//' + cvname = fi.fullNameCPP() + retval = self.fullTypeNameCPP(fi.ctype) + ' _retval_ = ' if ret else 'return ' + if fi.ctype == 'void': + retval = '' + elif fi.ctype == 'String': + retval = 'cv::' + self.fullTypeNameCPP(fi.ctype) + ' _retval_ = ' + elif fi.ctype == 'string': + retval = 'std::string _retval_ = ' + elif 'v_type' in type_dict[fi.ctype]: + retval = type_dict[fi.ctype]['jni_var'] % {'n': '_ret_val_vector_'} + ' = ' + if type_dict[fi.ctype]['v_type'] in ('Mat', 'vector_Mat'): + c_epilogue.append('Mat* _retval_ = new Mat();') + c_epilogue.append(fi.ctype + '_to_Mat(_ret_val_vector_, *_retval_);') + elif ret: + c_epilogue.append('jobject _retval_ = ' + fi.ctype + '_to_List(env, _ret_val_vector_);') + else: + c_epilogue.append('return ' + fi.ctype + '_to_List(env, _ret_val_vector_);') + if fi.classname: + if not fi.ctype: + if self.isSmartClass(ci): + retval = self.smartWrap(ci, fi.fullClassCPP()) + ' _retval_ = ' + cvname = 'makePtr<' + fi.fullClassCPP() + '>' + else: + retval = fi.fullClassCPP() + '* _retval_ = ' + cvname = 'new ' + fi.fullClassCPP() + elif fi.static: + cvname = fi.fullNameCPP() + else: + cvname = ('me->' if not self.isSmartClass(ci) else '(*me)->') + name + c_prologue.append('%(cls)s* me = (%(cls)s*) self; //TODO: check for NULL' % {'cls': self.smartWrap(ci, fi.fullClassCPP())}) + cvargs = [] + for a in args: + if a.pointer: + jni_name = '&%(n)s' + else: + jni_name = '%(n)s' + if not a.out and (not 'jni_var' in type_dict[a.ctype]): + jni_name = '(%s)%s' % (cast_to(a.ctype), jni_name) + if not a.ctype: + jni_name = a.defval + cvargs.append(type_dict[a.ctype].get('jni_name', jni_name) % {'n': a.name}) + if 'v_type' not in type_dict[a.ctype]: + if ('I' in a.out or not a.out or self.isWrapped(a.ctype)) and 'jni_var' in type_dict[a.ctype]: + c_prologue.append(type_dict[a.ctype]['jni_var'] % {'n': a.name} + ';') + if a.out and 'I' not in a.out and (not self.isWrapped(a.ctype)) and a.ctype: + c_prologue.append('%s %s;' % (a.ctype, a.name)) + rtype = type_dict[fi.ctype].get('jni_type', 'jdoubleArray') + clazz = ci.jname + cpp_code.write(Template('\nJNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname ($argst);\n\nJNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname\n ($args)\n{\n ${namespace}\n static const char method_name[] = "$module::$fname()";\n try {\n LOGD("%s", method_name);$prologue\n $retval$cvname($cvargs);$epilogue$ret\n } catch(const std::exception &e) {\n throwJavaException(env, &e, method_name);\n } catch (...) 
{\n throwJavaException(env, 0, method_name);\n }$default\n}\n\n\n').substitute(rtype=rtype, module=self.module.replace('_', '_1'), clazz=clazz.replace('_', '_1'), fname=(fi.jname + '_' + str(suffix_counter)).replace('_', '_1'), args=', '.join(['%s %s' % (type_dict[a.ctype].get('jni_type'), a.name) for a in jni_args]), argst=', '.join([type_dict[a.ctype].get('jni_type') for a in jni_args]), prologue='\n ' + '\n '.join(c_prologue) if c_prologue else '', epilogue='\n ' + '\n '.join(c_epilogue) if c_epilogue else '', ret='\n ' + ret if ret else '', cvname=cvname, cvargs=' ' + ', '.join(cvargs) + ' ' if cvargs else '', default='\n ' + default if default else '', retval=retval, namespace='using namespace ' + ci.namespace.replace('.', '::') + ';' if ci.namespace and ci.namespace != 'cv' else '')) + j_signatures.append(j_signature) + if args and args[-1].defval: + args.pop() + else: + break + + def gen_class(self, ci): + logging.info('%s', ci) + consts_map = {c.name: c for c in ci.private_consts} + consts_map.update({c.name: c for c in ci.consts}) + + def const_value(v): + if v in consts_map: + target = consts_map[v] + assert target.value != v + return const_value(target.value) + return v + if ci.private_consts: + logging.info('%s', ci.private_consts) + ci.j_code.write('\n private static final int\n %s;\n\n' % (',\n' + ' ' * 12).join(['%s = %s' % (c.name, const_value(c.value)) for c in ci.private_consts])) + if ci.consts: + enumTypes = set(map(lambda c: c.enumType, ci.consts)) + grouped_consts = {enumType: [c for c in ci.consts if c.enumType == enumType] for enumType in enumTypes} + for typeName in sorted(grouped_consts.keys(), key=lambda x: str(x) if x is not None else ''): + consts = grouped_consts[typeName] + logging.info('%s', consts) + if typeName: + typeNameShort = typeName.rsplit('.', 1)[-1] + ci.j_code.write('\n // C++: enum {1} ({2})\n public static final int\n {0};\n\n'.format((',\n' + ' ' * 12).join(['%s = %s' % (c.name, c.value) for c in consts]), typeNameShort, typeName)) + else: + ci.j_code.write('\n // C++: enum \n public static final int\n {0};\n\n'.format((',\n' + ' ' * 12).join(['%s = %s' % (c.name, c.value) for c in consts]))) + for fi in ci.getAllMethods(): + self.gen_func(ci, fi) + for pi in ci.props: + basename = ci.fullNameOrigin() + getter_name = basename + '.get_' + pi.name + fi = FuncInfo([getter_name, pi.ctype, [], []], self.namespaces) + self.gen_func(ci, fi, pi.name) + if pi.rw: + setter_name = basename + '.set_' + pi.name + fi = FuncInfo([setter_name, 'void', [], [[pi.ctype, pi.name, '', [], '']]], self.namespaces) + self.gen_func(ci, fi, pi.name) + if ci.name in ManualFuncs: + for func in sorted(ManualFuncs[ci.name].keys()): + logging.info('manual function: %s', func) + fn = ManualFuncs[ci.name][func] + ci.j_code.write('\n'.join(fn['j_code'])) + ci.jn_code.write('\n'.join(fn['jn_code'])) + ci.cpp_code.write('\n'.join(fn['cpp_code'])) + if ci.name != self.Module or ci.base: + ci.j_code.write('\n @Override\n protected void finalize() throws Throwable {\n delete(nativeObj);\n }\n') + ci.jn_code.write('\n // native support for java finalize()\n private static native void delete(long nativeObj);\n') + ci.cpp_code.write('\n//\n// native support for java finalize()\n// static void %(cls)s::delete( __int64 self )\n//\nJNIEXPORT void JNICALL Java_org_opencv_%(module)s_%(j_cls)s_delete(JNIEnv*, jclass, jlong);\n\nJNIEXPORT void JNICALL Java_org_opencv_%(module)s_%(j_cls)s_delete\n (JNIEnv*, jclass, jlong self)\n{\n delete (%(cls)s*) self;\n}\n\n' % {'module': 
module.replace('_', '_1'), 'cls': self.smartWrap(ci, ci.fullNameCPP()), 'j_cls': ci.jname.replace('_', '_1')}) + + def getClass(self, classname): + return self.classes[classname or self.Module] + + def isWrapped(self, classname): + name = classname or self.Module + return name in self.classes + + def isSmartClass(self, ci): + if ci.smart != None: + return ci.smart + ci.smart = True + return ci.smart + + def smartWrap(self, ci, fullname): + if self.isSmartClass(ci): + return 'Ptr<' + fullname + '>' + return fullname + + def finalize(self, output_jni_path): + list_file = os.path.join(output_jni_path, 'opencv_jni.hpp') + self.save(list_file, '\n'.join(['#include "%s"' % f for f in self.cpp_files])) + +def copy_java_files(java_files_dir, java_base_path, default_package_path='org/opencv/'): + global total_files, updated_files + java_files = [] + re_filter = re.compile('^.+\\.(java|kt)(.in)?$') + for (root, dirnames, filenames) in os.walk(java_files_dir): + java_files += [os.path.join(root, filename) for filename in filenames if re_filter.match(filename)] + java_files = [f.replace('\\', '/') for f in java_files] + re_package = re.compile('^package +(.+);') + re_prefix = re.compile('^.+[\\+/]([^\\+]+).(java|kt)(.in)?$') + for java_file in java_files: + src = checkFileRemap(java_file) + with open(src, 'r') as f: + package_line = f.readline() + m = re_prefix.match(java_file) + target_fname = m.group(1) + '.' + m.group(2) if m else os.path.basename(java_file) + m = re_package.match(package_line) + if m: + package = m.group(1) + package_path = package.replace('.', '/') + else: + package_path = default_package_path + dest = os.path.join(java_base_path, os.path.join(package_path, target_fname)) + assert dest[-3:] != '.in', dest + ' | ' + target_fname + mkdir_p(os.path.dirname(dest)) + total_files += 1 + if not os.path.exists(dest) or os.stat(src).st_mtime - os.stat(dest).st_mtime > 1: + copyfile(src, dest) + updated_files += 1 + +def sanitize_java_documentation_string(doc, type): + if type == 'class': + doc = doc.replace('@param ', '') + doc = re.sub(re.compile('\\\\f\\$(.*?)\\\\f\\$', re.DOTALL), '\\(' + '\\1' + '\\)', doc) + doc = re.sub(re.compile('\\\\f\\[(.*?)\\\\f\\]', re.DOTALL), '\\(' + '\\1' + '\\)', doc) + doc = re.sub(re.compile('\\\\f\\{(.*?)\\\\f\\}', re.DOTALL), '\\(' + '\\1' + '\\)', doc) + doc = doc.replace('&', '&amp;').replace('\\<', '&lt;').replace('\\>', '&gt;').replace('<', '&lt;').replace('>', '&gt;').replace('$', '$$').replace('@anchor', '').replace('@brief ', '').replace('\\brief ', '').replace('@cite', 'CITE:').replace('@code{.cpp}', '<code>').replace('@code{.txt}', '<code>').replace('@code', '<code>').replace('@copydoc', '').replace('@copybrief', '').replace('@date', '').replace('@defgroup', '').replace('@details ', '').replace('@endcode', '</code>').replace('@endinternal', '').replace('@file', '').replace('@include', 'INCLUDE:').replace('@ingroup', '').replace('@internal', '').replace('@overload', '').replace('@param[in]', '@param').replace('@param[out]', '@param').replace('@ref', 'REF:').replace('@returns', '@return').replace('@sa', 'SEE:').replace('@see', 'SEE:').replace('@snippet', 'SNIPPET:').replace('@todo', 'TODO:').replace('@warning ', 'WARNING: ') + doc = re.sub(re.compile('\\*\\*([^\\*]+?)\\*\\*', re.DOTALL), '<b>' + '\\1' + '</b>', doc) + lines = doc.splitlines() + lines = list(map(lambda x: x[x.find('*'):].strip() if x.lstrip().startswith('*') else x, lines)) + listInd = [] + indexDiff = 0 + for (index, line) in enumerate(lines[:]): + if line.strip().startswith('-'): + i = line.find('-') + if not listInd or i > listInd[-1]: + lines.insert(index + indexDiff, ' ' * len(listInd) + '<ul>') + indexDiff += 1 + listInd.append(i) + lines.insert(index + indexDiff, ' ' * len(listInd) + '<li>') + indexDiff += 1 + elif i == listInd[-1]: + lines.insert(index + indexDiff, ' ' * len(listInd) + '</li>') + indexDiff += 1 + lines.insert(index + indexDiff, ' ' * len(listInd) + '<li>') + indexDiff += 1 + elif len(listInd) > 1 and i == listInd[-2]: + lines.insert(index + indexDiff, ' ' * len(listInd) + '</li>') + indexDiff += 1 + del listInd[-1] + lines.insert(index + indexDiff, ' ' * len(listInd) + '</ul>') + indexDiff += 1 + lines.insert(index + indexDiff, ' ' * len(listInd) + '<li>') + indexDiff += 1 + else: + lines.insert(index + indexDiff, ' ' * len(listInd) + '</li>') + indexDiff += 1 + del listInd[-1] + lines.insert(index + indexDiff, ' ' * len(listInd) + '</ul>') + indexDiff += 1 + lines.insert(index + indexDiff, ' ' * len(listInd) + '<ul>') + indexDiff += 1 + listInd.append(i) + lines.insert(index + indexDiff, ' ' * len(listInd) + '<li>') + indexDiff += 1 + lines[index + indexDiff] = lines[index + indexDiff][0:i] + lines[index + indexDiff][i + 1:] + elif listInd and (not line or line == '*' or line.strip().startswith('@note') or line.strip().startswith('@param')): + lines.insert(index + indexDiff, ' ' * len(listInd) + '</li>') + indexDiff += 1 + del listInd[-1] + lines.insert(index + indexDiff, ' ' * len(listInd) + '</ul>') + indexDiff += 1 + i = len(listInd) - 1 + for value in enumerate(listInd): + lines.append(' ' * i + '  </li>') + lines.append(' ' * i + '</ul>') + i -= 1 + lines = list(map(lambda x: '* ' + x[1:].strip() if x.startswith('*') and x != '*' else x, lines)) + lines = list(map(lambda x: x if x.startswith('*') else '* ' + x if x and x != '*' else '*', lines)) + lines = list(map(lambda x: x.replace('@note', '<b>Note:</b>'), lines)) + lines = list(map(lambda x: re.sub('@b ([\\w:]+?)\\b', '<b>' + '\\1' + '</b>', x), lines)) + lines = list(map(lambda x: re.sub('@c ([\\w:]+?)\\b', '<tt>' + '\\1' + '</tt>', x), lines)) + lines = list(map(lambda x: re.sub('`(.*?)`', '{@code ' + '\\1' + '}', x), lines)) + lines = list(map(lambda x: re.sub('@p ([\\w:]+?)\\b', '{@code ' + '\\1' + '}', x), lines)) + hasValues = False + for line in lines: + if line != '*': + hasValues = True + break + return '/**\n ' + '\n '.join(lines) + '\n */' if hasValues else '' +if __name__ == '__main__': + logging.basicConfig(filename='gen_java.log', format=None, filemode='w', level=logging.INFO) + handler = logging.StreamHandler() + handler.setLevel(os.environ.get('LOG_LEVEL', logging.WARNING)) + logging.getLogger().addHandler(handler) + import argparse + arg_parser = argparse.ArgumentParser(description='OpenCV Java Wrapper Generator') + arg_parser.add_argument('-p', '--parser', required=True, help='OpenCV header parser') + arg_parser.add_argument('-c', '--config', required=True, help='OpenCV modules config') + args = arg_parser.parse_args() + hdr_parser_path = os.path.abspath(args.parser) + if hdr_parser_path.endswith('.py'): + hdr_parser_path = os.path.dirname(hdr_parser_path) + sys.path.append(hdr_parser_path) + import hdr_parser + with open(args.config) as f: + config = json.load(f) + ROOT_DIR = config['rootdir'] + assert os.path.exists(ROOT_DIR) + FILES_REMAP = {os.path.realpath(os.path.join(ROOT_DIR, f['src'])): f['target'] for f in config['files_remap']} + logging.info('\nRemapped configured files (%d):\n%s', len(FILES_REMAP), pformat(FILES_REMAP)) + dstdir = './gen' + jni_path = os.path.join(dstdir, 'cpp') + mkdir_p(jni_path) + java_base_path = os.path.join(dstdir, 'java') + mkdir_p(java_base_path) + java_test_base_path = os.path.join(dstdir, 'test') + mkdir_p(java_test_base_path) + for (subdir, target_subdir) in [('src/java', 'java'), ('android/java', None), ('android-21/java', None), ('android-24/java', None)]: + if target_subdir is None: + target_subdir = subdir + java_files_dir = os.path.join(SCRIPT_DIR, subdir) + if os.path.exists(java_files_dir): + target_path = os.path.join(dstdir, target_subdir) + mkdir_p(target_path) + copy_java_files(java_files_dir, target_path) + generator = JavaWrapperGenerator() + gen_dict_files = [] + print('JAVA: Processing OpenCV modules: %d' % len(config['modules'])) + for e in config['modules']: + (module, module_location) = (e['name'], os.path.join(ROOT_DIR, e['location'])) + logging.info('\n=== MODULE: %s (%s) ===\n' % (module, module_location)) + java_path = os.path.join(java_base_path, 'org/opencv') + mkdir_p(java_path) + module_imports = [] + module_j_code = None + module_jn_code = None + srcfiles = [] + common_headers = [] + misc_location = os.path.join(module_location, 'misc/java') + srcfiles_fname = os.path.join(misc_location, 'filelist') + if os.path.exists(srcfiles_fname): + with open(srcfiles_fname) as f: + srcfiles = [os.path.join(module_location, str(l).strip()) for l in f.readlines() if str(l).strip()] + else: + re_bad = re.compile('(private|.inl.hpp$|_inl.hpp$|.details.hpp$|_winrt.hpp$|/cuda/|/legacy/)') + 
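# Illustration (hypothetical header paths, not part of gen_java.py) of what the re_bad filter above excludes: + # re_bad.search('opencv2/core/private.hpp') -> match ('private'), header skipped + # re_bad.search('opencv2/core/mat.inl.hpp') -> match ('.inl.hpp$'), header skipped + # re_bad.search('opencv2/core/cuda/common.hpp') -> match ('/cuda/'), header skipped + # re_bad.search('opencv2/core.hpp') -> None, header kept + 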
h_files = [] + hpp_files = [] + for (root, dirnames, filenames) in os.walk(os.path.join(module_location, 'include')): + h_files += [os.path.join(root, filename) for filename in fnmatch.filter(filenames, '*.h')] + hpp_files += [os.path.join(root, filename) for filename in fnmatch.filter(filenames, '*.hpp')] + srcfiles = h_files + hpp_files + srcfiles = [f for f in srcfiles if not re_bad.search(f.replace('\\', '/'))] + logging.info('\nFiles (%d):\n%s', len(srcfiles), pformat(srcfiles)) + common_headers_fname = os.path.join(misc_location, 'filelist_common') + if os.path.exists(common_headers_fname): + with open(common_headers_fname) as f: + common_headers = [os.path.join(module_location, str(l).strip()) for l in f.readlines() if str(l).strip()] + logging.info('\nCommon headers (%d):\n%s', len(common_headers), pformat(common_headers)) + gendict_fname = os.path.join(misc_location, 'gen_dict.json') + if os.path.exists(gendict_fname): + with open(gendict_fname) as f: + gen_type_dict = json.load(f) + class_ignore_list += gen_type_dict.get('class_ignore_list', []) + const_ignore_list += gen_type_dict.get('const_ignore_list', []) + const_private_list += gen_type_dict.get('const_private_list', []) + missing_consts.update(gen_type_dict.get('missing_consts', {})) + type_dict.update(gen_type_dict.get('type_dict', {})) + ManualFuncs.update(gen_type_dict.get('ManualFuncs', {})) + func_arg_fix.update(gen_type_dict.get('func_arg_fix', {})) + namespaces_dict.update(gen_type_dict.get('namespaces_dict', {})) + if 'module_j_code' in gen_type_dict: + module_j_code = read_contents(checkFileRemap(os.path.join(misc_location, gen_type_dict['module_j_code']))) + if 'module_jn_code' in gen_type_dict: + module_jn_code = read_contents(checkFileRemap(os.path.join(misc_location, gen_type_dict['module_jn_code']))) + module_imports += gen_type_dict.get('module_imports', []) + java_files_dir = os.path.join(misc_location, 'src/java') + if os.path.exists(java_files_dir): + copy_java_files(java_files_dir, java_base_path, 'org/opencv/' + module) + java_test_files_dir = os.path.join(misc_location, 'test') + if os.path.exists(java_test_files_dir): + copy_java_files(java_test_files_dir, java_test_base_path, 'org/opencv/test/' + module) + if len(srcfiles) > 0: + generator.gen(srcfiles, module, dstdir, jni_path, java_path, common_headers) + else: + logging.info('No generated code for module: %s', module) + generator.finalize(jni_path) + print('Generated files: %d (updated %d)' % (total_files, updated_files)) + +# File: opencv-master/modules/js/generator/embindgen.py +from __future__ import print_function +import sys, re, os +from templates import * +if sys.version_info[0] >= 3: + from io import StringIO +else: + from cStringIO import StringIO +import json +func_table = {} +ignore_list = ['locate', 'minEnclosingCircle', 'checkRange', 'minMaxLoc', 'floodFill', 'phaseCorrelate', 'randShuffle', 'calibrationMatrixValues', 'undistortPoints', 'CamShift', 'meanShift'] + +def makeWhiteList(module_list): + wl = {} + for m in module_list: + for k in m.keys(): + if k in wl: + wl[k] += m[k] + else: + wl[k] = m[k] + return wl + +def makeWhiteListJson(module_list): + wl = {} + for (n, gen_dict) in module_list.items(): + m = gen_dict['whitelist'] + for k in m.keys(): + if k in wl: + wl[k] += m[k] + else: + wl[k] = m[k] + return wl + +def makeNamespacePrefixOverride(module_list): + wl = {} + for (n, gen_dict) in module_list.items(): + if 'namespace_prefix_override' in gen_dict: + m = gen_dict['namespace_prefix_override'] + for k in m.keys(): + if k 
in wl: + wl[k] += m[k] + else: + wl[k] = m[k] + return wl +white_list = None +namespace_prefix_override = None +export_enums = False +export_consts = True +with_wrapped_functions = True +with_default_params = True +with_vec_from_js_array = True +wrapper_namespace = 'Wrappers' +type_dict = {'InputArray': 'const cv::Mat&', 'OutputArray': 'cv::Mat&', 'InputOutputArray': 'cv::Mat&', 'InputArrayOfArrays': 'const std::vector<cv::Mat>&', 'OutputArrayOfArrays': 'std::vector<cv::Mat>&', 'string': 'std::string', 'String': 'std::string', 'const String&': 'const std::string&'} + +def normalize_class_name(name): + return re.sub('^cv\\.', '', name).replace('.', '_') + +class ClassProp(object): + + def __init__(self, decl): + self.tp = decl[0].replace('*', '_ptr').strip() + self.name = decl[1] + self.readonly = True + if '/RW' in decl[3]: + self.readonly = False + +class ClassInfo(object): + + def __init__(self, name, decl=None): + self.cname = name.replace('.', '::') + self.name = self.wname = normalize_class_name(name) + self.ismap = False + self.issimple = False + self.isalgorithm = False + self.methods = {} + self.ext_constructors = {} + self.props = [] + self.consts = {} + customname = False + self.jsfuncs = {} + self.constructor_arg_num = set() + self.has_smart_ptr = False + if decl: + self.bases = decl[1].split()[1:] + if len(self.bases) > 1: + self.bases = [self.bases[0].strip(',')] + if self.bases and self.bases[0].startswith('cv::'): + self.bases[0] = self.bases[0][4:] + if self.bases and self.bases[0] == 'Algorithm': + self.isalgorithm = True + for m in decl[2]: + if m.startswith('='): + self.wname = m[1:] + customname = True + elif m == '/Map': + self.ismap = True + elif m == '/Simple': + self.issimple = True + self.props = [ClassProp(p) for p in decl[3]] + if not customname and self.wname.startswith('Cv'): + self.wname = self.wname[2:] + +def handle_ptr(tp): + if tp.startswith('Ptr_'): + tp = 'Ptr<' + '::'.join(tp.split('_')[1:]) + '>' + return tp + +def handle_vector(tp): + if tp.startswith('vector_'): + tp = handle_vector(tp[tp.find('_') + 1:]) + tp = 'std::vector<' + '::'.join(tp.split('_')) + '>' + return tp + +class ArgInfo(object): + + def __init__(self, arg_tuple): + self.tp = handle_ptr(arg_tuple[0]).strip() + self.name = arg_tuple[1] + self.defval = arg_tuple[2] + self.isarray = False + self.arraylen = 0 + self.arraycvt = None + self.inputarg = True + self.outputarg = False + self.returnarg = False + self.const = False + self.reference = False + for m in arg_tuple[3]: + if m == '/O': + self.inputarg = False + self.outputarg = True + self.returnarg = True + elif m == '/IO': + self.inputarg = True + self.outputarg = True + self.returnarg = True + elif m.startswith('/A'): + self.isarray = True + self.arraylen = m[2:].strip() + elif m.startswith('/CA'): + self.isarray = True + self.arraycvt = m[2:].strip() + elif m == '/C': + self.const = True + elif m == '/Ref': + self.reference = True + if self.tp == 'Mat': + if self.outputarg: + self.tp = 'cv::Mat&' + elif self.inputarg: + self.tp = 'const cv::Mat&' + if self.tp == 'vector_Mat': + if self.outputarg: + self.tp = 'std::vector<cv::Mat>&' + elif self.inputarg: + self.tp = 'const std::vector<cv::Mat>&' + self.tp = handle_vector(self.tp).strip() + if self.const: + self.tp = 'const ' + self.tp + if self.reference: + self.tp = self.tp + '&' + self.py_inputarg = False + self.py_outputarg = False + +class FuncVariant(object): + + def __init__(self, class_name, name, decl, is_constructor, is_class_method, is_const, is_virtual, is_pure_virtual, ref_return, const_return): + 
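# A minimal sketch (commented out; not part of embindgen.py) of how ArgInfo above maps a parsed argument tuple, using a hypothetical tuple: + # arg = ArgInfo(('vector_Mat', 'dst', '', ['/O'], '')) + # arg.outputarg -> True, because '/O' marks an output argument + # arg.tp -> 'std::vector<cv::Mat>&', since an output vector_Mat becomes a mutable reference + 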
self.class_name = class_name + self.name = self.wname = name + self.is_constructor = is_constructor + self.is_class_method = is_class_method + self.is_const = is_const + self.is_virtual = is_virtual + self.is_pure_virtual = is_pure_virtual + self.refret = ref_return + self.constret = const_return + self.rettype = handle_vector(handle_ptr(decl[1]).strip()).strip() + if self.rettype == 'void': + self.rettype = '' + self.args = [] + self.array_counters = {} + for a in decl[3]: + ainfo = ArgInfo(a) + if ainfo.isarray and (not ainfo.arraycvt): + c = ainfo.arraylen + c_arrlist = self.array_counters.get(c, []) + if c_arrlist: + c_arrlist.append(ainfo.name) + else: + self.array_counters[c] = [ainfo.name] + self.args.append(ainfo) + +class FuncInfo(object): + + def __init__(self, class_name, name, cname, namespace, isconstructor): + self.name_id = '_'.join([namespace] + ([class_name] if class_name else []) + [name]) + self.class_name = class_name + self.name = name + self.cname = cname + self.namespace = namespace + self.variants = [] + self.is_constructor = isconstructor + + def add_variant(self, variant): + self.variants.append(variant) + +class Namespace(object): + + def __init__(self): + self.funcs = {} + self.enums = {} + self.consts = {} + +class JSWrapperGenerator(object): + + def __init__(self): + self.bindings = [] + self.wrapper_funcs = [] + self.classes = {} + self.namespaces = {} + self.enums = {} + self.parser = hdr_parser.CppHeaderParser() + self.class_idx = 0 + + def add_class(self, stype, name, decl): + class_info = ClassInfo(name, decl) + class_info.decl_idx = self.class_idx + self.class_idx += 1 + if class_info.name in self.classes: + print('Generator error: class %s (cpp_name=%s) already exists' % (class_info.name, class_info.cname)) + sys.exit(-1) + self.classes[class_info.name] = class_info + + def resolve_class_inheritance(self): + new_classes = {} + for (name, class_info) in self.classes.items(): + if not hasattr(class_info, 'bases'): + new_classes[name] = class_info + continue + if class_info.bases: + chunks = class_info.bases[0].split('::') + base = '_'.join(chunks) + while base not in self.classes and len(chunks) > 1: + del chunks[-2] + base = '_'.join(chunks) + if base not in self.classes: + print('Generator error: unable to resolve base %s for %s' % (class_info.bases[0], class_info.name)) + sys.exit(-1) + else: + class_info.bases[0] = '::'.join(chunks) + class_info.isalgorithm |= self.classes[base].isalgorithm + new_classes[name] = class_info + self.classes = new_classes + + def split_decl_name(self, name): + chunks = name.split('.') + namespace = chunks[:-1] + classes = [] + while namespace and '.'.join(namespace) not in self.parser.namespaces: + classes.insert(0, namespace.pop()) + return (namespace, classes, chunks[-1]) + + def add_enum(self, decl): + name = decl[0].rsplit(' ', 1)[1] + (namespace, classes, val) = self.split_decl_name(name) + namespace = '.'.join(namespace) + ns = self.namespaces.setdefault(namespace, Namespace()) + if len(name) == 0: + name = '<unnamed>' + if name.endswith('<unnamed>'): + i = 0 + while True: + i += 1 + candidate_name = name.replace('<unnamed>', 'unnamed_%u' % i) + if candidate_name not in ns.enums: + name = candidate_name + break + cname = name.replace('.', '::') + type_dict[normalize_class_name(name)] = cname + if name in ns.enums: + print('Generator warning: enum %s (cname=%s) already exists' % (name, cname)) + else: + ns.enums[name] = [] + for item in decl[3]: + ns.enums[name].append(item) + const_decls = decl[3] + for decl in const_decls: + name = decl[0] + 
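# Each enum constant is forwarded to add_const below; e.g. a hypothetical parsed entry ('const cv.SampleEnum.VALUE_ONE', '1', [], []) ends up as + # ns.consts['SampleEnum_VALUE_ONE'] == 'cv::SampleEnum::VALUE_ONE' (namespace 'cv', with the class path folded into the key using underscores) + 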
self.add_const(name.replace('const ', '').strip(), decl) + + def add_const(self, name, decl): + cname = name.replace('.', '::') + (namespace, classes, name) = self.split_decl_name(name) + namespace = '.'.join(namespace) + name = '_'.join(classes + [name]) + ns = self.namespaces.setdefault(namespace, Namespace()) + if name in ns.consts: + print('Generator error: constant %s (cname=%s) already exists' % (name, cname)) + sys.exit(-1) + ns.consts[name] = cname + + def add_func(self, decl): + (namespace, classes, barename) = self.split_decl_name(decl[0]) + cpp_name = '::'.join(namespace + classes + [barename]) + name = barename + class_name = '' + bare_class_name = '' + if classes: + class_name = normalize_class_name('.'.join(namespace + classes)) + bare_class_name = classes[-1] + namespace = '.'.join(namespace) + is_constructor = name == bare_class_name + is_class_method = False + is_const_method = False + is_virtual_method = False + is_pure_virtual_method = False + const_return = False + ref_return = False + for m in decl[2]: + if m == '/S': + is_class_method = True + elif m == '/C': + is_const_method = True + elif m == '/V': + is_virtual_method = True + elif m == '/PV': + is_pure_virtual_method = True + elif m == '/Ref': + ref_return = True + elif m == '/CRet': + const_return = True + elif m.startswith('='): + name = m[1:] + if class_name: + cpp_name = barename + func_map = self.classes[class_name].methods + else: + func_map = self.namespaces.setdefault(namespace, Namespace()).funcs + fi = FuncInfo(class_name, name, cpp_name, namespace, is_constructor) + func = func_map.setdefault(fi.name_id, fi) + variant = FuncVariant(class_name, name, decl, is_constructor, is_class_method, is_const_method, is_virtual_method, is_pure_virtual_method, ref_return, const_return) + func.add_variant(variant) + + def save(self, path, name, buf): + f = open(path + '/' + name, 'wt') + f.write(buf.getvalue()) + f.close() + + def gen_function_binding_with_wrapper(self, func, ns_name, class_info): + binding_text = None + wrapper_func_text = None + bindings = [] + wrappers = [] + for (index, variant) in enumerate(func.variants): + factory = False + if class_info and 'Ptr<' in variant.rettype: + factory = True + base_class_name = variant.rettype + base_class_name = base_class_name.replace('Ptr<', '').replace('>', '').strip() + if base_class_name in self.classes: + self.classes[base_class_name].has_smart_ptr = True + else: + print(base_class_name, ' not found in classes for registering smart pointer using ', class_info.name, 'instead') + self.classes[class_info.name].has_smart_ptr = True + def_args = [] + has_def_param = False + ret_type = 'void' if variant.rettype.strip() == '' else variant.rettype + if ret_type.startswith('Ptr'): + ptr_type = ret_type.replace('Ptr<', '').replace('>', '') + if ptr_type in type_dict: + ret_type = type_dict[ptr_type] + for key in type_dict: + if key in ret_type: + ret_type = re.sub('\\b' + key + '\\b', type_dict[key], ret_type) + arg_types = [] + unwrapped_arg_types = [] + for arg in variant.args: + arg_type = None + if arg.tp in type_dict: + arg_type = type_dict[arg.tp] + else: + arg_type = arg.tp + if with_default_params and arg.defval != '': + def_args.append(arg.defval) + arg_types.append(arg_type) + unwrapped_arg_types.append(arg_type) + func_attribs = '' + if '*' in ''.join(arg_types): + func_attribs += ', allow_raw_pointers()' + if variant.is_pure_virtual: + func_attribs += ', pure_virtual()' + if ns_name != None and ns_name != 'cv': + ns_parts = ns_name.split('.') + if ns_parts[0] 
== 'cv': + ns_parts = ns_parts[1:] + ns_part = '_'.join(ns_parts) + '_' + ns_id = '_'.join(ns_parts) + ns_prefix = namespace_prefix_override.get(ns_id, ns_id) + if ns_prefix: + ns_prefix = ns_prefix + '_' + else: + ns_prefix = '' + if class_info == None: + js_func_name = ns_prefix + func.name + wrap_func_name = js_func_name + '_wrapper' + else: + wrap_func_name = ns_prefix + func.class_name + '_' + func.name + '_wrapper' + js_func_name = func.name + if index > 0: + wrap_func_name += str(index) + js_func_name += str(index) + c_func_name = 'Wrappers::' + wrap_func_name + raw_arg_names = ['arg' + str(i + 1) for i in range(0, len(variant.args))] + arg_names = [] + w_signature = [] + casted_arg_types = [] + for (arg_type, arg_name) in zip(arg_types, raw_arg_names): + casted_arg_name = arg_name + if with_vec_from_js_array: + match = re.search('const std::vector<(.*)>&', arg_type) + if match: + type_in_vect = match.group(1) + if type_in_vect in ['int', 'float', 'double', 'char', 'uchar', 'String', 'std::string']: + casted_arg_name = 'emscripten::vecFromJSArray<' + type_in_vect + '>(' + arg_name + ')' + arg_type = re.sub('std::vector<(.*)>', 'emscripten::val', arg_type) + w_signature.append(arg_type + ' ' + arg_name) + arg_names.append(casted_arg_name) + casted_arg_types.append(arg_type) + arg_types = casted_arg_types + arg_names_casted = [c if a == b else c + '.as<' + a + '>()' for (a, b, c) in zip(unwrapped_arg_types, arg_types, arg_names)] + if class_info and (not factory): + arg_types = [class_info.cname + '&'] + arg_types + w_signature = [class_info.cname + '& arg0 '] + w_signature + for j in range(0, len(def_args) + 1): + postfix = '' + if j > 0: + postfix = '_' + str(j) + if factory: + name = class_info.cname + '::' if variant.class_name else '' + cpp_call_text = static_class_call_template.substitute(scope=name, func=func.cname, args=', '.join(arg_names[:len(arg_names) - j])) + elif class_info: + cpp_call_text = class_call_template.substitute(obj='arg0', func=func.cname, args=', '.join(arg_names[:len(arg_names) - j])) + else: + cpp_call_text = call_template.substitute(func=func.cname, args=', '.join(arg_names[:len(arg_names) - j])) + wrapper_func_text = wrapper_function_template.substitute(ret_val=ret_type, func=wrap_func_name + postfix, signature=', '.join(w_signature[:len(w_signature) - j]), cpp_call=cpp_call_text, const='' if variant.is_const else '') + if class_info: + if factory: + if variant.is_pure_virtual: + continue + args_num = len(variant.args) - j + if args_num in class_info.constructor_arg_num: + continue + class_info.constructor_arg_num.add(args_num) + binding_text = ctr_template.substitute(const='const' if variant.is_const else '', cpp_name=c_func_name + postfix, ret=ret_type, args=','.join(arg_types[:len(arg_types) - j]), optional=func_attribs) + else: + binding_template = overload_class_static_function_template if variant.is_class_method else overload_class_function_template + binding_text = binding_template.substitute(js_name=js_func_name, const='' if variant.is_const else '', cpp_name=c_func_name + postfix, ret=ret_type, args=','.join(arg_types[:len(arg_types) - j]), optional=func_attribs) + else: + binding_text = overload_function_template.substitute(js_name=js_func_name, cpp_name=c_func_name + postfix, const='const' if variant.is_const else '', ret=ret_type, args=', '.join(arg_types[:len(arg_types) - j]), optional=func_attribs) + bindings.append(binding_text) + wrappers.append(wrapper_func_text) + return [bindings, wrappers] + + def gen_function_binding(self, func, 
class_info): + if not class_info == None: + func_name = class_info.cname + '::' + func.cname + else: + func_name = func.cname + binding_text = None + binding_text_list = [] + for (index, variant) in enumerate(func.variants): + factory = False + if not class_info == None and variant.rettype == 'Ptr<' + class_info.name + '>' or (func.name.startswith('create') and variant.rettype): + factory = True + base_class_name = variant.rettype + base_class_name = base_class_name.replace('Ptr<', '').replace('>', '').strip() + if base_class_name in self.classes: + self.classes[base_class_name].has_smart_ptr = True + else: + print(base_class_name, ' not found in classes for registering smart pointer using ', class_info.name, 'instead') + self.classes[class_info.name].has_smart_ptr = True + ret_type = 'void' if variant.rettype.strip() == '' else variant.rettype + ret_type = ret_type.strip() + if ret_type.startswith('Ptr'): + ptr_type = ret_type.replace('Ptr<', '').replace('>', '') + if ptr_type in type_dict: + ret_type = type_dict[ptr_type] + for key in type_dict: + if key in ret_type: + ret_type = re.sub('\\b' + key + '\\b', type_dict[key], ret_type) + if variant.constret and ret_type.startswith('const') == False: + ret_type = 'const ' + ret_type + if variant.refret and ret_type.endswith('&') == False: + ret_type += '&' + arg_types = [] + orig_arg_types = [] + def_args = [] + for arg in variant.args: + if arg.tp in type_dict: + arg_type = type_dict[arg.tp] + else: + arg_type = arg.tp + orig_arg_types.append(arg_type) + if with_default_params and arg.defval != '': + def_args.append(arg.defval) + arg_types.append(orig_arg_types[-1]) + func_attribs = '' + if '*' in ''.join(orig_arg_types): + func_attribs += ', allow_raw_pointers()' + if variant.is_pure_virtual: + func_attribs += ', pure_virtual()' + js_func_name = variant.name + c_func_name = func.cname if factory and variant.is_class_method == False else func_name + for j in range(0, len(def_args) + 1): + postfix = '' + if j > 0: + postfix = '_' + str(j) + if factory: + binding_text = ctr_template.substitute(const='const' if variant.is_const else '', cpp_name=c_func_name + postfix, ret=ret_type, args=','.join(arg_types[:len(arg_types) - j]), optional=func_attribs) + else: + binding_template = overload_class_static_function_template if variant.is_class_method else overload_function_template if class_info == None else overload_class_function_template + binding_text = binding_template.substitute(js_name=js_func_name, const='const' if variant.is_const else '', cpp_name=c_func_name + postfix, ret=ret_type, args=','.join(arg_types[:len(arg_types) - j]), optional=func_attribs) + binding_text_list.append(binding_text) + return binding_text_list + + def print_decls(self, decls): + for d in decls: + print(d[0], d[1], ';'.join(d[2])) + for a in d[3]: + print(' ', a[0], a[1], a[2], end='') + if a[3]: + print('; '.join(a[3])) + else: + print() + + def gen(self, dst_file, src_files, core_bindings): + headers = [] + for hdr in src_files: + decls = self.parser.parse(hdr) + if len(decls) == 0: + continue + headers.append(hdr[hdr.rindex('opencv2/'):]) + for decl in decls: + name = decl[0] + type = name[:name.find(' ')] + if type == 'struct' or type == 'class': + name = name[name.find(' ') + 1:].strip() + self.add_class(type, name, decl) + elif name.startswith('enum'): + self.add_enum(decl) + elif name.startswith('const'): + self.add_const(name.replace('const ', '').strip(), decl) + else: + self.add_func(decl) + self.resolve_class_inheritance() + for (ns_name, ns) in 
sorted(self.namespaces.items()): + ns_parts = ns_name.split('.') + if ns_parts[0] != 'cv': + print('Ignore namespace: {}'.format(ns_name)) + continue + else: + ns_parts = ns_parts[1:] + ns_id = '_'.join(ns_parts) + ns_prefix = namespace_prefix_override.get(ns_id, ns_id) + for (name_id, func) in sorted(ns.funcs.items()): + name = func.name + if ns_prefix: + name = ns_prefix + '_' + name + if name in ignore_list: + continue + if not name in white_list['']: + continue + ext_cnst = False + for variant in func.variants: + if 'Ptr<' in variant.rettype: + base_class_name = variant.rettype + base_class_name = base_class_name.replace('Ptr<', '').replace('>', '').strip() + self.classes[base_class_name].has_smart_ptr = True + class_name = func.name.replace('create', '') + if not class_name in self.classes: + self.classes[base_class_name].methods[func.cname] = func + else: + self.classes[class_name].methods[func.cname] = func + ext_cnst = True + if ext_cnst: + continue + if with_wrapped_functions: + (binding, wrapper) = self.gen_function_binding_with_wrapper(func, ns_name, class_info=None) + self.bindings += binding + self.wrapper_funcs += wrapper + else: + binding = self.gen_function_binding(func, class_info=None) + self.bindings += binding + for (name, class_info) in sorted(self.classes.items()): + class_bindings = [] + if not name in white_list: + continue + for (method_name, method) in sorted(class_info.methods.items()): + if method.cname in ignore_list: + continue + if not method.name in white_list[method.class_name]: + continue + if method.is_constructor: + for variant in method.variants: + args = [] + for arg in variant.args: + arg_type = type_dict[arg.tp] if arg.tp in type_dict else arg.tp + args.append(arg_type) + args_num = len(variant.args) + if args_num in class_info.constructor_arg_num: + continue + class_info.constructor_arg_num.add(args_num) + class_bindings.append(constructor_template.substitute(signature=', '.join(args))) + elif with_wrapped_functions and (len(method.variants) > 1 or len(method.variants[0].args) > 0 or 'String' in method.variants[0].rettype): + (binding, wrapper) = self.gen_function_binding_with_wrapper(method, None, class_info=class_info) + self.wrapper_funcs = self.wrapper_funcs + wrapper + class_bindings = class_bindings + binding + else: + binding = self.gen_function_binding(method, class_info=class_info) + class_bindings = class_bindings + binding + if class_info.has_smart_ptr: + class_bindings.append(smart_ptr_reg_template.substitute(cname=class_info.cname, name=class_info.name)) + for property in class_info.props: + _class_property = class_property_enum_template if property.tp in type_dict else class_property_template + class_bindings.append(_class_property.substitute(js_name=property.name, cpp_name='::'.join([class_info.cname, property.name]))) + dv = '' + base = Template('base<$base>') + assert len(class_info.bases) <= 1, 'multiple inheritance not supported' + if len(class_info.bases) == 1: + dv = ',' + base.substitute(base=', '.join(class_info.bases)) + self.bindings.append(class_template.substitute(cpp_name=class_info.cname, js_name=name, class_templates=''.join(class_bindings), derivation=dv)) + if export_enums: + for (ns_name, ns) in sorted(self.namespaces.items()): + if ns_name.split('.')[0] != 'cv': + continue + for (name, enum) in sorted(ns.enums.items()): + if not name.endswith('.anonymous'): + name = name.replace('cv.', '') + enum_values = [] + for enum_val in enum: + value = enum_val[0][enum_val[0].rfind('.') + 1:] + 
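# For a hypothetical enum_val[0] == 'cv.SampleEnum.VALUE_ONE' the slice above yields value == 'VALUE_ONE'; with the 'cv.' prefix already stripped from name, + # the emitted item below reads: .value("VALUE_ONE", SampleEnum::VALUE_ONE) + 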
enum_values.append(enum_item_template.substitute(val=value, cpp_val=name.replace('.', '::') + '::' + value)) + self.bindings.append(enum_template.substitute(cpp_name=name.replace('.', '::'), js_name=name.replace('.', '_'), enum_items=''.join(enum_values))) + else: + print(name) + if export_consts: + for (ns_name, ns) in sorted(self.namespaces.items()): + if ns_name.split('.')[0] != 'cv': + continue + for (name, const) in sorted(ns.consts.items()): + self.bindings.append(const_template.substitute(js_name=name, value=const)) + with open(core_bindings) as f: + ret = f.read() + header_includes = '\n'.join(['#include "{}"'.format(hdr) for hdr in headers]) + ret = ret.replace('@INCLUDES@', header_includes) + defis = '\n'.join(self.wrapper_funcs) + ret += wrapper_codes_template.substitute(ns=wrapper_namespace, defs=defis) + ret += emscripten_binding_template.substitute(binding_name='testBinding', bindings=''.join(self.bindings)) + text_file = open(dst_file, 'w') + text_file.write(ret) + text_file.close() +if __name__ == '__main__': + if len(sys.argv) < 5: + print('Usage:\n', os.path.basename(sys.argv[0]), '<full path to hdr_parser.py> <bindings.cpp> <headers.txt> <core_bindings.cpp> <opencv_js.config.py>') + print('Current args are: ', ', '.join(["'" + a + "'" for a in sys.argv])) + exit(1) + dstdir = '.' + hdr_parser_path = os.path.abspath(sys.argv[1]) + if hdr_parser_path.endswith('.py'): + hdr_parser_path = os.path.dirname(hdr_parser_path) + sys.path.append(hdr_parser_path) + import hdr_parser + bindingsCpp = sys.argv[2] + headers = open(sys.argv[3], 'r').read().split(';') + coreBindings = sys.argv[4] + whiteListFile = sys.argv[5] + if whiteListFile.endswith('.json') or whiteListFile.endswith('.JSON'): + with open(whiteListFile) as f: + gen_dict = json.load(f) + f.close() + white_list = makeWhiteListJson(gen_dict) + namespace_prefix_override = makeNamespacePrefixOverride(gen_dict) + elif whiteListFile.endswith('.py') or whiteListFile.endswith('.PY'): + exec(open(whiteListFile).read()) + assert white_list + namespace_prefix_override = {'dnn': '', 'aruco': ''} + else: + print('Unexpected format of OpenCV config file', whiteListFile) + exit(1) + generator = JSWrapperGenerator() + generator.gen(bindingsCpp, headers, coreBindings) + +# File: opencv-master/modules/js/generator/templates.py +from string import Template +wrapper_codes_template = Template('namespace $ns {\n$defs\n}') +call_template = Template('$func($args)') +class_call_template = Template('$obj.$func($args)') +static_class_call_template = Template('$scope$func($args)') +wrapper_function_template = Template(' $ret_val $func($signature)$const {\n return $cpp_call;\n }\n ') +wrapper_function_with_def_args_template = Template(' $ret_val $func($signature)$const {\n $check_args\n }\n ') +wrapper_overload_def_values = [Template('return $cpp_call;'), Template('if ($arg0.isUndefined())\n return $cpp_call;\n else\n $next'), Template('if ($arg0.isUndefined() && $arg1.isUndefined())\n return $cpp_call;\n else $next'), Template('if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined())\n return $cpp_call;\n else $next'), Template('if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined())\n return $cpp_call;\n else $next'), Template('if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&\n $arg4.isUndefined())\n return $cpp_call;\n else $next'), Template('if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&\n $arg4.isUndefined() && $arg5.isUndefined() )\n return $cpp_call;\n else $next'), Template('if 
($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&\n $arg4.isUndefined() && $arg5.isUndefined() && $arg6.isUndefined() )\n return $cpp_call;\n else $next'), Template('if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&\n $arg4.isUndefined() && $arg5.isUndefined()&& $arg6.isUndefined() && $arg7.isUndefined())\n return $cpp_call;\n else $next'), Template('if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&\n $arg4.isUndefined() && $arg5.isUndefined()&& $arg6.isUndefined() && $arg7.isUndefined() &&\n $arg8.isUndefined())\n return $cpp_call;\n else $next'), Template('if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&\n $arg4.isUndefined() && $arg5.isUndefined()&& $arg6.isUndefined() && $arg7.isUndefined()&&\n $arg8.isUndefined() && $arg9.isUndefined())\n return $cpp_call;\n else $next')] +emscripten_binding_template = Template('\n\nEMSCRIPTEN_BINDINGS($binding_name) {$bindings\n}\n') +simple_function_template = Template('\n emscripten::function("$js_name", &$cpp_name);\n') +smart_ptr_reg_template = Template('\n .smart_ptr<Ptr<$cname>>("Ptr<$name>")\n') +overload_function_template = Template('\n function("$js_name", select_overload<$ret($args)$const>(&$cpp_name)$optional);\n') +overload_class_function_template = Template('\n .function("$js_name", select_overload<$ret($args)$const>(&$cpp_name)$optional)') +overload_class_static_function_template = Template('\n .class_function("$js_name", select_overload<$ret($args)$const>(&$cpp_name)$optional)') +class_property_template = Template('\n .property("$js_name", &$cpp_name)') +class_property_enum_template = Template('\n .property("$js_name", binding_utils::underlying_ptr(&$cpp_name))') +ctr_template = Template('\n .constructor(select_overload<$ret($args)$const>(&$cpp_name)$optional)') +smart_ptr_ctr_overload_template = Template('\n .smart_ptr_constructor("$ptr_type", select_overload<$ret($args)$const>(&$cpp_name)$optional)') +function_template = Template('\n .function("$js_name", &$cpp_name)') +static_function_template = Template('\n .class_function("$js_name", &$cpp_name)') +constructor_template = Template('\n .constructor<$signature>()') +enum_item_template = Template('\n .value("$val", $cpp_val)') +enum_template = Template('\n emscripten::enum_<$cpp_name>("$js_name")$enum_items;\n') +const_template = Template('\n constant("$js_name", static_cast<long>($value));\n') +vector_template = Template('\n emscripten::register_vector<$cType>("$js_name");\n') +map_template = Template('\n emscripten::register_map("$js_name");\n') +class_template = Template('\n emscripten::class_<$cpp_name $derivation>("$js_name")$class_templates;\n') + +# File: opencv-master/modules/js/src/make_umd.py +import os, sys, re, json, shutil +from subprocess import Popen, PIPE, STDOUT +PY3 = sys.version_info >= (3, 0) + +def make_umd(opencvjs, cvjs): + with open(opencvjs, 'r+b') as src: + content = src.read() + if PY3: + content = content.decode('utf-8') + with open(cvjs, 'w+b') as dst: + dst.write(("\n(function (root, factory) {\n if (typeof define === 'function' && define.amd) {\n // AMD. Register as an anonymous module.\n define(function () {\n return (root.cv = factory());\n });\n } else if (typeof module === 'object' && module.exports) {\n // Node. 
Does not work with strict CommonJS, but\n // only CommonJS-like environments that support module.exports,\n // like Node.\n module.exports = factory();\n } else if (typeof window === 'object') {\n // Browser globals\n root.cv = factory();\n } else if (typeof importScripts === 'function') {\n // Web worker\n root.cv = factory();\n } else {\n // Other shells, e.g. d8\n root.cv = factory();\n }\n}(this, function () {\n %s\n if (typeof Module === 'undefined')\n Module = {};\n return cv(Module);\n}));\n " % content).lstrip().encode('utf-8')) +if __name__ == '__main__': + if len(sys.argv) > 2: + opencvjs = sys.argv[1] + cvjs = sys.argv[2] + if not os.path.isfile(opencvjs): + print('opencv.js file not found! Have you compiled the opencv_js module?') + exit() + make_umd(opencvjs, cvjs) + +# File: opencv-master/modules/objc/generator/gen_objc.py +from __future__ import print_function, unicode_literals +import sys, re, os.path, errno, fnmatch +import json +import logging +import codecs +import io +from shutil import copyfile +from pprint import pformat +from string import Template +if sys.version_info >= (3, 8): + from shutil import copytree + + def copy_tree(src, dst): + copytree(src, dst, dirs_exist_ok=True) +else: + from distutils.dir_util import copy_tree +try: + from io import StringIO +except: + from io import BytesIO as StringIO +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +config = None +ROOT_DIR = None +total_files = 0 +updated_files = 0 +module_imports = [] +namespace_ignore_list = [] +class_ignore_list = [] +enum_ignore_list = [] +const_ignore_list = [] +const_private_list = [] +missing_consts = {} +type_dict = {'': {'objc_type': ''}, 'void': {'objc_type': 'void', 'is_primitive': True, 'swift_type': 'Void'}, 'bool': {'objc_type': 'BOOL', 'is_primitive': True, 'to_cpp': '(bool)%(n)s', 'swift_type': 'Bool'}, 'char': {'objc_type': 'char', 'is_primitive': True, 'swift_type': 'Int8'}, 'int': {'objc_type': 'int', 'is_primitive': True, 'out_type': 'int*', 'out_type_ptr': '%(n)s', 'out_type_ref': '*(int*)(%(n)s)', 'swift_type': 'Int32'}, 'long': {'objc_type': 'long', 'is_primitive': True, 'swift_type': 'Int'}, 'float': {'objc_type': 'float', 'is_primitive': True, 'out_type': 'float*', 'out_type_ptr': '%(n)s', 'out_type_ref': '*(float*)(%(n)s)', 'swift_type': 'Float'}, 'double': {'objc_type': 'double', 'is_primitive': True, 'out_type': 'double*', 'out_type_ptr': '%(n)s', 'out_type_ref': '*(double*)(%(n)s)', 'swift_type': 'Double'}, 'size_t': {'objc_type': 'size_t', 'is_primitive': True}, 'int64': {'objc_type': 'long', 'is_primitive': True, 'swift_type': 'Int'}, 'string': {'objc_type': 'NSString*', 'is_primitive': True, 'from_cpp': '[NSString stringWithUTF8String:%(n)s.c_str()]', 'cast_to': 'std::string', 'swift_type': 'String'}} +namespaces_dict = {} +AdditionalImports = {} +ManualFuncs = {} +func_arg_fix = {} +header_fix = {} +enum_fix = {} +const_fix = {} +method_dict = {('Mat', 'convertTo'): '-convertTo:rtype:alpha:beta:', ('Mat', 'setTo'): '-setToScalar:mask:', ('Mat', 'zeros'): '+zeros:cols:type:', ('Mat', 'ones'): '+ones:cols:type:', ('Mat', 'dot'): '-dot:'} +modules = [] + +class SkipSymbolException(Exception): + + def __init__(self, text): + self.t = text + + def __str__(self): + return self.t + +def read_contents(fname): + with open(fname, 'r') as f: + data = f.read() + return data + +def mkdir_p(path): + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST and os.path.isdir(path): + pass + else: + raise + +def header_import(hdr): + pos = 
hdr.find('/include/') + hdr = hdr[pos + 9 if pos >= 0 else 0:] + return hdr + +def make_objcname(m): + return 'Cv' + m if m[0] in '0123456789' else m + +def make_objcmodule(m): + return 'cv' + m if m[0] in '0123456789' else m +T_OBJC_CLASS_HEADER = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_class_header.template')) +T_OBJC_CLASS_BODY = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_class_body.template')) +T_OBJC_MODULE_HEADER = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_module_header.template')) +T_OBJC_MODULE_BODY = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_module_body.template')) + +class GeneralInfo: + + def __init__(self, type, decl, namespaces): + (self.symbol_id, self.namespace, self.classpath, self.classname, self.name) = self.parseName(decl[0], namespaces) + for ns_ignore in namespace_ignore_list: + if self.symbol_id.startswith(ns_ignore + '.'): + raise SkipSymbolException('ignored namespace ({}): {}'.format(ns_ignore, self.symbol_id)) + self.params = {} + self.deprecated = False + if type == 'class': + docstring = '// C++: class ' + self.name + '\n' + else: + docstring = '' + if len(decl) > 5 and decl[5]: + doc = decl[5] + if re.search('(@|\\\\)deprecated', doc): + self.deprecated = True + docstring += sanitize_documentation_string(doc, type) + elif type == 'class': + docstring += '/**\n * The ' + self.name + ' module\n */\n' + self.docstring = docstring + + def parseName(self, name, namespaces): + name = name[name.find(' ') + 1:].strip() + spaceName = '' + localName = name + for namespace in sorted(namespaces, key=len, reverse=True): + if name.startswith(namespace + '.'): + spaceName = namespace + localName = name.replace(namespace + '.', '') + break + pieces = localName.split('.') + if len(pieces) > 2: + return (name, spaceName, '.'.join(pieces[:-1]), pieces[-2], pieces[-1]) + elif len(pieces) == 2: + return (name, spaceName, pieces[0], pieces[0], pieces[1]) + elif len(pieces) == 1: + return (name, spaceName, '', '', pieces[0]) + else: + return (name, spaceName, '', '') + + def fullName(self, isCPP=False): + result = '.'.join([self.fullClass(), self.name]) + return result if not isCPP else get_cname(result) + + def fullClass(self, isCPP=False): + result = '.'.join([f for f in [self.namespace] + self.classpath.split('.') if len(f) > 0]) + return result if not isCPP else get_cname(result) + +class ConstInfo(GeneralInfo): + + def __init__(self, decl, addedManually=False, namespaces=[], enumType=None): + GeneralInfo.__init__(self, 'const', decl, namespaces) + self.cname = get_cname(self.name) + self.swift_name = None + self.value = decl[1] + self.enumType = enumType + self.addedManually = addedManually + if self.namespace in namespaces_dict: + self.name = '%s_%s' % (namespaces_dict[self.namespace], self.name) + + def __repr__(self): + return Template('CONST $name=$value$manual').substitute(name=self.name, value=self.value, manual='(manual)' if self.addedManually else '') + + def isIgnored(self): + for c in const_ignore_list: + if re.match(c, self.name): + return True + return False + +def normalize_field_name(name): + return name.replace('.', '_').replace('[', '').replace(']', '').replace('_getNativeObjAddr()', '_nativeObj') + +def normalize_class_name(name): + return re.sub('^cv\\.', '', name).replace('.', '_') + +def get_cname(name): + return name.replace('.', '::') + +def cast_from(t): + if t in type_dict and 'cast_from' in type_dict[t]: + return type_dict[t]['cast_from'] + return t + +def cast_to(t): + if t in type_dict and 'cast_to' 
in type_dict[t]: + return type_dict[t]['cast_to'] + return t + +def gen_class_doc(docstring, module, members, enums): + lines = docstring.splitlines() + lines.insert(len(lines) - 1, ' *') + if len(members) > 0: + lines.insert(len(lines) - 1, ' * Member classes: ' + ', '.join(['`' + m + '`' for m in members])) + lines.insert(len(lines) - 1, ' *') + else: + lines.insert(len(lines) - 1, ' * Member of `' + module + '`') + if len(enums) > 0: + lines.insert(len(lines) - 1, ' * Member enums: ' + ', '.join(['`' + m + '`' for m in enums])) + return '\n'.join(lines) + +class ClassPropInfo: + + def __init__(self, decl): + self.ctype = decl[0] + self.name = decl[1] + self.rw = '/RW' in decl[3] + + def __repr__(self): + return Template('PROP $ctype $name').substitute(ctype=self.ctype, name=self.name) + +class ClassInfo(GeneralInfo): + + def __init__(self, decl, namespaces=[]): + GeneralInfo.__init__(self, 'class', decl, namespaces) + self.cname = self.name if not self.classname else self.classname + '_' + self.name + self.real_cname = self.name if not self.classname else self.classname + '::' + self.name + self.methods = [] + self.methods_suffixes = {} + self.consts = [] + self.private_consts = [] + self.imports = set() + self.props = [] + self.objc_name = self.name if not self.classname else self.classname + self.name + self.smart = None + self.additionalImports = None + self.enum_declarations = None + self.method_declarations = None + self.method_implementations = None + self.objc_header_template = None + self.objc_body_template = None + for m in decl[2]: + if m.startswith('='): + self.objc_name = m[1:] + self.base = '' + self.is_base_class = True + self.native_ptr_name = 'nativePtr' + self.member_classes = [] + self.member_enums = [] + if decl[1]: + self.base = re.sub('^.*:', '', decl[1].split(',')[0]).strip() + if self.base: + self.is_base_class = False + self.native_ptr_name = 'nativePtr' + self.objc_name + + def __repr__(self): + return Template('CLASS $namespace::$classpath.$name : $base').substitute(**self.__dict__) + + def getImports(self, module): + return ['#import "%s.h"' % make_objcname(c) for c in sorted([m for m in [type_dict[m]['import_module'] if m in type_dict and 'import_module' in type_dict[m] else m for m in self.imports] if m != self.name])] + + def isEnum(self, c): + return c in type_dict and type_dict[c].get('is_enum', False) + + def getForwardDeclarations(self, module): + enum_decl = [x for x in self.imports if self.isEnum(x) and type_dict[x]['import_module'] != module] + enum_imports = sorted(list(set([type_dict[m]['import_module'] for m in enum_decl]))) + class_decl = [x for x in self.imports if not self.isEnum(x)] + return ['#import "%s.h"' % make_objcname(c) for c in enum_imports] + [''] + ['@class %s;' % c for c in sorted(class_decl)] + + def addImports(self, ctype, is_out_type): + if ctype == self.cname: + return + if ctype in type_dict: + objc_import = None + if 'v_type' in type_dict[ctype]: + objc_import = type_dict[type_dict[ctype]['v_type']]['objc_type'] + elif 'v_v_type' in type_dict[ctype]: + objc_import = type_dict[type_dict[ctype]['v_v_type']]['objc_type'] + elif not type_dict[ctype].get('is_primitive', False): + objc_import = type_dict[ctype]['objc_type'] + if objc_import is not None and objc_import not in ['NSNumber*', 'NSString*'] and (not (objc_import in type_dict and type_dict[objc_import].get('is_primitive', False))): + objc_import = objc_import[:-1] if objc_import[-1] == '*' else objc_import + if objc_import != self.cname: + self.imports.add(objc_import) + 
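# --- Editor's note: a minimal, self-contained sketch (not part of the
# generator) of how a type_dict-style table resolves a C++ type to its
# Objective-C spelling, mirroring the v_type handling used by addImports
# above and build_objc_args below. The miniature TYPE_DICT and the helper
# name resolve_objc_type are illustrative stand-ins, not the real tables.
TYPE_DICT = {
    'int':        {'objc_type': 'int', 'is_primitive': True},
    'Mat':        {'objc_type': 'Mat*'},
    'vector_Mat': {'objc_type': 'Mat*', 'v_type': 'Mat'},
}

def resolve_objc_type(ctype, is_out=False):
    entry = TYPE_DICT[ctype]
    objc_type = entry['objc_type']
    if 'v_type' in entry:
        # vectors surface as NSArray, or NSMutableArray for output arguments
        container = 'NSMutableArray' if is_out else 'NSArray'
        objc_type = container + '<' + objc_type + '>*'
    return objc_type

assert resolve_objc_type('vector_Mat') == 'NSArray<Mat*>*'
assert resolve_objc_type('vector_Mat', is_out=True) == 'NSMutableArray<Mat*>*'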
+ def getAllMethods(self): + result = [] + result += [fi for fi in self.methods if fi.isconstructor] + result += [fi for fi in self.methods if not fi.isconstructor] + return result + + def addMethod(self, fi): + self.methods.append(fi) + + def getConst(self, name): + for cand in self.consts + self.private_consts: + if cand.name == name: + return cand + return None + + def addConst(self, constinfo): + consts = self.consts + for c in const_private_list: + if re.match(c, constinfo.name): + consts = self.private_consts + break + consts.append(constinfo) + + def initCodeStreams(self, Module): + self.additionalImports = StringIO() + self.enum_declarations = StringIO() + self.method_declarations = StringIO() + self.method_implementations = StringIO() + if self.base: + self.objc_header_template = T_OBJC_CLASS_HEADER + self.objc_body_template = T_OBJC_CLASS_BODY + else: + self.base = 'NSObject' + if self.name != Module: + self.objc_header_template = T_OBJC_CLASS_HEADER + self.objc_body_template = T_OBJC_CLASS_BODY + else: + self.objc_header_template = T_OBJC_MODULE_HEADER + self.objc_body_template = T_OBJC_MODULE_BODY + if self.name == Module: + for i in module_imports or []: + self.imports.add(i) + + def cleanupCodeStreams(self): + self.additionalImports.close() + self.enum_declarations.close() + self.method_declarations.close() + self.method_implementations.close() + + def generateObjcHeaderCode(self, m, M, objcM): + return Template(self.objc_header_template + '\n\n').substitute(module=M, additionalImports=self.additionalImports.getvalue(), importBaseClass='#import "' + make_objcname(self.base) + '.h"' if not self.is_base_class else '', forwardDeclarations='\n'.join([_f for _f in self.getForwardDeclarations(objcM) if _f]), enumDeclarations=self.enum_declarations.getvalue(), nativePointerHandling=Template('\n#ifdef __cplusplus\n@property(readonly)cv::Ptr<$cName> $native_ptr_name;\n#endif\n\n#ifdef __cplusplus\n- (instancetype)initWithNativePtr:(cv::Ptr<$cName>)nativePtr;\n+ (instancetype)fromNative:(cv::Ptr<$cName>)nativePtr;\n#endif\n').substitute(cName=self.fullName(isCPP=True), native_ptr_name=self.native_ptr_name), manualMethodDeclations='', methodDeclarations=self.method_declarations.getvalue(), name=self.name, objcName=make_objcname(self.objc_name), cName=self.cname, imports='\n'.join(self.getImports(M)), docs=gen_class_doc(self.docstring, M, self.member_classes, self.member_enums), base=self.base) + + def generateObjcBodyCode(self, m, M): + return Template(self.objc_body_template + '\n\n').substitute(module=M, nativePointerHandling=Template('\n- (instancetype)initWithNativePtr:(cv::Ptr<$cName>)nativePtr {\n self = [super $init_call];\n if (self) {\n _$native_ptr_name = nativePtr;\n }\n return self;\n}\n\n+ (instancetype)fromNative:(cv::Ptr<$cName>)nativePtr {\n return [[$objcName alloc] initWithNativePtr:nativePtr];\n}\n').substitute(cName=self.fullName(isCPP=True), objcName=self.objc_name, native_ptr_name=self.native_ptr_name, init_call='init' if self.is_base_class else 'initWithNativePtr:nativePtr'), manualMethodDeclations='', methodImplementations=self.method_implementations.getvalue(), name=self.name, objcName=self.objc_name, cName=self.cname, imports='\n'.join(self.getImports(M)), docs=gen_class_doc(self.docstring, M, self.member_classes, self.member_enums), base=self.base) + +class ArgInfo: + + def __init__(self, arg_tuple): + self.pointer = False + ctype = arg_tuple[0] + if ctype.endswith('*'): + ctype = ctype[:-1] + self.pointer = True + self.ctype = ctype + self.name = arg_tuple[1] 
+ self.defval = arg_tuple[2] + self.out = '' + if '/O' in arg_tuple[3]: + self.out = 'O' + if '/IO' in arg_tuple[3]: + self.out = 'IO' + + def __repr__(self): + return Template('ARG $ctype$p $name=$defval').substitute(ctype=self.ctype, p=' *' if self.pointer else '', name=self.name, defval=self.defval) + +class FuncInfo(GeneralInfo): + + def __init__(self, decl, module, namespaces=[]): + GeneralInfo.__init__(self, 'func', decl, namespaces) + self.cname = get_cname(decl[0]) + nested_type = self.classpath.find('.') != -1 + self.objc_name = self.name if not nested_type else self.classpath.replace('.', '') + self.classname = self.classname if not nested_type else self.classpath.replace('.', '_') + self.swift_name = self.name + self.cv_name = self.fullName(isCPP=True) + self.isconstructor = self.name == self.classname + if '[' in self.name: + self.objc_name = 'getelem' + if self.namespace in namespaces_dict: + self.objc_name = '%s_%s' % (namespaces_dict[self.namespace], self.objc_name) + self.swift_name = '%s_%s' % (namespaces_dict[self.namespace], self.swift_name) + for m in decl[2]: + if m.startswith('='): + self.objc_name = m[1:] + self.static = ['', 'static']['/S' in decl[2]] + self.ctype = re.sub('^CvTermCriteria', 'TermCriteria', decl[1] or '') + self.args = [] + func_fix_map = func_arg_fix.get(self.classname or module, {}).get(self.objc_name, {}) + header_fixes = header_fix.get(self.classname or module, {}).get(self.objc_name, {}) + self.prolog = header_fixes.get('prolog', None) + self.epilog = header_fixes.get('epilog', None) + for a in decl[3]: + arg = a[:] + arg_fix_map = func_fix_map.get(arg[1], {}) + arg[0] = arg_fix_map.get('ctype', arg[0]) + arg[2] = arg_fix_map.get('defval', arg[2]) + arg[3] = arg_fix_map.get('attrib', arg[3]) + self.args.append(ArgInfo(arg)) + if type_complete(self.args, self.ctype): + func_fix_map = func_arg_fix.get(self.classname or module, {}).get(self.signature(self.args), {}) + name_fix_map = func_fix_map.get(self.name, {}) + self.objc_name = name_fix_map.get('name', self.objc_name) + self.swift_name = name_fix_map.get('swift_name', self.swift_name) + for arg in self.args: + arg_fix_map = func_fix_map.get(arg.name, {}) + arg.ctype = arg_fix_map.get('ctype', arg.ctype) + arg.defval = arg_fix_map.get('defval', arg.defval) + arg.name = arg_fix_map.get('name', arg.name) + + def __repr__(self): + return Template('FUNC <$ctype $namespace.$classpath.$name $args>').substitute(**self.__dict__) + + def __lt__(self, other): + return self.__repr__() < other.__repr__() + + def signature(self, args): + objc_args = build_objc_args(args) + return '(' + type_dict[self.ctype]['objc_type'] + ')' + self.objc_name + ' '.join(objc_args) + +def type_complete(args, ctype): + for a in args: + if a.ctype not in type_dict: + if not a.defval and a.ctype.endswith('*'): + a.defval = 0 + if a.defval: + a.ctype = '' + continue + return False + if ctype not in type_dict: + return False + return True + +def build_objc_args(args): + objc_args = [] + for a in args: + if a.ctype not in type_dict: + if not a.defval and a.ctype.endswith('*'): + a.defval = 0 + if a.defval: + a.ctype = '' + continue + if not a.ctype: + continue + objc_type = type_dict[a.ctype]['objc_type'] + if 'v_type' in type_dict[a.ctype]: + if 'O' in a.out: + objc_type = 'NSMutableArray<' + objc_type + '>*' + else: + objc_type = 'NSArray<' + objc_type + '>*' + elif 'v_v_type' in type_dict[a.ctype]: + if 'O' in a.out: + objc_type = 'NSMutableArray<NSMutableArray<' + objc_type + '>*>*' + else: + objc_type = 'NSArray<NSArray<' + objc_type + '>*>*' + if a.out and 
type_dict[a.ctype].get('out_type', ''): + objc_type = type_dict[a.ctype]['out_type'] + objc_args.append((a.name if len(objc_args) > 0 else '') + ':(' + objc_type + ')' + a.name) + return objc_args + +def build_objc_method_name(args): + objc_method_name = '' + for a in args[1:]: + if a.ctype not in type_dict: + if not a.defval and a.ctype.endswith('*'): + a.defval = 0 + if a.defval: + a.ctype = '' + continue + if not a.ctype: + continue + objc_method_name += a.name + ':' + return objc_method_name + +def get_swift_type(ctype): + has_swift_type = 'swift_type' in type_dict[ctype] + swift_type = type_dict[ctype]['swift_type'] if has_swift_type else type_dict[ctype]['objc_type'] + if swift_type[-1:] == '*': + swift_type = swift_type[:-1] + if not has_swift_type: + if 'v_type' in type_dict[ctype]: + swift_type = '[' + swift_type + ']' + elif 'v_v_type' in type_dict[ctype]: + swift_type = '[[' + swift_type + ']]' + return swift_type + +def build_swift_extension_decl(name, args, constructor, static, ret_type): + extension_decl = '@nonobjc ' + ('class ' if static else '') + ('func ' + name if not constructor else 'convenience init') + '(' + swift_args = [] + for a in args: + if a.ctype not in type_dict: + if not a.defval and a.ctype.endswith('*'): + a.defval = 0 + if a.defval: + a.ctype = '' + continue + if not a.ctype: + continue + swift_type = get_swift_type(a.ctype) + if 'O' in a.out: + if type_dict[a.ctype].get('primitive_type', False): + swift_type = 'UnsafeMutablePointer<' + swift_type + '>' + elif 'v_type' in type_dict[a.ctype] or 'v_v_type' in type_dict[a.ctype] or type_dict[a.ctype].get('primitive_vector', False) or type_dict[a.ctype].get('primitive_vector_vector', False): + swift_type = 'inout ' + swift_type + swift_args.append(a.name + ': ' + swift_type) + extension_decl += ', '.join(swift_args) + ')' + if ret_type: + extension_decl += ' -> ' + get_swift_type(ret_type) + return extension_decl + +def extension_arg(a): + return a.ctype in type_dict and (type_dict[a.ctype].get('primitive_vector', False) or type_dict[a.ctype].get('primitive_vector_vector', False) or (('v_type' in type_dict[a.ctype] or 'v_v_type' in type_dict[a.ctype]) and 'O' in a.out)) + +def extension_tmp_arg(a): + if a.ctype in type_dict: + if type_dict[a.ctype].get('primitive_vector', False) or type_dict[a.ctype].get('primitive_vector_vector', False): + return a.name + 'Vector' + elif ('v_type' in type_dict[a.ctype] or 'v_v_type' in type_dict[a.ctype]) and 'O' in a.out: + return a.name + 'Array' + return a.name + +def make_swift_extension(args): + for a in args: + if extension_arg(a): + return True + return False + +def build_swift_signature(args): + swift_signature = '' + for a in args: + if a.ctype not in type_dict: + if not a.defval and a.ctype.endswith('*'): + a.defval = 0 + if a.defval: + a.ctype = '' + continue + if not a.ctype: + continue + swift_signature += a.name + ':' + return swift_signature + +def build_unrefined_call(name, args, constructor, static, classname, has_ret): + swift_refine_call = ('let ret = ' if has_ret and (not constructor) else '') + (make_objcname(classname) + '.' 
if static else '') + (name if not constructor else 'self.init') + call_args = [] + for a in args: + if a.ctype not in type_dict: + if not a.defval and a.ctype.endswith('*'): + a.defval = 0 + if a.defval: + a.ctype = '' + continue + if not a.ctype: + continue + call_args.append(a.name + ': ' + extension_tmp_arg(a)) + swift_refine_call += '(' + ', '.join(call_args) + ')' + return swift_refine_call + +def build_swift_logues(args): + prologue = [] + epilogue = [] + for a in args: + if a.ctype not in type_dict: + if not a.defval and a.ctype.endswith('*'): + a.defval = 0 + if a.defval: + a.ctype = '' + continue + if not a.ctype: + continue + if a.ctype in type_dict: + if type_dict[a.ctype].get('primitive_vector', False): + prologue.append('let ' + extension_tmp_arg(a) + ' = ' + type_dict[a.ctype]['objc_type'][:-1] + '(' + a.name + ')') + if 'O' in a.out: + unsigned = type_dict[a.ctype].get('unsigned', False) + array_prop = 'array' if not unsigned else 'unsignedArray' + epilogue.append(a.name + '.removeAll()') + epilogue.append(a.name + '.append(contentsOf: ' + extension_tmp_arg(a) + '.' + array_prop + ')') + elif type_dict[a.ctype].get('primitive_vector_vector', False): + if not 'O' in a.out: + prologue.append('let ' + extension_tmp_arg(a) + ' = ' + a.name + '.map {' + type_dict[a.ctype]['objc_type'][:-1] + '($0) }') + else: + prologue.append('let ' + extension_tmp_arg(a) + ' = NSMutableArray(array: ' + a.name + '.map {' + type_dict[a.ctype]['objc_type'][:-1] + '($0) })') + epilogue.append(a.name + '.removeAll()') + epilogue.append(a.name + '.append(contentsOf: ' + extension_tmp_arg(a) + '.map { ($0 as! ' + type_dict[a.ctype]['objc_type'][:-1] + ').array })') + elif ('v_type' in type_dict[a.ctype] or 'v_v_type' in type_dict[a.ctype]) and 'O' in a.out: + prologue.append('let ' + extension_tmp_arg(a) + ' = NSMutableArray(array: ' + a.name + ')') + epilogue.append(a.name + '.removeAll()') + epilogue.append(a.name + '.append(contentsOf: ' + extension_tmp_arg(a) + ' as! 
' + get_swift_type(a.ctype) + ')') + return (prologue, epilogue) + +def add_method_to_dict(class_name, fi): + static = fi.static if fi.classname else True + if (class_name, fi.objc_name) not in method_dict: + objc_method_name = ('+' if static else '-') + fi.objc_name + ':' + build_objc_method_name(fi.args) + method_dict[class_name, fi.objc_name] = objc_method_name + +def see_lookup(objc_class, see): + semi_colon = see.find('::') + see_class = see[:semi_colon] if semi_colon > 0 else objc_class + see_method = see[semi_colon + 2:] if semi_colon != -1 else see + if (see_class, see_method) in method_dict: + method = method_dict[see_class, see_method] + if see_class == objc_class: + return method + else: + return ('-' if method[0] == '-' else '') + '[' + see_class + ' ' + method[1:] + ']' + else: + return see + +class ObjectiveCWrapperGenerator(object): + + def __init__(self): + self.header_files = [] + self.clear() + + def clear(self): + self.namespaces = ['cv'] + mat_class_info = ClassInfo(['class Mat', '', [], []], self.namespaces) + mat_class_info.namespace = 'cv' + self.classes = {'Mat': mat_class_info} + self.classes['Mat'].namespace = 'cv' + self.module = '' + self.Module = '' + self.extension_implementations = None + self.ported_func_list = [] + self.skipped_func_list = [] + self.def_args_hist = {} + + def add_class(self, decl): + classinfo = ClassInfo(decl, namespaces=self.namespaces) + if classinfo.name in class_ignore_list: + logging.info('ignored: %s', classinfo) + return None + if classinfo.name != self.Module: + self.classes[self.Module].member_classes.append(classinfo.objc_name) + name = classinfo.cname + if self.isWrapped(name) and (not classinfo.base): + logging.warning('duplicated: %s', classinfo) + return None + if name in self.classes: + if self.classes[name].symbol_id != classinfo.symbol_id: + logging.warning('duplicated under new id: {} (was {})'.format(classinfo.symbol_id, self.classes[name].symbol_id)) + return None + self.classes[name] = classinfo + if name in type_dict and (not classinfo.base): + logging.warning('duplicated: %s', classinfo) + return None + if name != self.Module: + type_dict.setdefault(name, {}).update({'objc_type': classinfo.objc_name + '*', 'from_cpp': '[' + classinfo.objc_name + ' fromNative:%(n)s]', 'to_cpp': '*(%(n)s.' + classinfo.native_ptr_name + ')'}) + if name in missing_consts: + if 'public' in missing_consts[name]: + for (n, val) in missing_consts[name]['public']: + classinfo.consts.append(ConstInfo([n, val], addedManually=True)) + for p in decl[3]: + classinfo.props.append(ClassPropInfo(p)) + if name != self.Module: + type_dict.setdefault('Ptr_' + name, {}).update({'objc_type': classinfo.objc_name + '*', 'c_type': name, 'real_c_type': classinfo.real_cname, 'to_cpp': '%(n)s.' 
+ classinfo.native_ptr_name, 'from_cpp': '[' + name + ' fromNative:%(n)s]'}) + logging.info('ok: class %s, name: %s, base: %s', classinfo, name, classinfo.base) + return classinfo + + def add_const(self, decl, scope=None, enumType=None): + constinfo = ConstInfo(decl, namespaces=self.namespaces, enumType=enumType) + if constinfo.isIgnored(): + logging.info('ignored: %s', constinfo) + else: + objc_type = enumType.rsplit('.', 1)[-1] if enumType else '' + if constinfo.enumType and constinfo.classpath: + new_name = constinfo.classname + '_' + constinfo.name + const_fix.setdefault(constinfo.classpath, {}).setdefault(objc_type, {})[constinfo.name] = new_name + constinfo.swift_name = constinfo.name + constinfo.name = new_name + logging.info('use outer class prefix: %s', constinfo) + if constinfo.classpath in const_fix and objc_type in const_fix[constinfo.classpath]: + fixed_consts = const_fix[constinfo.classpath][objc_type] + if constinfo.name in fixed_consts: + fixed_const = fixed_consts[constinfo.name] + constinfo.name = fixed_const + constinfo.cname = fixed_const + if constinfo.value in fixed_consts: + constinfo.value = fixed_consts[constinfo.value] + if not self.isWrapped(constinfo.classname): + logging.info('class not found: %s', constinfo) + if not constinfo.name.startswith(constinfo.classname + '_'): + constinfo.swift_name = constinfo.name + constinfo.name = constinfo.classname + '_' + constinfo.name + constinfo.classname = '' + ci = self.getClass(constinfo.classname) + duplicate = ci.getConst(constinfo.name) + if duplicate: + if duplicate.addedManually: + logging.info('manual: %s', constinfo) + else: + logging.warning('duplicated: %s', constinfo) + else: + ci.addConst(constinfo) + logging.info('ok: %s', constinfo) + + def add_enum(self, decl): + enumType = decl[0].rsplit(' ', 1)[1] + if enumType.endswith('<unnamed>'): + enumType = None + else: + ctype = normalize_class_name(enumType) + constinfo = ConstInfo(decl[3][0], namespaces=self.namespaces, enumType=enumType) + objc_type = enumType.rsplit('.', 1)[-1] + if objc_type in enum_ignore_list: + return + if constinfo.classname in enum_fix: + objc_type = enum_fix[constinfo.classname].get(objc_type, objc_type) + import_module = constinfo.classname if constinfo.classname and constinfo.classname != objc_type else self.Module + type_dict[ctype] = {'cast_from': 'int', 'cast_to': get_cname(enumType), 'objc_type': objc_type, 'is_enum': True, 'import_module': import_module, 'from_cpp': '(' + objc_type + ')%(n)s'} + type_dict[objc_type] = {'cast_to': get_cname(enumType), 'objc_type': objc_type, 'is_enum': True, 'import_module': import_module, 'from_cpp': '(' + objc_type + ')%(n)s'} + self.classes[self.Module].member_enums.append(objc_type) + const_decls = decl[3] + for decl in const_decls: + self.add_const(decl, self.Module, enumType) + + def add_func(self, decl): + fi = FuncInfo(decl, self.Module, namespaces=self.namespaces) + classname = fi.classname or self.Module + if classname in class_ignore_list: + logging.info('ignored: %s', fi) + elif classname in ManualFuncs and fi.objc_name in ManualFuncs[classname]: + logging.info('manual: %s', fi) + if 'objc_method_name' in ManualFuncs[classname][fi.objc_name]: + method_dict[classname, fi.objc_name] = ManualFuncs[classname][fi.objc_name]['objc_method_name'] + elif not self.isWrapped(classname): + logging.warning('not found: %s', fi) + else: + ci = self.getClass(classname) + if ci.symbol_id != fi.symbol_id[0:fi.symbol_id.rfind('.')] and ci.symbol_id != self.Module: + logging.warning('SKIP: mismatched class: {} 
(class: {})'.format(fi.symbol_id, ci.symbol_id)) + return + ci.addMethod(fi) + logging.info('ok: %s', fi) + cnt = len([a for a in fi.args if a.defval]) + self.def_args_hist[cnt] = self.def_args_hist.get(cnt, 0) + 1 + add_method_to_dict(classname, fi) + + def save(self, path, buf): + global total_files, updated_files + if len(buf) == 0: + return + total_files += 1 + if os.path.exists(path): + with open(path, 'rt') as f: + content = f.read() + if content == buf: + return + with codecs.open(path, 'w', 'utf-8') as f: + f.write(buf) + updated_files += 1 + + def get_namespace_prefix(self, cname): + namespace = self.classes[cname].namespace if cname in self.classes else 'cv' + return namespace.replace('.', '::') + '::' + + def gen(self, srcfiles, module, output_path, output_objc_path, common_headers, manual_classes): + self.clear() + self.module = module + self.objcmodule = make_objcmodule(module) + self.Module = module.capitalize() + extension_implementations = StringIO() + extension_signatures = [] + parser = hdr_parser.CppHeaderParser(generate_umat_decls=False) + module_ci = self.add_class(['class ' + self.Module, '', [], []]) + module_ci.header_import = module + '.hpp' + includes = [] + for hdr in common_headers: + logging.info('\n===== Common header : %s =====', hdr) + includes.append(header_import(hdr)) + for hdr in srcfiles: + decls = parser.parse(hdr) + self.namespaces = sorted(parser.namespaces) + logging.info('\n\n===== Header: %s =====', hdr) + logging.info('Namespaces: %s', sorted(parser.namespaces)) + if decls: + includes.append(header_import(hdr)) + else: + logging.info('Ignore header: %s', hdr) + for decl in decls: + logging.info('\n--- Incoming ---\n%s', pformat(decl[:5], 4)) + name = decl[0] + try: + if name.startswith('struct') or name.startswith('class'): + ci = self.add_class(decl) + if ci: + ci.header_import = header_import(hdr) + elif name.startswith('const'): + self.add_const(decl) + elif name.startswith('enum'): + self.add_enum(decl) + else: + self.add_func(decl) + except SkipSymbolException as e: + logging.info('SKIP: {} due to {}'.format(name, e)) + self.classes[self.Module].member_classes += manual_classes + logging.info('\n\n===== Generating... 
=====') + package_path = os.path.join(output_objc_path, self.objcmodule) + mkdir_p(package_path) + extension_file = '%s/%sExt.swift' % (package_path, make_objcname(self.Module)) + for ci in sorted(self.classes.values(), key=lambda x: x.symbol_id): + if ci.name == 'Mat': + continue + ci.initCodeStreams(self.Module) + self.gen_class(ci, self.module, extension_implementations, extension_signatures) + classObjcHeaderCode = ci.generateObjcHeaderCode(self.module, self.Module, ci.objc_name) + objc_mangled_name = make_objcname(ci.objc_name) + header_file = '%s/%s.h' % (package_path, objc_mangled_name) + self.save(header_file, classObjcHeaderCode) + self.header_files.append(header_file) + classObjcBodyCode = ci.generateObjcBodyCode(self.module, self.Module) + self.save('%s/%s.mm' % (package_path, objc_mangled_name), classObjcBodyCode) + ci.cleanupCodeStreams() + self.save(extension_file, extension_implementations.getvalue()) + extension_implementations.close() + self.save(os.path.join(output_path, self.objcmodule + '.txt'), self.makeReport()) + + def makeReport(self): + report = StringIO() + total_count = len(self.ported_func_list) + len(self.skipped_func_list) + report.write('PORTED FUNCs LIST (%i of %i):\n\n' % (len(self.ported_func_list), total_count)) + report.write('\n'.join(self.ported_func_list)) + report.write('\n\nSKIPPED FUNCs LIST (%i of %i):\n\n' % (len(self.skipped_func_list), total_count)) + report.write(''.join(self.skipped_func_list)) + for i in sorted(self.def_args_hist.keys()): + report.write('\n%i def args - %i funcs' % (i, self.def_args_hist[i])) + return report.getvalue() + + def fullTypeName(self, t): + if not type_dict[t].get('is_primitive', False) or 'cast_to' in type_dict[t]: + if 'cast_to' in type_dict[t]: + return type_dict[t]['cast_to'] + else: + namespace_prefix = self.get_namespace_prefix(t) + return namespace_prefix + t + else: + return t + + def build_objc2cv_prologue(self, prologue, vector_type, vector_full_type, objc_type, vector_name, array_name): + if not (vector_type in type_dict and 'to_cpp' in type_dict[vector_type] and (type_dict[vector_type]['to_cpp'] != '%(n)s.nativeRef')): + prologue.append('OBJC2CV(' + vector_full_type + ', ' + objc_type[:-1] + ', ' + vector_name + ', ' + array_name + ');') + else: + conv_macro = 'CONV_' + array_name + prologue.append('#define ' + conv_macro + '(e) ' + type_dict[vector_type]['to_cpp'] % {'n': 'e'}) + prologue.append('OBJC2CV_CUSTOM(' + vector_full_type + ', ' + objc_type[:-1] + ', ' + vector_name + ', ' + array_name + ', ' + conv_macro + ');') + prologue.append('#undef ' + conv_macro) + + def build_cv2objc_epilogue(self, epilogue, vector_type, vector_full_type, objc_type, vector_name, array_name): + if not (vector_type in type_dict and 'from_cpp' in type_dict[vector_type] and (type_dict[vector_type]['from_cpp'] != '[' + objc_type[:-1] + ' fromNative:%(n)s]')): + epilogue.append('CV2OBJC(' + vector_full_type + ', ' + objc_type[:-1] + ', ' + vector_name + ', ' + array_name + ');') + else: + unconv_macro = 'UNCONV_' + array_name + epilogue.append('#define ' + unconv_macro + '(e) ' + type_dict[vector_type]['from_cpp'] % {'n': 'e'}) + epilogue.append('CV2OBJC_CUSTOM(' + vector_full_type + ', ' + objc_type[:-1] + ', ' + vector_name + ', ' + array_name + ', ' + unconv_macro + ');') + epilogue.append('#undef ' + unconv_macro) + + def gen_func(self, ci, fi, extension_implementations, extension_signatures): + logging.info('%s', fi) + method_declarations = ci.method_declarations + method_implementations = ci.method_implementations + 
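# --- Editor's note: gen_func below expands one C++ signature with trailing
# default arguments into several generated wrappers by repeatedly popping
# the last defaulted argument (see the `if args and args[-1].defval:
# args.pop()` loop further down). A standalone sketch of that idea, using
# an illustrative _Arg tuple rather than the real ArgInfo class:
from collections import namedtuple

_Arg = namedtuple('_Arg', ['name', 'defval'])

def _trimmed_signatures(name, args):
    sigs = []
    args = list(args)
    while True:
        sigs.append('%s(%s)' % (name, ', '.join(a.name for a in args)))
        if args and args[-1].defval:   # drop a trailing defaulted argument
            args.pop()
        else:                          # no trailing default left: stop
            break
    return sigs

# resize(src, dsize, fx=0, fy=0) yields three progressively shorter wrappers
assert _trimmed_signatures('resize', [_Arg('src', ''), _Arg('dsize', ''),
                                      _Arg('fx', '0'), _Arg('fy', '0')]) == [
    'resize(src, dsize, fx, fy)', 'resize(src, dsize, fx)', 'resize(src, dsize)']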
decl_args = [] + for a in fi.args: + s = a.ctype or ' _hidden_ ' + if a.pointer: + s += '*' + elif a.out: + s += '&' + s += ' ' + a.name + if a.defval: + s += ' = ' + str(a.defval) + decl_args.append(s) + c_decl = '%s %s %s(%s)' % (fi.static, fi.ctype, fi.cname, ', '.join(decl_args)) + method_declarations.write('\n//\n// %s\n//\n' % c_decl) + method_implementations.write('\n//\n// %s\n//\n' % c_decl) + if fi.ctype not in type_dict: + msg = "// Return type '%s' is not supported, skipping the function\n\n" % fi.ctype + self.skipped_func_list.append(c_decl + '\n' + msg) + method_declarations.write(' ' * 4 + msg) + logging.warning('SKIP:' + c_decl.strip() + '\t due to RET type ' + fi.ctype) + return + for a in fi.args: + if a.ctype not in type_dict: + if not a.defval and a.ctype.endswith('*'): + a.defval = 0 + if a.defval: + a.ctype = '' + continue + msg = "// Unknown type '%s' (%s), skipping the function\n\n" % (a.ctype, a.out or 'I') + self.skipped_func_list.append(c_decl + '\n' + msg) + method_declarations.write(msg) + logging.warning('SKIP:' + c_decl.strip() + '\t due to ARG type ' + a.ctype + '/' + (a.out or 'I')) + return + self.ported_func_list.append(c_decl) + args = fi.args[:] + objc_signatures = [] + while True: + cv_args = [] + prologue = [] + epilogue = [] + if fi.ctype: + ci.addImports(fi.ctype, False) + for a in args: + if not 'v_type' in type_dict[a.ctype] and (not 'v_v_type' in type_dict[a.ctype]): + cast = '(' + type_dict[a.ctype]['cast_to'] + ')' if 'cast_to' in type_dict[a.ctype] else '' + cv_name = type_dict[a.ctype].get('to_cpp', cast + '%(n)s') if a.ctype else a.defval + if a.pointer and (not cv_name == '0'): + cv_name = '&(' + cv_name + ')' + if 'O' in a.out and type_dict[a.ctype].get('out_type', ''): + cv_name = type_dict[a.ctype].get('out_type_ptr' if a.pointer else 'out_type_ref', '%(n)s') + cv_args.append(type_dict[a.ctype].get('cv_name', cv_name) % {'n': a.name}) + if not a.ctype: + continue + ci.addImports(a.ctype, 'O' in a.out) + if 'v_type' in type_dict[a.ctype]: + vector_cpp_type = type_dict[a.ctype]['v_type'] + objc_type = type_dict[a.ctype]['objc_type'] + has_namespace = vector_cpp_type.find('::') != -1 + ci.addImports(a.ctype, False) + vector_full_cpp_type = self.fullTypeName(vector_cpp_type) if not has_namespace else vector_cpp_type + vector_cpp_name = a.name + 'Vector' + cv_args.append(vector_cpp_name) + self.build_objc2cv_prologue(prologue, vector_cpp_type, vector_full_cpp_type, objc_type, vector_cpp_name, a.name) + if 'O' in a.out: + self.build_cv2objc_epilogue(epilogue, vector_cpp_type, vector_full_cpp_type, objc_type, vector_cpp_name, a.name) + if 'v_v_type' in type_dict[a.ctype]: + vector_cpp_type = type_dict[a.ctype]['v_v_type'] + objc_type = type_dict[a.ctype]['objc_type'] + ci.addImports(a.ctype, False) + vector_full_cpp_type = self.fullTypeName(vector_cpp_type) + vector_cpp_name = a.name + 'Vector2' + cv_args.append(vector_cpp_name) + prologue.append('OBJC2CV2(' + vector_full_cpp_type + ', ' + objc_type[:-1] + ', ' + vector_cpp_name + ', ' + a.name + ');') + if 'O' in a.out: + epilogue.append('CV2OBJC2(' + vector_full_cpp_type + ', ' + objc_type[:-1] + ', ' + vector_cpp_name + ', ' + a.name + ');') + objc_args = build_objc_args(args) + objc_signature = fi.signature(args) + swift_ext = make_swift_extension(args) + logging.info('Objective-C: ' + objc_signature) + if objc_signature in objc_signatures: + if args: + args.pop() + continue + else: + break + if fi.docstring: + lines = fi.docstring.splitlines() + toWrite = [] + for (index, line) in 
enumerate(lines): + p0 = line.find('@param') + if p0 != -1: + p0 += 7 + p1 = line.find(' ', p0) + p1 = len(line) if p1 == -1 else p1 + name = line[p0:p1] + for arg in args: + if arg.name == name: + toWrite.append(re.sub('\\*\\s*@param ', '* @param ', line)) + break + else: + s0 = line.find('@see') + if s0 != -1: + sees = line[s0 + 5:].split(',') + toWrite.append(line[:s0 + 5] + ', '.join(['`' + see_lookup(ci.objc_name, see.strip()) + '`' for see in sees])) + else: + toWrite.append(line) + for line in toWrite: + method_declarations.write(line + '\n') + ret_type = fi.ctype + if fi.ctype.endswith('*'): + ret_type = ret_type[:-1] + ret_val = self.fullTypeName(fi.ctype) + ' retVal = ' + ret = 'return retVal;' + tail = '' + constructor = False + if 'v_type' in type_dict[ret_type]: + objc_type = type_dict[ret_type]['objc_type'] + vector_type = type_dict[ret_type]['v_type'] + full_cpp_type = (self.get_namespace_prefix(vector_type) if vector_type.find('::') == -1 else '') + vector_type + prologue.append('NSMutableArray<' + objc_type + '>* retVal = [NSMutableArray new];') + ret_val = 'std::vector<' + full_cpp_type + '> retValVector = ' + self.build_cv2objc_epilogue(epilogue, vector_type, full_cpp_type, objc_type, 'retValVector', 'retVal') + elif 'v_v_type' in type_dict[ret_type]: + objc_type = type_dict[ret_type]['objc_type'] + cpp_type = type_dict[ret_type]['v_v_type'] + if cpp_type.find('::') == -1: + cpp_type = self.get_namespace_prefix(cpp_type) + cpp_type + prologue.append('NSMutableArray<NSMutableArray<' + objc_type + '>*>* retVal = [NSMutableArray new];') + ret_val = 'std::vector< std::vector<' + cpp_type + '> > retValVector = ' + epilogue.append('CV2OBJC2(' + cpp_type + ', ' + objc_type[:-1] + ', retValVector, retVal);') + elif ret_type.startswith('Ptr_'): + cpp_type = type_dict[ret_type]['c_type'] + real_cpp_type = type_dict[ret_type].get('real_c_type', cpp_type) + namespace_prefix = self.get_namespace_prefix(cpp_type) + ret_val = 'cv::Ptr<' + namespace_prefix + real_cpp_type + '> retVal = ' + ret = 'return [' + type_dict[ret_type]['objc_type'][:-1] + ' fromNative:retVal];' + elif ret_type == 'void': + ret_val = '' + ret = '' + elif ret_type == '': + constructor = True + ret_val = 'return [self initWithNativePtr:cv::Ptr<' + fi.fullClass(isCPP=True) + '>(new ' + tail = ')]' + ret = '' + elif self.isWrapped(ret_type): + namespace_prefix = self.get_namespace_prefix(ret_type) + ret_val = 'cv::Ptr<' + namespace_prefix + ret_type + '> retVal = new ' + namespace_prefix + ret_type + '(' + tail = ')' + ret_type_dict = type_dict[ret_type] + from_cpp = ret_type_dict['from_cpp_ptr'] if 'from_cpp_ptr' in ret_type_dict else ret_type_dict['from_cpp'] + ret = 'return ' + from_cpp % {'n': 'retVal'} + ';' + elif 'from_cpp' in type_dict[ret_type]: + ret = 'return ' + type_dict[ret_type]['from_cpp'] % {'n': 'retVal'} + ';' + static = fi.static if fi.classname else True + objc_ret_type = type_dict[fi.ctype]['objc_type'] if type_dict[fi.ctype]['objc_type'] else 'void' if not constructor else 'instancetype' + if 'v_type' in type_dict[ret_type]: + objc_ret_type = 'NSArray<' + objc_ret_type + '>*' + elif 'v_v_type' in type_dict[ret_type]: + objc_ret_type = 'NSArray<NSArray<' + objc_ret_type + '>*>*' + prototype = Template('$static ($objc_ret_type)$objc_name$objc_args').substitute(static='+' if static else '-', objc_ret_type=objc_ret_type, objc_args=' '.join(objc_args), objc_name=fi.objc_name if not constructor else 'init' + ('With' + (args[0].name[0].upper() + args[0].name[1:]) if len(args) > 0 else '')) + if fi.prolog is not None: + method_declarations.write('\n%s\n\n' % 
fi.prolog) + method_declarations.write(Template('$prototype$swift_name$deprecation_decl;\n\n').substitute(prototype=prototype, swift_name=' NS_SWIFT_NAME(' + fi.swift_name + '(' + build_swift_signature(args) + '))' if not constructor else '', deprecation_decl=' DEPRECATED_ATTRIBUTE' if fi.deprecated else '')) + if fi.epilog is not None: + method_declarations.write('%s\n\n' % fi.epilog) + method_implementations.write(Template('$prototype {$prologue\n $ret_val$obj_deref$cv_name($cv_args)$tail;$epilogue$ret\n}\n\n').substitute(prototype=prototype, ret='\n ' + ret if ret else '', ret_val=ret_val, prologue='\n ' + '\n '.join(prologue) if prologue else '', epilogue='\n ' + '\n '.join(epilogue) if epilogue else '', static='+' if static else '-', obj_deref='self.' + ci.native_ptr_name + '->' if not static and (not constructor) else '', cv_name=fi.cv_name if static else fi.fullClass(isCPP=True) if constructor else fi.name, cv_args=', '.join(cv_args), tail=tail)) + if swift_ext: + prototype = build_swift_extension_decl(fi.swift_name, args, constructor, static, ret_type) + if not (ci.name, prototype) in extension_signatures and (not (ci.base, prototype) in extension_signatures): + (pro, epi) = build_swift_logues(args) + extension_implementations.write(Template('public extension $classname {\n $deprecation_decl$prototype {\n$prologue\n$unrefined_call$epilogue$ret\n }\n}\n\n').substitute(classname=make_objcname(ci.name), deprecation_decl='@available(*, deprecated)\n ' if fi.deprecated else '', prototype=prototype, prologue=' ' + '\n '.join(pro), unrefined_call=' ' + build_unrefined_call(fi.swift_name, args, constructor, static, ci.name, ret_type is not None and ret_type != 'void'), epilogue='\n ' + '\n '.join(epi) if len(epi) > 0 else '', ret='\n return ret' if ret_type is not None and ret_type != 'void' and (not constructor) else '')) + extension_signatures.append((ci.name, prototype)) + objc_signatures.append(objc_signature) + if args and args[-1].defval: + args.pop() + else: + break + + def gen_class(self, ci, module, extension_implementations, extension_signatures): + logging.info('%s', ci) + additional_imports = [] + if module in AdditionalImports: + if '*' in AdditionalImports[module]: + additional_imports += AdditionalImports[module]['*'] + if ci.name in AdditionalImports[module]: + additional_imports += AdditionalImports[module][ci.name] + if hasattr(ci, 'header_import'): + h = '"{}"'.format(ci.header_import) + if not h in additional_imports: + additional_imports.append(h) + h = '"{}.hpp"'.format(module) + if h in additional_imports: + additional_imports.remove(h) + h = '"opencv2/{}.hpp"'.format(module) + if not h in additional_imports: + additional_imports.insert(0, h) + if additional_imports: + ci.additionalImports.write('\n'.join(['#import %s' % make_objcname(h) for h in additional_imports])) + wrote_consts_pragma = False + consts_map = {c.name: c for c in ci.private_consts} + consts_map.update({c.name: c for c in ci.consts}) + + def const_value(v): + if v in consts_map: + target = consts_map[v] + assert target.value != v + return const_value(target.value) + return v + if ci.consts: + enumTypes = set([c.enumType for c in ci.consts]) + grouped_consts = {enumType: [c for c in ci.consts if c.enumType == enumType] for enumType in enumTypes} + for typeName in sorted(grouped_consts.keys(), key=lambda x: str(x) if x is not None else ''): + consts = grouped_consts[typeName] + logging.info('%s', consts) + if typeName: + typeNameShort = typeName.rsplit('.', 1)[-1] + if ci.cname in enum_fix: + 
typeNameShort = enum_fix[ci.cname].get(typeNameShort, typeNameShort) + ci.enum_declarations.write('\n// C++: enum {1} ({2})\ntypedef NS_ENUM(int, {1}) {{\n {0}\n}};\n\n'.format(',\n '.join(['%s = %s' % (c.name + (' NS_SWIFT_NAME(' + c.swift_name + ')' if c.swift_name else ''), c.value) for c in consts]), typeNameShort, typeName)) + else: + if not wrote_consts_pragma: + ci.method_declarations.write('#pragma mark - Class Constants\n\n') + wrote_consts_pragma = True + ci.method_declarations.write('\n{0}\n\n'.format('\n'.join(['@property (class, readonly) int %s NS_SWIFT_NAME(%s);' % (c.name, c.name) for c in consts]))) + declared_consts = [] + match_alphabet = re.compile('[a-zA-Z]') + for c in consts: + value = str(c.value) + if match_alphabet.search(value): + for declared_const in sorted(declared_consts, key=len, reverse=True): + regex = re.compile('(?' if not ci.is_base_class else 'self.nativePtr->' + if 'v_type' in type_data: + vector_cpp_type = type_data['v_type'] + has_namespace = vector_cpp_type.find('::') != -1 + vector_full_cpp_type = self.fullTypeName(vector_cpp_type) if not has_namespace else vector_cpp_type + ret_val = 'std::vector<' + vector_full_cpp_type + '> retValVector = ' + ci.method_implementations.write('-(NSArray<' + objc_type + '>*)' + pi.name + ' {\n') + ci.method_implementations.write('\tNSMutableArray<' + objc_type + '>* retVal = [NSMutableArray new];\n') + ci.method_implementations.write('\t' + ret_val + ptr_ref + pi.name + ';\n') + epilogue = [] + self.build_cv2objc_epilogue(epilogue, vector_cpp_type, vector_full_cpp_type, objc_type, 'retValVector', 'retVal') + ci.method_implementations.write('\t' + '\n\t'.join(epilogue) + '\n') + ci.method_implementations.write('\treturn retVal;\n}\n\n') + elif 'v_v_type' in type_data: + vector_cpp_type = type_data['v_v_type'] + has_namespace = vector_cpp_type.find('::') != -1 + vector_full_cpp_type = self.fullTypeName(vector_cpp_type) if not has_namespace else vector_cpp_type + ret_val = 'std::vector< std::vector<' + vector_full_cpp_type + '> > retValVectorVector = ' + ci.method_implementations.write('-(NSArray<NSArray<' + objc_type + '>*>*)' + pi.name + ' {\n') + ci.method_implementations.write('\tNSMutableArray<NSMutableArray<' + objc_type + '>*>* retVal = [NSMutableArray new];\n') + ci.method_implementations.write('\t' + ret_val + ptr_ref + pi.name + ';\n') + ci.method_implementations.write('\tCV2OBJC2(' + vector_full_cpp_type + ', ' + objc_type[:-1] + ', retValVectorVector, retVal);\n') + ci.method_implementations.write('\treturn retVal;\n}\n\n') + elif self.isWrapped(pi.ctype): + namespace_prefix = self.get_namespace_prefix(pi.ctype) + ci.method_implementations.write('-(' + objc_type + ')' + pi.name + ' {\n') + ci.method_implementations.write('\tcv::Ptr<' + namespace_prefix + pi.ctype + '> retVal = new ' + namespace_prefix + pi.ctype + '(' + ptr_ref + pi.name + ');\n') + from_cpp = type_data['from_cpp_ptr'] if 'from_cpp_ptr' in type_data else type_data['from_cpp'] + ci.method_implementations.write('\treturn ' + from_cpp % {'n': 'retVal'} + ';\n}\n\n') + else: + from_cpp = type_data.get('from_cpp', '%(n)s') + retVal = from_cpp % {'n': ptr_ref + pi.name} + ci.method_implementations.write('-(' + objc_type + ')' + pi.name + ' {\n\treturn ' + retVal + ';\n}\n\n') + if pi.rw: + if 'v_type' in type_data: + vector_cpp_type = type_data['v_type'] + has_namespace = vector_cpp_type.find('::') != -1 + vector_full_cpp_type = self.fullTypeName(vector_cpp_type) if not has_namespace else vector_cpp_type + ci.method_implementations.write('-(void)set' + pi.name[0].upper() + pi.name[1:] + ':(NSArray<' + objc_type + '>*)' + pi.name + '{\n') + 
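# --- Editor's note: the property writer above and below derives the
# Objective-C setter from the property name by capitalizing its first
# letter ('set' + pi.name[0].upper() + pi.name[1:]). The same rule in
# isolation, with an illustrative helper name:
def _objc_setter_decl(objc_type, prop_name):
    setter = 'set' + prop_name[0].upper() + prop_name[1:]
    return '-(void)%s:(%s)%s' % (setter, objc_type, prop_name)

assert _objc_setter_decl('double', 'scaleFactor') == '-(void)setScaleFactor:(double)scaleFactor'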
prologue = [] + self.build_objc2cv_prologue(prologue, vector_cpp_type, vector_full_cpp_type, objc_type, 'valVector', pi.name) + ci.method_implementations.write('\t' + '\n\t'.join(prologue) + '\n') + ci.method_implementations.write('\t' + ptr_ref + pi.name + ' = valVector;\n}\n\n') + else: + to_cpp = type_data.get('to_cpp', '(' + type_data.get('cast_to') + ')%(n)s' if 'cast_to' in type_data else '%(n)s') + val = to_cpp % {'n': pi.name} + ci.method_implementations.write('-(void)set' + pi.name[0].upper() + pi.name[1:] + ':(' + objc_type + ')' + pi.name + ' {\n\t' + ptr_ref + pi.name + ' = ' + val + ';\n}\n\n') + if ci.name in ManualFuncs: + for func in sorted(ManualFuncs[ci.name].keys()): + logging.info('manual function: %s', func) + fn = ManualFuncs[ci.name][func] + ci.method_declarations.write('\n'.join(fn['declaration'])) + ci.method_implementations.write('\n'.join(fn['implementation'])) + + def getClass(self, classname): + return self.classes[classname or self.Module] + + def isWrapped(self, classname): + name = classname or self.Module + return name in self.classes + + def isSmartClass(self, ci): + if ci.smart != None: + return ci.smart + ci.smart = False + if ci.base or ci.name == 'Algorithm': + ci.smart = True + else: + for fi in ci.methods: + if fi.name == 'create': + ci.smart = True + break + return ci.smart + + def smartWrap(self, ci, fullname): + if self.isSmartClass(ci): + return 'Ptr<' + fullname + '>' + return fullname + + def finalize(self, objc_target, output_objc_path, output_objc_build_path): + opencv_header_file = os.path.join(output_objc_path, framework_name + '.h') + opencv_header = '#import <Foundation/Foundation.h>\n\n' + opencv_header += '// ! Project version number\nFOUNDATION_EXPORT double ' + framework_name + 'VersionNumber;\n\n' + opencv_header += '// ! 
Project version string\nFOUNDATION_EXPORT const unsigned char ' + framework_name + 'VersionString[];\n\n' + opencv_header += '\n'.join(['#define AVAILABLE_' + m['name'].upper() for m in config['modules']]) + opencv_header += '\n\n' + opencv_header += '\n'.join(['#import <' + framework_name + '/%s>' % os.path.basename(f) for f in self.header_files]) + self.save(opencv_header_file, opencv_header) + opencv_modulemap_file = os.path.join(output_objc_path, framework_name + '.modulemap') + opencv_modulemap = 'framework module ' + framework_name + ' {\n' + opencv_modulemap += ' umbrella header "' + framework_name + '.h"\n' + opencv_modulemap += '\n'.join([' header "%s"' % os.path.basename(f) for f in self.header_files]) + opencv_modulemap += '\n export *\n module * {export *}\n}\n' + self.save(opencv_modulemap_file, opencv_modulemap) + available_modules = ' '.join(['-DAVAILABLE_' + m['name'].upper() for m in config['modules']]) + cmakelist_template = read_contents(os.path.join(SCRIPT_DIR, 'templates/cmakelists.template')) + cmakelist = Template(cmakelist_template).substitute(modules=';'.join(modules), framework=framework_name, objc_target=objc_target, module_availability_defines=available_modules) + self.save(os.path.join(dstdir, 'CMakeLists.txt'), cmakelist) + mkdir_p(os.path.join(output_objc_build_path, 'framework_build')) + mkdir_p(os.path.join(output_objc_build_path, 'test_build')) + mkdir_p(os.path.join(output_objc_build_path, 'doc_build')) + with open(os.path.join(SCRIPT_DIR, '../doc/README.md')) as readme_in: + readme_body = readme_in.read() + readme_body += '\n\n\n##Modules\n\n' + ', '.join(['`' + m.capitalize() + '`' for m in modules]) + with open(os.path.join(output_objc_build_path, 'doc_build/README.md'), 'w') as readme_out: + readme_out.write(readme_body) + if framework_name != 'OpenCV': + for (dirname, dirs, files) in os.walk(os.path.join(testdir, 'test')): + if dirname.endswith('/resources'): + continue + for filename in files: + filepath = os.path.join(dirname, filename) + with io.open(filepath, encoding='utf-8', errors='ignore') as file: + body = file.read() + body = body.replace('import OpenCV', 'import ' + framework_name) + body = body.replace('#import <OpenCV/OpenCV.h>', '#import <' + framework_name + '/' + framework_name + '.h>') + with codecs.open(filepath, 'w', 'utf-8') as file: + file.write(body) + +def copy_objc_files(objc_files_dir, objc_base_path, module_path, include=False): + global total_files, updated_files + objc_files = [] + re_filter = re.compile('^.+\\.(h|m|mm|swift)$') + for (root, dirnames, filenames) in os.walk(objc_files_dir): + objc_files += [os.path.join(root, filename) for filename in filenames if re_filter.match(filename)] + objc_files = [f.replace('\\', '/') for f in objc_files] + re_prefix = re.compile('^.+/(.+)\\.(h|m|mm|swift)$') + for objc_file in objc_files: + src = objc_file + m = re_prefix.match(objc_file) + target_fname = m.group(1) + '.' 
+ m.group(2) if m else os.path.basename(objc_file) + dest = os.path.join(objc_base_path, os.path.join(module_path, target_fname)) + mkdir_p(os.path.dirname(dest)) + total_files += 1 + if include and m.group(2) == 'h': + generator.header_files.append(dest) + if not os.path.exists(dest) or os.stat(src).st_mtime - os.stat(dest).st_mtime > 1: + copyfile(src, dest) + updated_files += 1 + return objc_files + +def unescape(str): + return str.replace('&lt;', '<').replace('&gt;', '>').replace('&amp;', '&') + +def escape_underscore(str): + return str.replace('_', '\\_') + +def escape_texttt(str): + return re.sub(re.compile('texttt{(.*?)\\}', re.DOTALL), lambda x: 'texttt{' + escape_underscore(x.group(1)) + '}', str) + +def get_macros(tex): + out = '' + if re.search('\\\\fork\\s*{', tex): + out += '\\newcommand{\\fork}[4]{ \\left\\{ \\begin{array}{l l} #1 & \\text{#2}\\\\\\\\ #3 & \\text{#4}\\\\\\\\ \\end{array} \\right.} ' + if re.search('\\\\vecthreethree\\s*{', tex): + out += '\\newcommand{\\vecthreethree}[9]{ \\begin{bmatrix} #1 & #2 & #3\\\\\\\\ #4 & #5 & #6\\\\\\\\ #7 & #8 & #9 \\end{bmatrix} } ' + return out + +def fix_tex(tex): + macros = get_macros(tex) + fix_escaping = escape_texttt(unescape(tex)) + return macros + fix_escaping + +def sanitize_documentation_string(doc, type): + if type == 'class': + doc = doc.replace('@param ', '') + doc = re.sub(re.compile('`\\$\\$(.*?)\\$\\$`', re.DOTALL), lambda x: '`$$' + fix_tex(x.group(1)) + '$$`', doc) + doc = re.sub(re.compile('\\\\f\\{align\\*\\}\\{?(.*?)\\\\f\\}', re.DOTALL), lambda x: '`$$\\begin{aligned} ' + fix_tex(x.group(1)) + ' \\end{aligned}$$`', doc) + doc = re.sub(re.compile('\\\\f\\{equation\\*\\}\\{(.*?)\\\\f\\}', re.DOTALL), lambda x: '`$$\\begin{aligned} ' + fix_tex(x.group(1)) + ' \\end{aligned}$$`', doc) + doc = re.sub(re.compile('\\\\f\\$(.*?)\\\\f\\$', re.DOTALL), lambda x: '`$$' + fix_tex(x.group(1)) + '$$`', doc) + doc = re.sub(re.compile('\\\\f\\[(.*?)\\\\f\\]', re.DOTALL), lambda x: '`$$' + fix_tex(x.group(1)) + '$$`', doc) + doc = re.sub(re.compile('\\\\f\\{(.*?)\\\\f\\}', re.DOTALL), lambda x: '`$$' + fix_tex(x.group(1)) + '$$`', doc) + doc = doc.replace('@anchor', '').replace('@brief ', '').replace('\\brief ', '').replace('@cite', 'CITE:').replace('@code{.cpp}', '').replace('@code{.txt}', '').replace('@code', '').replace('@copydoc', '').replace('@copybrief', '').replace('@date', '').replace('@defgroup', '').replace('@details ', '').replace('@endcode', '').replace('@endinternal', '').replace('@file', '').replace('@include', 'INCLUDE:').replace('@ingroup', '').replace('@internal', '').replace('@overload', '').replace('@param[in]', '@param').replace('@param[out]', '@param').replace('@ref', 'REF:').replace('@note', 'NOTE:').replace('@returns', '@return').replace('@sa ', '@see ').replace('@snippet', 'SNIPPET:').replace('@todo', 'TODO:') + lines = doc.splitlines() + in_code = False + for (i, line) in enumerate(lines): + if line.find('</code>') != -1: + in_code = False + lines[i] = line.replace('</code>', '') + if in_code: + lines[i] = unescape(line) + if line.find('<code>') != -1: + in_code = True + lines[i] = line.replace('<code>', '') + lines = list([x[x.find('*'):].strip() if x.lstrip().startswith('*') else x for x in lines]) + lines = list(['* ' + x[1:].strip() if x.startswith('*') and x != '*' else x for x in lines]) + lines = list([x if x.startswith('*') else '* ' + x if x and x != '*' else '*' for x in lines]) + hasValues = False + for line in lines: + if line != '*': + hasValues = True + break + return '/**\n ' + '\n '.join(lines) + '\n */' if hasValues 
else '' +if __name__ == '__main__': + logging.basicConfig(filename='gen_objc.log', format=None, filemode='w', level=logging.INFO) + handler = logging.StreamHandler() + handler.setLevel(os.environ.get('LOG_LEVEL', logging.WARNING)) + logging.getLogger().addHandler(handler) + import argparse + arg_parser = argparse.ArgumentParser(description='OpenCV Objective-C Wrapper Generator') + arg_parser.add_argument('-p', '--parser', required=True, help='OpenCV header parser') + arg_parser.add_argument('-c', '--config', required=True, help='OpenCV modules config') + arg_parser.add_argument('-t', '--target', required=True, help='Target (either ios or osx or visionos)') + arg_parser.add_argument('-f', '--framework', required=True, help='Framework name') + args = arg_parser.parse_args() + hdr_parser_path = os.path.abspath(args.parser) + if hdr_parser_path.endswith('.py'): + hdr_parser_path = os.path.dirname(hdr_parser_path) + sys.path.append(hdr_parser_path) + import hdr_parser + with open(args.config) as f: + config = json.load(f) + ROOT_DIR = config['rootdir'] + assert os.path.exists(ROOT_DIR) + if 'objc_build_dir' in config: + objc_build_dir = config['objc_build_dir'] + assert os.path.exists(objc_build_dir), objc_build_dir + else: + objc_build_dir = os.getcwd() + dstdir = './gen' + testdir = './test' + objc_base_path = os.path.join(dstdir, 'objc') + mkdir_p(objc_base_path) + objc_test_base_path = testdir + mkdir_p(objc_test_base_path) + copy_objc_files(os.path.join(SCRIPT_DIR, '../test/test'), objc_test_base_path, 'test', False) + copy_objc_files(os.path.join(SCRIPT_DIR, '../test/dummy'), objc_test_base_path, 'dummy', False) + copyfile(os.path.join(SCRIPT_DIR, '../test/cmakelists.template'), os.path.join(objc_test_base_path, 'CMakeLists.txt')) + generator = ObjectiveCWrapperGenerator() + gen_dict_files = [] + framework_name = args.framework + print('Objective-C: Processing OpenCV modules: %d' % len(config['modules'])) + for e in config['modules']: + (module, module_location) = (e['name'], os.path.join(ROOT_DIR, e['location'])) + logging.info('\n=== MODULE: %s (%s) ===\n' % (module, module_location)) + modules.append(module) + module_imports = [] + srcfiles = [] + common_headers = [] + misc_location = os.path.join(module_location, 'misc/objc') + srcfiles_fname = os.path.join(misc_location, 'filelist') + if os.path.exists(srcfiles_fname): + with open(srcfiles_fname) as f: + srcfiles = [os.path.join(module_location, str(l).strip()) for l in f.readlines() if str(l).strip()] + else: + re_bad = re.compile('(private|.inl.hpp$|_inl.hpp$|.detail.hpp$|.details.hpp$|_winrt.hpp$|/cuda/|/legacy/)') + h_files = [] + hpp_files = [] + for (root, dirnames, filenames) in os.walk(os.path.join(module_location, 'include')): + h_files += [os.path.join(root, filename) for filename in fnmatch.filter(filenames, '*.h')] + hpp_files += [os.path.join(root, filename) for filename in fnmatch.filter(filenames, '*.hpp')] + srcfiles = h_files + hpp_files + srcfiles = [f for f in srcfiles if not re_bad.search(f.replace('\\', '/'))] + logging.info('\nFiles (%d):\n%s', len(srcfiles), pformat(srcfiles)) + common_headers_fname = os.path.join(misc_location, 'filelist_common') + if os.path.exists(common_headers_fname): + with open(common_headers_fname) as f: + common_headers = [os.path.join(module_location, str(l).strip()) for l in f.readlines() if str(l).strip()] + logging.info('\nCommon headers (%d):\n%s', len(common_headers), pformat(common_headers)) + gendict_fname = os.path.join(misc_location, 'gen_dict.json') + module_source_map = {} 
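+ # A gen_dict.json file (loaded below when present) carries per-module
+ # overrides for the generator tables. A minimal, hypothetical sketch of
+ # its layout -- only keys actually consumed by this loop are shown, and
+ # the values are illustrative, not taken from any real module:
+ # {
+ #   "class_ignore_list": ["SomeInternalClass"],
+ #   "const_ignore_list": ["CV_SOME_LEGACY_CONST"],
+ #   "type_dict": {"Size": {"objc_type": "Size2i*"}},
+ #   "SourceMap": {"visionos": "ios"}
+ # }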
+ if os.path.exists(gendict_fname): + with open(gendict_fname) as f: + gen_type_dict = json.load(f) + namespace_ignore_list = gen_type_dict.get('namespace_ignore_list', []) + class_ignore_list += gen_type_dict.get('class_ignore_list', []) + enum_ignore_list += gen_type_dict.get('enum_ignore_list', []) + const_ignore_list += gen_type_dict.get('const_ignore_list', []) + const_private_list += gen_type_dict.get('const_private_list', []) + missing_consts.update(gen_type_dict.get('missing_consts', {})) + type_dict.update(gen_type_dict.get('type_dict', {})) + AdditionalImports[module] = gen_type_dict.get('AdditionalImports', {}) + ManualFuncs.update(gen_type_dict.get('ManualFuncs', {})) + func_arg_fix.update(gen_type_dict.get('func_arg_fix', {})) + header_fix.update(gen_type_dict.get('header_fix', {})) + enum_fix.update(gen_type_dict.get('enum_fix', {})) + const_fix.update(gen_type_dict.get('const_fix', {})) + module_source_map = gen_type_dict.get('SourceMap', {}) + namespaces_dict.update(gen_type_dict.get('namespaces_dict', {})) + module_imports += gen_type_dict.get('module_imports', []) + objc_files_dir = os.path.join(misc_location, 'common') + copied_files = [] + if os.path.exists(objc_files_dir): + copied_files += copy_objc_files(objc_files_dir, objc_base_path, module, True) + target_path = 'macosx' if args.target == 'osx' else module_source_map.get(args.target, args.target) + target_files_dir = os.path.join(misc_location, target_path) + if os.path.exists(target_files_dir): + copied_files += copy_objc_files(target_files_dir, objc_base_path, module, True) + objc_test_files_dir = os.path.join(misc_location, 'test') + if os.path.exists(objc_test_files_dir): + copy_objc_files(objc_test_files_dir, objc_test_base_path, 'test', False) + objc_test_resources_dir = os.path.join(objc_test_files_dir, 'resources') + if os.path.exists(objc_test_resources_dir): + copy_tree(objc_test_resources_dir, os.path.join(objc_test_base_path, 'test', 'resources')) + manual_classes = [x for x in [x[x.rfind('/') + 1:-2] for x in [x for x in copied_files if x.endswith('.h')]] if x in type_dict] + if len(srcfiles) > 0: + generator.gen(srcfiles, module, dstdir, objc_base_path, common_headers, manual_classes) + else: + logging.info('No generated code for module: %s', module) + generator.finalize(args.target, objc_base_path, objc_build_dir) + print('Generated files: %d (updated %d)' % (total_files, updated_files)) + +# File: opencv-master/modules/python/package/cv2/__init__.py +"""""" +import os +import importlib +import sys +__all__ = [] +try: + import numpy + import numpy.core.multiarray +except ImportError: + print('OpenCV bindings requires "numpy" package.') + print('Install it via command:') + print(' pip install numpy') + raise + +def __load_extra_py_code_for_module(base, name, enable_debug_print=False): + module_name = '{}.{}'.format(__name__, name) + export_module_name = '{}.{}'.format(base, name) + native_module = sys.modules.pop(module_name, None) + try: + py_module = importlib.import_module(module_name) + except ImportError as err: + if enable_debug_print: + print("Can't load Python code for module:", module_name, '. 
Reason:', err) + return False + if base in sys.modules and (not hasattr(sys.modules[base], name)): + setattr(sys.modules[base], name, py_module) + sys.modules[export_module_name] = py_module + if native_module: + setattr(py_module, '_native', native_module) + for (k, v) in filter(lambda kv: not hasattr(py_module, kv[0]), native_module.__dict__.items()): + if enable_debug_print: + print(' symbol({}): {} = {}'.format(name, k, v)) + setattr(py_module, k, v) + return True + +def __collect_extra_submodules(enable_debug_print=False): + + def modules_filter(module): + return all((not module.startswith('_'), not module.startswith('python-'), os.path.isdir(os.path.join(_extra_submodules_init_path, module)))) + if sys.version_info[0] < 3: + if enable_debug_print: + print('Extra submodules is loaded only for Python 3') + return [] + __INIT_FILE_PATH = os.path.abspath(__file__) + _extra_submodules_init_path = os.path.dirname(__INIT_FILE_PATH) + return filter(modules_filter, os.listdir(_extra_submodules_init_path)) + +def bootstrap(): + import sys + import copy + save_sys_path = copy.copy(sys.path) + if hasattr(sys, 'OpenCV_LOADER'): + print(sys.path) + raise ImportError('ERROR: recursion is detected during loading of "cv2" binary extensions. Check OpenCV installation.') + sys.OpenCV_LOADER = True + DEBUG = False + if hasattr(sys, 'OpenCV_LOADER_DEBUG'): + DEBUG = True + import platform + if DEBUG: + print('OpenCV loader: os.name="{}" platform.system()="{}"'.format(os.name, str(platform.system()))) + LOADER_DIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__))) + PYTHON_EXTENSIONS_PATHS = [] + BINARIES_PATHS = [] + g_vars = globals() + l_vars = locals().copy() + if sys.version_info[:2] < (3, 0): + from .load_config_py2 import exec_file_wrapper + else: + from .load_config_py3 import exec_file_wrapper + + def load_first_config(fnames, required=True): + for fname in fnames: + fpath = os.path.join(LOADER_DIR, fname) + if not os.path.exists(fpath): + if DEBUG: + print('OpenCV loader: config not found, skip: {}'.format(fpath)) + continue + if DEBUG: + print('OpenCV loader: loading config: {}'.format(fpath)) + exec_file_wrapper(fpath, g_vars, l_vars) + return True + if required: + raise ImportError('OpenCV loader: missing configuration file: {}. 
Check OpenCV installation.'.format(fnames)) + load_first_config(['config.py'], True) + load_first_config(['config-{}.{}.py'.format(sys.version_info[0], sys.version_info[1]), 'config-{}.py'.format(sys.version_info[0])], True) + if DEBUG: + print('OpenCV loader: PYTHON_EXTENSIONS_PATHS={}'.format(str(l_vars['PYTHON_EXTENSIONS_PATHS']))) + if DEBUG: + print('OpenCV loader: BINARIES_PATHS={}'.format(str(l_vars['BINARIES_PATHS']))) + applySysPathWorkaround = False + if hasattr(sys, 'OpenCV_REPLACE_SYS_PATH_0'): + applySysPathWorkaround = True + else: + try: + BASE_DIR = os.path.dirname(LOADER_DIR) + if sys.path[0] == BASE_DIR or os.path.realpath(sys.path[0]) == BASE_DIR: + applySysPathWorkaround = True + except: + if DEBUG: + print('OpenCV loader: exception during checking workaround for sys.path[0]') + pass + for p in reversed(l_vars['PYTHON_EXTENSIONS_PATHS']): + sys.path.insert(1 if not applySysPathWorkaround else 0, p) + if os.name == 'nt': + if sys.version_info[:2] >= (3, 8): + for p in l_vars['BINARIES_PATHS']: + try: + os.add_dll_directory(p) + except Exception as e: + if DEBUG: + print('Failed os.add_dll_directory(): ' + str(e)) + pass + os.environ['PATH'] = ';'.join(l_vars['BINARIES_PATHS']) + ';' + os.environ.get('PATH', '') + if DEBUG: + print('OpenCV loader: PATH={}'.format(str(os.environ['PATH']))) + else: + os.environ['LD_LIBRARY_PATH'] = ':'.join(l_vars['BINARIES_PATHS']) + ':' + os.environ.get('LD_LIBRARY_PATH', '') + if DEBUG: + print('Relink everything from native cv2 module to cv2 package') + py_module = sys.modules.pop('cv2') + native_module = importlib.import_module('cv2') + sys.modules['cv2'] = py_module + setattr(py_module, '_native', native_module) + for (item_name, item) in filter(lambda kv: kv[0] not in ('__file__', '__loader__', '__spec__', '__name__', '__package__'), native_module.__dict__.items()): + if item_name not in g_vars: + g_vars[item_name] = item + sys.path = save_sys_path + try: + del sys.OpenCV_LOADER + except Exception as e: + if DEBUG: + print('Exception during delete OpenCV_LOADER:', e) + if DEBUG: + print('OpenCV loader: binary extension... OK') + for submodule in __collect_extra_submodules(DEBUG): + if __load_extra_py_code_for_module('cv2', submodule, DEBUG): + if DEBUG: + print('Extra Python code for', submodule, 'is loaded') + if DEBUG: + print('OpenCV loader: DONE') +bootstrap() + +# File: opencv-master/modules/python/src2/copy_typings_stubs_on_success.py +import argparse +import warnings +import os +import sys +if sys.version_info >= (3, 8): + from functools import partial + import shutil + copy_tree = partial(shutil.copytree, dirs_exist_ok=True) +else: + from distutils.dir_util import copy_tree + +def main(): + args = parse_arguments() + py_typed_path = os.path.join(args.stubs_dir, 'py.typed') + if not os.path.isfile(py_typed_path): + warnings.warn('{} is missing, it means that typings stubs generation is either failed or has been skipped. Ensure that Python 3.6+ is used for build and there is no warnings during Python source code generation phase.'.format(py_typed_path)) + return + copy_tree(args.stubs_dir, args.output_dir) + +def parse_arguments(): + parser = argparse.ArgumentParser(description='Copies generated typing stubs only when generation succeeded. 
This is identified by presence of the `py.typed` file inside typing stubs directory.') + parser.add_argument('--stubs_dir', type=str, help='Path to directory containing generated typing stubs file') + parser.add_argument('--output_dir', type=str, help='Path to output directory') + return parser.parse_args() +if __name__ == '__main__': + main() + +# File: opencv-master/modules/python/src2/gen2.py +from __future__ import print_function +import hdr_parser, sys, re +from string import Template +from collections import namedtuple +from itertools import chain +from typing_stubs_generator import TypingStubsGenerator +if sys.version_info[0] >= 3: + from io import StringIO +else: + from cStringIO import StringIO +if sys.version_info >= (3, 6): + from typing_stubs_generation import SymbolName +else: + SymbolName = namedtuple('SymbolName', ('namespaces', 'classes', 'name')) + + def parse_symbol_name(cls, full_symbol_name, known_namespaces): + chunks = full_symbol_name.split('.') + (namespaces, name) = (chunks[:-1], chunks[-1]) + classes = [] + while len(namespaces) > 0 and '.'.join(namespaces) not in known_namespaces: + classes.insert(0, namespaces.pop()) + return cls(tuple(namespaces), tuple(classes), name) + setattr(SymbolName, 'parse', classmethod(parse_symbol_name)) +forbidden_arg_types = ['void*'] +ignored_arg_types = ['RNG*'] +pass_by_val_types = ['Point*', 'Point2f*', 'Rect*', 'String*', 'double*', 'float*', 'int*'] +gen_template_check_self = Template('\n ${cname} * self1 = 0;\n if (!pyopencv_${name}_getp(self, self1))\n return failmsgp("Incorrect type of self (must be \'${name}\' or its derivative)");\n ${pname} _self_ = ${cvt}(self1);\n') +gen_template_call_constructor_prelude = Template('new (&(self->v)) Ptr<$cname>(); // init Ptr with placement new\n if(self) ') +gen_template_call_constructor = Template('self->v.reset(new ${cname}${py_args})') +gen_template_simple_call_constructor_prelude = Template('if(self) ') +gen_template_simple_call_constructor = Template('new (&(self->v)) ${cname}${py_args}') +gen_template_parse_args = Template('const char* keywords[] = { $kw_list, NULL };\n if( PyArg_ParseTupleAndKeywords(py_args, kw, "$fmtspec", (char**)keywords, $parse_arglist)$code_cvt )') +gen_template_func_body = Template('$code_decl\n $code_parse\n {\n ${code_prelude}ERRWRAP2($code_fcall);\n $code_ret;\n }\n') +gen_template_mappable = Template('\n {\n ${mappable} _src;\n if (pyopencv_to_safe(src, _src, info))\n {\n return cv_mappable_to(_src, dst);\n }\n }\n') +gen_template_type_decl = Template('\n// Converter (${name})\n\ntemplate<>\nstruct PyOpenCV_Converter< ${cname} >\n{\n static PyObject* from(const ${cname}& r)\n {\n return pyopencv_${name}_Instance(r);\n }\n static bool to(PyObject* src, ${cname}& dst, const ArgInfo& info)\n {\n if(!src || src == Py_None)\n return true;\n ${cname} * dst_;\n if (pyopencv_${name}_getp(src, dst_))\n {\n dst = *dst_;\n return true;\n }\n ${mappable_code}\n failmsg("Expected ${cname} for argument \'%s\'", info.name);\n return false;\n }\n};\n\n') +gen_template_map_type_cvt = Template('\ntemplate<> bool pyopencv_to(PyObject* src, ${cname}& dst, const ArgInfo& info);\n\n') +gen_template_set_prop_from_map = Template('\n if( PyMapping_HasKeyString(src, (char*)"$propname") )\n {\n tmp = PyMapping_GetItemString(src, (char*)"$propname");\n ok = tmp && pyopencv_to_safe(tmp, dst.$propname, ArgInfo("$propname", 0));\n Py_DECREF(tmp);\n if(!ok) return false;\n }') +gen_template_type_impl = Template('\n// GetSet (${name})\n\n${getset_code}\n\n// Methods 
(${name})\n\n${methods_code}\n\n// Tables (${name})\n\nstatic PyGetSetDef pyopencv_${name}_getseters[] =\n{${getset_inits}\n {NULL} /* Sentinel */\n};\n\nstatic PyMethodDef pyopencv_${name}_methods[] =\n{\n${methods_inits}\n {NULL, NULL}\n};\n') +gen_template_get_prop = Template('\nstatic PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *closure)\n{\n return pyopencv_from(p->v${access}${member});\n}\n') +gen_template_get_prop_algo = Template('\nstatic PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *closure)\n{\n $cname* _self_ = dynamic_cast<$cname*>(p->v.get());\n if (!_self_)\n return failmsgp("Incorrect type of object (must be \'${name}\' or its derivative)");\n return pyopencv_from(_self_${access}${member});\n}\n') +gen_template_set_prop = Template('\nstatic int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure)\n{\n if (!value)\n {\n PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");\n return -1;\n }\n return pyopencv_to_safe(value, p->v${access}${member}, ArgInfo("value", 0)) ? 0 : -1;\n}\n') +gen_template_set_prop_algo = Template('\nstatic int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure)\n{\n if (!value)\n {\n PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");\n return -1;\n }\n $cname* _self_ = dynamic_cast<$cname*>(p->v.get());\n if (!_self_)\n {\n failmsgp("Incorrect type of object (must be \'${name}\' or its derivative)");\n return -1;\n }\n return pyopencv_to_safe(value, _self_${access}${member}, ArgInfo("value", 0)) ? 0 : -1;\n}\n') +gen_template_prop_init = Template('\n {(char*)"${export_member_name}", (getter)pyopencv_${name}_get_${member}, NULL, (char*)"${export_member_name}", NULL},') +gen_template_rw_prop_init = Template('\n {(char*)"${export_member_name}", (getter)pyopencv_${name}_get_${member}, (setter)pyopencv_${name}_set_${member}, (char*)"${export_member_name}", NULL},') +gen_template_overloaded_function_call = Template('\n {\n${variant}\n\n pyPopulateArgumentConversionErrors();\n }\n') + +class FormatStrings: + string = 's' + unsigned_char = 'b' + short_int = 'h' + int = 'i' + unsigned_int = 'I' + long = 'l' + unsigned_long = 'k' + long_long = 'L' + unsigned_long_long = 'K' + size_t = 'n' + float = 'f' + double = 'd' + object = 'O' +ArgTypeInfo = namedtuple('ArgTypeInfo', ['atype', 'format_str', 'default_value', 'strict_conversion']) +ArgTypeInfo.__new__.__defaults__ = (False,) +simple_argtype_mapping = {'bool': ArgTypeInfo('bool', FormatStrings.unsigned_char, '0', True), 'size_t': ArgTypeInfo('size_t', FormatStrings.unsigned_long_long, '0', True), 'int': ArgTypeInfo('int', FormatStrings.int, '0', True), 'float': ArgTypeInfo('float', FormatStrings.float, '0.f', True), 'double': ArgTypeInfo('double', FormatStrings.double, '0', True), 'c_string': ArgTypeInfo('char*', FormatStrings.string, '(char*)""'), 'string': ArgTypeInfo('std::string', FormatStrings.object, None, True), 'Stream': ArgTypeInfo('Stream', FormatStrings.object, 'Stream::Null()', True), 'cuda_Stream': ArgTypeInfo('cuda::Stream', FormatStrings.object, 'cuda::Stream::Null()', True), 'cuda_GpuMat': ArgTypeInfo('cuda::GpuMat', FormatStrings.object, 'cuda::GpuMat()', True), 'UMat': ArgTypeInfo('UMat', FormatStrings.object, 'UMat()', True)} +python_reserved_keywords = {'True', 'None', 'False', 'as', 'assert', 'def', 'del', 'elif', 'except', 'exec', 'finally', 'from', 'global', 'import', 'in', 'is', 'lambda', 'nonlocal', 'pass', 'print', 'raise', 
'with', 'yield'} + +def normalize_class_name(name): + return re.sub('^cv\\.', '', name).replace('.', '_') + +def get_type_format_string(arg_type_info): + if arg_type_info.strict_conversion: + return FormatStrings.object + else: + return arg_type_info.format_str + +class ClassProp(object): + + def __init__(self, decl): + self.tp = decl[0].replace('*', '_ptr') + self.name = decl[1] + self.default_value = decl[2] + self.readonly = True + if '/RW' in decl[3]: + self.readonly = False + + @property + def export_name(self): + if self.name in python_reserved_keywords: + return self.name + '_' + return self.name + +class ClassInfo(object): + + def __init__(self, name, decl=None, codegen=None): + (self.original_scope_name, self.original_name) = name.rsplit('.', 1) + if codegen: + self.export_scope_name = codegen.get_export_scope_name(self.original_scope_name) + else: + self.export_scope_name = self.original_scope_name + self.export_scope_name = re.sub('^cv\\.?', '', self.export_scope_name) + self.export_name = self.original_name + self.class_id = normalize_class_name(name) + self.cname = name.replace('.', '::') + self.ismap = False + self.is_parameters = False + self.issimple = False + self.isalgorithm = False + self.methods = {} + self.props = [] + self.mappables = [] + self.consts = {} + self.base = None + self.constructor = None + if decl: + bases = decl[1].split()[1:] + if len(bases) > 1: + print('Note: Class %s has more than 1 base class (not supported by Python C extensions)' % (self.cname,)) + print(' Bases: ', ' '.join(bases)) + print(' Only the first base class will be used') + elif len(bases) == 1: + self.base = bases[0].strip(',') + if self.base.startswith('cv::'): + self.base = self.base[4:] + if self.base == 'Algorithm': + self.isalgorithm = True + self.base = self.base.replace('::', '_') + for m in decl[2]: + if m.startswith('='): + self.export_name = m[1:] + elif m == '/Map': + self.ismap = True + elif m == '/Simple': + self.issimple = True + elif m == '/Params': + self.is_parameters = True + self.issimple = True + self.props = [ClassProp(p) for p in decl[3]] + if not self.has_export_alias and self.original_name.startswith('Cv'): + self.export_name = self.export_name[2:] + + @property + def wname(self): + if len(self.export_scope_name) > 0: + return self.export_scope_name.replace('.', '_') + '_' + self.export_name + return self.export_name + + @property + def name(self): + return self.class_id + + @property + def full_export_scope_name(self): + return 'cv.' + self.export_scope_name if len(self.export_scope_name) else 'cv' + + @property + def full_export_name(self): + return self.full_export_scope_name + '.' + self.export_name + + @property + def full_original_name(self): + return self.original_scope_name + '.' 
+ self.original_name + + @property + def has_export_alias(self): + return self.export_name != self.original_name + + def gen_map_code(self, codegen): + all_classes = codegen.classes + code = 'static bool pyopencv_to(PyObject* src, %s& dst, const ArgInfo& info)\n{\n PyObject* tmp;\n bool ok;\n' % self.cname + code += ''.join([gen_template_set_prop_from_map.substitute(propname=p.name, proptype=p.tp) for p in self.props]) + if self.base: + code += '\n return pyopencv_to_safe(src, (%s&)dst, info);\n}\n' % all_classes[self.base].cname + else: + code += '\n return true;\n}\n' + return code + + def gen_code(self, codegen): + all_classes = codegen.classes + if self.ismap: + return self.gen_map_code(codegen) + getset_code = StringIO() + getset_inits = StringIO() + sorted_props = [(p.name, p) for p in self.props] + sorted_props.sort() + access_op = '->' + if self.issimple: + access_op = '.' + for (pname, p) in sorted_props: + if self.isalgorithm: + getset_code.write(gen_template_get_prop_algo.substitute(name=self.name, cname=self.cname, member=pname, membertype=p.tp, access=access_op)) + else: + getset_code.write(gen_template_get_prop.substitute(name=self.name, member=pname, membertype=p.tp, access=access_op)) + if p.readonly: + getset_inits.write(gen_template_prop_init.substitute(name=self.name, member=pname, export_member_name=p.export_name)) + else: + if self.isalgorithm: + getset_code.write(gen_template_set_prop_algo.substitute(name=self.name, cname=self.cname, member=pname, membertype=p.tp, access=access_op)) + else: + getset_code.write(gen_template_set_prop.substitute(name=self.name, member=pname, membertype=p.tp, access=access_op)) + getset_inits.write(gen_template_rw_prop_init.substitute(name=self.name, member=pname, export_member_name=p.export_name)) + methods_code = StringIO() + methods_inits = StringIO() + sorted_methods = list(self.methods.items()) + sorted_methods.sort() + if self.constructor is not None: + methods_code.write(self.constructor.gen_code(codegen)) + for (mname, m) in sorted_methods: + methods_code.write(m.gen_code(codegen)) + methods_inits.write(m.get_tab_entry()) + code = gen_template_type_impl.substitute(name=self.name, getset_code=getset_code.getvalue(), getset_inits=getset_inits.getvalue(), methods_code=methods_code.getvalue(), methods_inits=methods_inits.getvalue()) + return code + + def gen_def(self, codegen): + all_classes = codegen.classes + baseptr = 'NoBase' + if self.base and self.base in all_classes: + baseptr = all_classes[self.base].name + constructor_name = '0' + if self.constructor is not None: + constructor_name = self.constructor.get_wrapper_name() + return 'CVPY_TYPE({}, {}, {}, {}, {}, {}, "{}")\n'.format(self.export_name, self.class_id, self.cname if self.issimple else 'Ptr<{}>'.format(self.cname), self.original_name if self.issimple else 'Ptr', baseptr, constructor_name, '.' 
+ self.export_scope_name if len(self.export_scope_name) > 0 else self.export_scope_name) + +def handle_ptr(tp): + if tp.startswith('Ptr_'): + tp = 'Ptr<' + '::'.join(tp.split('_')[1:]) + '>' + return tp + +class ArgInfo(object): + + def __init__(self, atype, name, default_value, modifiers=(), enclosing_arg=None): + self.tp = handle_ptr(atype) + self.name = name + self.defval = default_value + self._modifiers = tuple(modifiers) + self.isarray = False + self.is_smart_ptr = self.tp.startswith('Ptr<') + self.arraylen = 0 + self.arraycvt = None + for m in self._modifiers: + if m.startswith('/A'): + self.isarray = True + self.arraylen = m[2:].strip() + elif m.startswith('/CA'): + self.isarray = True + self.arraycvt = m[2:].strip() + self.py_inputarg = False + self.py_outputarg = False + self.enclosing_arg = enclosing_arg + + def __str__(self): + return 'ArgInfo("{}", tp="{}", default="{}", in={}, out={})'.format(self.name, self.tp, self.defval, self.inputarg, self.outputarg) + + def __repr__(self): + return str(self) + + @property + def export_name(self): + if self.name in python_reserved_keywords: + return self.name + '_' + return self.name + + @property + def nd_mat(self): + return '/ND' in self._modifiers + + @property + def inputarg(self): + return '/O' not in self._modifiers + + @property + def arithm_op_src_arg(self): + return '/AOS' in self._modifiers + + @property + def outputarg(self): + return '/O' in self._modifiers or '/IO' in self._modifiers + + @property + def pathlike(self): + return '/PATH' in self._modifiers + + @property + def returnarg(self): + return self.outputarg + + @property + def isrvalueref(self): + return '/RRef' in self._modifiers + + @property + def full_name(self): + if self.enclosing_arg is None: + return self.name + return self.enclosing_arg.name + '.' + self.name + + def isbig(self): + return self.tp in ['Mat', 'vector_Mat', 'cuda::GpuMat', 'cuda_GpuMat', 'GpuMat', 'vector_GpuMat', 'vector_cuda_GpuMat', 'UMat', 'vector_UMat'] + + def crepr(self): + arg = 1 if self.outputarg else 0 + arg += 2 if self.arithm_op_src_arg else 0 + arg += 4 if self.pathlike else 0 + arg += 8 if self.nd_mat else 0 + return 'ArgInfo("%s", %d)' % (self.name, arg) + +def find_argument_class_info(argument_type, function_namespace, function_class_name, known_classes): + possible_classes = tuple(filter(lambda cls: cls.endswith(argument_type), known_classes)) + if not possible_classes: + return None + if len(possible_classes) == 1: + return known_classes[possible_classes[0]] + if function_class_name: + type_to_match = function_class_name + '_' + argument_type + if type_to_match in possible_classes: + return known_classes[type_to_match] + else: + type_to_match = argument_type + type_to_match = '{}_{}'.format(function_namespace.lstrip('cv.').replace('.', '_'), type_to_match) + if type_to_match in possible_classes: + return known_classes[type_to_match] + if argument_type in possible_classes: + return known_classes[argument_type] + return None + +class FuncVariant(object): + + def __init__(self, namespace, classname, name, decl, isconstructor, known_classes, isphantom=False): + self.name = self.wname = name + self.isconstructor = isconstructor + self.isphantom = isphantom + self.docstring = decl[5] + self.rettype = decl[4] or handle_ptr(decl[1]) + if self.rettype == 'void': + self.rettype = '' + self.args = [] + self.array_counters = {} + for arg_decl in decl[3]: + assert len(arg_decl) == 4, 'ArgInfo contract is violated. 
Arg declaration should contain:"arg_type", "name", "default_value", "modifiers". Got tuple: {}'.format(arg_decl) + ainfo = ArgInfo(atype=arg_decl[0], name=arg_decl[1], default_value=arg_decl[2], modifiers=arg_decl[3]) + if ainfo.isarray and (not ainfo.arraycvt): + c = ainfo.arraylen + c_arrlist = self.array_counters.get(c, []) + if c_arrlist: + c_arrlist.append(ainfo.name) + else: + self.array_counters[c] = [ainfo.name] + self.args.append(ainfo) + self.init_pyproto(namespace, classname, known_classes) + + def is_arg_optional(self, py_arg_index): + return py_arg_index >= len(self.py_arglist) - self.py_noptargs + + def init_pyproto(self, namespace, classname, known_classes): + argstr = '' + arglist = [] + outarr_list = [] + outlist = [] + firstoptarg = 1000000 + arguments = [] + for arg in self.args: + arg_class_info = find_argument_class_info(arg.tp, namespace, classname, known_classes) + if arg_class_info is not None and arg_class_info.is_parameters: + for prop in arg_class_info.props: + arguments.append(ArgInfo(prop.tp, prop.name, prop.default_value, enclosing_arg=arg)) + else: + arguments.append(arg) + argument_names = tuple((arg.name for arg in arguments)) + assert len(set(argument_names)) == len(argument_names), "Duplicate arguments with names '{}' in function '{}'. Please, check named arguments used in function interface".format(argument_names, self.name) + self.args = arguments + for (argno, a) in enumerate(self.args): + if a.name in self.array_counters: + continue + assert a.tp not in forbidden_arg_types, 'Forbidden type "{}" for argument "{}" in "{}" ("{}")'.format(a.tp, a.name, self.name, self.classname) + if a.tp in ignored_arg_types: + continue + if a.returnarg: + outlist.append((a.name, argno)) + if not a.inputarg and a.isbig(): + outarr_list.append((a.name, argno)) + continue + if not a.inputarg: + continue + if not a.defval: + arglist.append((a.name, argno)) + else: + firstoptarg = min(firstoptarg, len(arglist)) + if outarr_list: + arglist += outarr_list + outarr_list = [] + arglist.append((a.name, argno)) + if outarr_list: + firstoptarg = min(firstoptarg, len(arglist)) + arglist += outarr_list + firstoptarg = min(firstoptarg, len(arglist)) + noptargs = len(arglist) - firstoptarg + argnamelist = [self.args[argno].export_name for (_, argno) in arglist] + argstr = ', '.join(argnamelist[:firstoptarg]) + argstr = '[, '.join([argstr] + argnamelist[firstoptarg:]) + argstr += ']' * noptargs + if self.rettype: + outlist = [('retval', -1)] + outlist + elif self.isconstructor: + assert outlist == [] + outlist = [('self', -1)] + if self.isconstructor: + if classname.startswith('Cv'): + classname = classname[2:] + outstr = '<%s object>' % (classname,) + elif outlist: + outstr = ', '.join([o[0] for o in outlist]) + else: + outstr = 'None' + self.py_arg_str = argstr + self.py_return_str = outstr + self.py_prototype = '%s(%s) -> %s' % (self.wname, argstr, outstr) + self.py_noptargs = noptargs + self.py_arglist = arglist + for (_, argno) in arglist: + self.args[argno].py_inputarg = True + for (_, argno) in outlist: + if argno >= 0: + self.args[argno].py_outputarg = True + self.py_outlist = outlist + +class FuncInfo(object): + + def __init__(self, classname, name, cname, isconstructor, namespace, is_static): + self.classname = classname + self.name = name + self.cname = cname + self.isconstructor = isconstructor + self.namespace = namespace + self.is_static = is_static + self.variants = [] + + def add_variant(self, decl, known_classes, isphantom=False): + 
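+ # 'decl' here is the list produced by hdr_parser for one function:
+ # [func_name, return_ctype, modifier_list, arg_list, original_return_type, docstring],
+ # with each arg_list entry being the 4-item [arg_type, name, default_value,
+ # modifiers] tuple that FuncVariant's constructor asserts on above.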
self.variants.append(FuncVariant(self.namespace, self.classname, self.name, decl, self.isconstructor, known_classes, isphantom)) + + def get_wrapper_name(self): + name = self.name + if self.classname: + classname = self.classname + '_' + if '[' in name: + name = 'getelem' + else: + classname = '' + if self.is_static: + name += '_static' + return 'pyopencv_' + self.namespace.replace('.', '_') + '_' + classname + name + + def get_wrapper_prototype(self, codegen): + full_fname = self.get_wrapper_name() + if self.isconstructor: + return 'static int {fn_name}(pyopencv_{type_name}_t* self, PyObject* py_args, PyObject* kw)'.format(fn_name=full_fname, type_name=codegen.classes[self.classname].name) + if self.classname: + self_arg = 'self' + else: + self_arg = '' + return 'static PyObject* %s(PyObject* %s, PyObject* py_args, PyObject* kw)' % (full_fname, self_arg) + + def get_tab_entry(self): + prototype_list = [] + docstring_list = [] + have_empty_constructor = False + for v in self.variants: + s = v.py_prototype + if not v.py_arglist and self.isconstructor: + have_empty_constructor = True + if s not in prototype_list: + prototype_list.append(s) + docstring_list.append(v.docstring) + if have_empty_constructor and len(self.variants) == 2: + idx = self.variants[1].py_arglist != [] + s = self.variants[idx].py_prototype + p1 = s.find('(') + p2 = s.rfind(')') + prototype_list = [s[:p1 + 1] + '[' + s[p1 + 1:p2] + ']' + s[p2:]] + full_docstring = '' + for (prototype, body) in zip(prototype_list, docstring_list): + full_docstring += Template('$prototype\n$docstring\n\n\n\n').substitute(prototype=prototype, docstring='\n'.join(['. ' + line for line in body.split('\n')])) + full_docstring = full_docstring.strip().replace('\\', '\\\\').replace('\n', '\\n').replace('"', '\\"') + full_docstring = full_docstring.encode('ascii', errors='xmlcharrefreplace').decode() + return Template(' {"$py_funcname", CV_PY_FN_WITH_KW_($wrap_funcname, $flags), "$py_docstring"},\n').substitute(py_funcname=self.variants[0].wname, wrap_funcname=self.get_wrapper_name(), flags='METH_STATIC' if self.is_static else '0', py_docstring=full_docstring) + + def gen_code(self, codegen): + all_classes = codegen.classes + proto = self.get_wrapper_prototype(codegen) + code = '%s\n{\n' % (proto,) + code += ' using namespace %s;\n\n' % self.namespace.replace('.', '::') + selfinfo = None + ismethod = self.classname != '' and (not self.isconstructor) + fullname = self.name + if self.classname: + selfinfo = all_classes[self.classname] + if not self.isconstructor: + if not self.is_static: + code += gen_template_check_self.substitute(name=selfinfo.name, cname=selfinfo.cname if selfinfo.issimple else 'Ptr<{}>'.format(selfinfo.cname), pname=selfinfo.cname + '*' if selfinfo.issimple else 'Ptr<{}>'.format(selfinfo.cname), cvt='' if selfinfo.issimple else '*') + fullname = selfinfo.wname + '.' 
+ fullname + all_code_variants = [] + variants = [] + variants_umat = [] + for v in self.variants: + hasUMat = False + for a in v.args: + hasUMat = hasUMat or 'UMat' in a.tp + if hasUMat: + variants_umat.append(v) + else: + variants.append(v) + variants.extend(variants_umat) + for v in variants: + code_decl = '' + code_ret = '' + code_cvt_list = [] + code_args = '(' + all_cargs = [] + if v.isphantom and ismethod and (not self.is_static): + code_args += '_self_' + instantiated_args = set() + for a in v.args: + if a.tp in ignored_arg_types: + defval = a.defval + if not defval and a.tp.endswith('*'): + defval = '0' + assert defval + if not code_args.endswith('('): + code_args += ', ' + code_args += defval + all_cargs.append([[None, ''], '']) + continue + tp1 = tp = a.tp + amp = '' + defval0 = '' + if tp in pass_by_val_types: + tp = tp1 = tp[:-1] + amp = '&' + if tp.endswith('*'): + defval0 = '0' + tp1 = tp.replace('*', '_ptr') + tp_candidates = [a.tp, normalize_class_name(self.namespace + '.' + a.tp)] + if any((tp in codegen.enums.keys() for tp in tp_candidates)): + defval0 = 'static_cast<%s>(%d)' % (a.tp, 0) + if tp in simple_argtype_mapping: + arg_type_info = simple_argtype_mapping[tp] + elif tp in all_classes: + tp_classinfo = all_classes[tp] + cname_of_value = tp_classinfo.cname if tp_classinfo.issimple else 'Ptr<{}>'.format(tp_classinfo.cname) + arg_type_info = ArgTypeInfo(cname_of_value, FormatStrings.object, defval0, True) + assert not (a.is_smart_ptr and tp_classinfo.issimple), "Can't pass 'simple' type as Ptr<>" + if not a.is_smart_ptr and (not tp_classinfo.issimple): + assert amp == '' + amp = '*' + else: + arg_type_info = ArgTypeInfo(tp, FormatStrings.object, defval0, True) + parse_name = a.name + if a.py_inputarg and arg_type_info.strict_conversion: + parse_name = 'pyobj_' + a.full_name.replace('.', '_') + code_decl += ' PyObject* %s = NULL;\n' % (parse_name,) + if a.tp == 'char': + code_cvt_list.append('convert_to_char(%s, &%s, %s)' % (parse_name, a.full_name, a.crepr())) + else: + code_cvt_list.append('pyopencv_to_safe(%s, %s, %s)' % (parse_name, a.full_name, a.crepr())) + all_cargs.append([arg_type_info, parse_name]) + if a.enclosing_arg: + a = a.enclosing_arg + arg_type_info = ArgTypeInfo(a.tp, FormatStrings.object, default_value=a.defval, strict_conversion=True) + if a.name in instantiated_args: + continue + instantiated_args.add(a.name) + defval = a.defval + if not defval: + defval = arg_type_info.default_value + else: + if 'UMat' in tp: + if 'Mat' in defval and 'UMat' not in defval: + defval = defval.replace('Mat', 'UMat') + if 'cuda::GpuMat' in tp: + if 'Mat' in defval and 'GpuMat' not in defval: + defval = defval.replace('Mat', 'cuda::GpuMat') + if defval == tp + '()' and arg_type_info.format_str == FormatStrings.object: + defval = '' + if a.outputarg and (not a.inputarg): + defval = '' + if defval: + code_decl += ' %s %s=%s;\n' % (arg_type_info.atype, a.name, defval) + else: + code_decl += ' %s %s;\n' % (arg_type_info.atype, a.name) + if not code_args.endswith('('): + code_args += ', ' + if a.isrvalueref: + code_args += amp + 'std::move(' + a.name + ')' + else: + code_args += amp + a.name + code_args += ')' + if self.isconstructor: + if selfinfo.issimple: + templ_prelude = gen_template_simple_call_constructor_prelude + templ = gen_template_simple_call_constructor + else: + templ_prelude = gen_template_call_constructor_prelude + templ = gen_template_call_constructor + code_prelude = templ_prelude.substitute(name=selfinfo.name, cname=selfinfo.cname) + code_fcall = 
templ.substitute(name=selfinfo.name, cname=selfinfo.cname, py_args=code_args) + if v.isphantom: + code_fcall = code_fcall.replace('new ' + selfinfo.cname, self.cname.replace('::', '_')) + else: + code_prelude = '' + code_fcall = '' + if v.rettype: + code_decl += ' ' + v.rettype + ' retval;\n' + code_fcall += 'retval = ' + if not v.isphantom and ismethod and (not self.is_static): + code_fcall += '_self_->' + self.cname + else: + code_fcall += self.cname + code_fcall += code_args + if code_cvt_list: + code_cvt_list = [''] + code_cvt_list + if v.rettype: + tp = v.rettype + tp1 = tp.replace('*', '_ptr') + default_info = ArgTypeInfo(tp, FormatStrings.object, '0') + arg_type_info = simple_argtype_mapping.get(tp, default_info) + all_cargs.append(arg_type_info) + if v.args and v.py_arglist: + fmtspec = ''.join([get_type_format_string(all_cargs[argno][0]) for (_, argno) in v.py_arglist]) + if v.py_noptargs > 0: + fmtspec = fmtspec[:-v.py_noptargs] + '|' + fmtspec[-v.py_noptargs:] + fmtspec += ':' + fullname + code_parse = gen_template_parse_args.substitute(kw_list=', '.join(['"' + v.args[argno].export_name + '"' for (_, argno) in v.py_arglist]), fmtspec=fmtspec, parse_arglist=', '.join(['&' + all_cargs[argno][1] for (_, argno) in v.py_arglist]), code_cvt=' &&\n '.join(code_cvt_list)) + else: + code_parse = 'if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))' + if len(v.py_outlist) == 0: + code_ret = 'Py_RETURN_NONE' + elif len(v.py_outlist) == 1: + if self.isconstructor: + code_ret = 'return 0' + else: + (aname, argno) = v.py_outlist[0] + code_ret = 'return pyopencv_from(%s)' % (aname,) + else: + fmtspec = 'N' * len(v.py_outlist) + code_ret = 'return Py_BuildValue("(%s)", %s)' % (fmtspec, ', '.join(['pyopencv_from(' + aname + ')' for (aname, argno) in v.py_outlist])) + all_code_variants.append(gen_template_func_body.substitute(code_decl=code_decl, code_parse=code_parse, code_prelude=code_prelude, code_fcall=code_fcall, code_ret=code_ret)) + if len(all_code_variants) == 1: + code += all_code_variants[0] + else: + code += ' pyPrepareArgumentConversionErrorsStorage({});\n'.format(len(all_code_variants)) + code += ' \n'.join((gen_template_overloaded_function_call.substitute(variant=v) for v in all_code_variants)) + code += ' pyRaiseCVOverloadException("{}");\n'.format(self.name) + def_ret = 'NULL' + if self.isconstructor: + def_ret = '-1' + code += '\n return %s;\n}\n\n' % def_ret + cname = self.cname + classinfo = None + if self.classname: + classinfo = all_classes[self.classname] + if self.isconstructor: + py_name = classinfo.full_export_name + else: + py_name = classinfo.full_export_name + '.' 
+ self.variants[0].wname + if not self.is_static and (not self.isconstructor): + cname = classinfo.cname + '::' + cname + else: + py_name = '.'.join([self.namespace, self.variants[0].wname]) + py_signatures = codegen.py_signatures.setdefault(cname, []) + for v in self.variants: + s = dict(name=py_name, arg=v.py_arg_str, ret=v.py_return_str) + for old in py_signatures: + if s == old: + break + else: + py_signatures.append(s) + return code + +class Namespace(object): + + def __init__(self): + self.funcs = {} + self.consts = {} + +class PythonWrapperGenerator(object): + + def __init__(self): + self.clear() + + def clear(self): + self.classes = {} + self.namespaces = {} + self.consts = {} + self.enums = {} + self.typing_stubs_generator = TypingStubsGenerator() + self.code_include = StringIO() + self.code_enums = StringIO() + self.code_types = StringIO() + self.code_funcs = StringIO() + self.code_ns_reg = StringIO() + self.code_ns_init = StringIO() + self.code_type_publish = StringIO() + self.py_signatures = dict() + self.class_idx = 0 + + def add_class(self, stype, name, decl): + classinfo = ClassInfo(name, decl, self) + classinfo.decl_idx = self.class_idx + self.class_idx += 1 + if classinfo.name in self.classes: + print('Generator error: class %s (cname=%s) already exists' % (classinfo.name, classinfo.cname)) + sys.exit(-1) + self.classes[classinfo.name] = classinfo + (namespace, _, _) = self.split_decl_name(name) + namespace = '.'.join(namespace) + self.namespaces.setdefault(namespace, Namespace()) + py_name = classinfo.full_export_name + py_signatures = self.py_signatures.setdefault(classinfo.cname, []) + py_signatures.append(dict(name=py_name)) + + def get_export_scope_name(self, original_scope_name): + class_scope = self.classes.get(normalize_class_name(original_scope_name), None) + if class_scope: + return class_scope.full_export_name + return original_scope_name + + def split_decl_name(self, name): + return SymbolName.parse(name, self.parser.namespaces) + + def add_const(self, name, decl): + cname = name.replace('.', '::') + (namespace, classes, name) = self.split_decl_name(name) + namespace = '.'.join(namespace) + name = '_'.join(chain(classes, (name,))) + ns = self.namespaces.setdefault(namespace, Namespace()) + if name in ns.consts: + print('Generator error: constant %s (cname=%s) already exists' % (name, cname)) + sys.exit(-1) + ns.consts[name] = cname + value = decl[1] + py_name = '.'.join([namespace, name]) + py_signatures = self.py_signatures.setdefault(cname, []) + py_signatures.append(dict(name=py_name, value=value)) + + def add_enum(self, name, decl): + enumeration_name = SymbolName.parse(name, self.parser.namespaces) + is_scoped_enum = decl[0].startswith('enum class') or decl[0].startswith('enum struct') + wname = normalize_class_name(name) + if wname.endswith('<unnamed>'): + wname = None + else: + self.enums[wname] = name + const_decls = decl[3] + enum_entries = {} + for decl in const_decls: + enum_entries[decl[0].split('.')[-1]] = decl[1] + self.add_const(decl[0].replace('const ', '').strip(), decl) + self.typing_stubs_generator.add_enum(enumeration_name, is_scoped_enum, enum_entries) + + def add_func(self, decl): + (namespace, classes, barename) = self.split_decl_name(decl[0]) + cname = '::'.join(chain(namespace, classes, (barename,))) + name = barename + classname = '' + bareclassname = '' + if classes: + classname = normalize_class_name('.'.join(namespace + classes)) + bareclassname = classes[-1] + namespace_str = '.'.join(namespace) + isconstructor = name == bareclassname + 
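+ # For example, a declaration named 'cv.ml.SVM.create' splits here into
+ # namespace ('cv', 'ml'), classes ('SVM',) and barename 'create', giving
+ # cname 'cv::ml::SVM::create'; isconstructor only becomes True when the
+ # bare name repeats the enclosing class name, as in 'cv.ml.SVM.SVM'.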
is_static = False + isphantom = False + mappable = None + for m in decl[2]: + if m == '/S': + is_static = True + elif m == '/phantom': + isphantom = True + cname = cname.replace('::', '_') + elif m.startswith('='): + name = m[1:] + elif m.startswith('/mappable='): + mappable = m[10:] + self.classes[classname].mappables.append(mappable) + return + if isconstructor: + name = '_'.join(chain(classes[:-1], (name,))) + if is_static: + func_map = self.classes[classname].methods + func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace_str, is_static)) + func.add_variant(decl, self.classes, isphantom) + g_name = '_'.join(chain(classes, (name,))) + w_classes = [] + for i in range(0, len(classes)): + classes_i = classes[:i + 1] + classname_i = normalize_class_name('.'.join(namespace + classes_i)) + w_classname = self.classes[classname_i].wname + namespace_prefix = normalize_class_name('.'.join(namespace)) + '_' + if w_classname.startswith(namespace_prefix): + w_classname = w_classname[len(namespace_prefix):] + w_classes.append(w_classname) + g_wname = '_'.join(w_classes + [name]) + func_map = self.namespaces.setdefault(namespace_str, Namespace()).funcs + self.typing_stubs_generator.add_ignored_function_name(g_name) + func = func_map.setdefault(g_name, FuncInfo('', g_name, cname, isconstructor, namespace_str, False)) + func.add_variant(decl, self.classes, isphantom) + if g_wname != g_name: + self.typing_stubs_generator.add_ignored_function_name(g_wname) + wfunc = func_map.setdefault(g_wname, FuncInfo('', g_wname, cname, isconstructor, namespace_str, False)) + wfunc.add_variant(decl, self.classes, isphantom) + else: + if classname and (not isconstructor): + if not isphantom: + cname = barename + func_map = self.classes[classname].methods + else: + func_map = self.namespaces.setdefault(namespace_str, Namespace()).funcs + func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace_str, is_static)) + func.add_variant(decl, self.classes, isphantom) + if classname and isconstructor: + self.classes[classname].constructor = func + + def gen_namespace(self, ns_name): + ns = self.namespaces[ns_name] + wname = normalize_class_name(ns_name) + self.code_ns_reg.write('static PyMethodDef methods_%s[] = {\n' % wname) + for (name, func) in sorted(ns.funcs.items()): + if func.isconstructor: + continue + self.code_ns_reg.write(func.get_tab_entry()) + custom_entries_macro = 'PYOPENCV_EXTRA_METHODS_{}'.format(wname.upper()) + self.code_ns_reg.write('#ifdef {}\n {}\n#endif\n'.format(custom_entries_macro, custom_entries_macro)) + self.code_ns_reg.write(' {NULL, NULL}\n};\n\n') + self.code_ns_reg.write('static ConstDef consts_%s[] = {\n' % wname) + for (name, cname) in sorted(ns.consts.items()): + self.code_ns_reg.write(' {"%s", static_cast<long>(%s)},\n' % (name, cname)) + compat_name = re.sub('([a-z])([A-Z])', '\\1_\\2', name).upper() + if name != compat_name: + self.code_ns_reg.write(' {"%s", static_cast<long>(%s)},\n' % (compat_name, cname)) + custom_entries_macro = 'PYOPENCV_EXTRA_CONSTANTS_{}'.format(wname.upper()) + self.code_ns_reg.write('#ifdef {}\n {}\n#endif\n'.format(custom_entries_macro, custom_entries_macro)) + self.code_ns_reg.write(' {NULL, 0}\n};\n\n') + + def gen_enum_reg(self, enum_name): + name_seg = enum_name.split('.') + is_enum_class = False + if len(name_seg) >= 2 and name_seg[-1] == name_seg[-2]: + enum_name = '.'.join(name_seg[:-1]) + is_enum_class = True + wname = normalize_class_name(enum_name) + cname = enum_name.replace('.', '::') + code = 
'' + if re.sub('^cv\\.', '', enum_name) != wname: + code += 'typedef {0} {1};\n'.format(cname, wname) + code += 'CV_PY_FROM_ENUM({0})\nCV_PY_TO_ENUM({0})\n\n'.format(wname) + self.code_enums.write(code) + + def save(self, path, name, buf): + with open(path + '/' + name, 'wt') as f: + f.write(buf.getvalue()) + + def save_json(self, path, name, value): + import json + with open(path + '/' + name, 'wt') as f: + json.dump(value, f) + + def gen(self, srcfiles, output_path): + self.clear() + self.parser = hdr_parser.CppHeaderParser(generate_umat_decls=True, generate_gpumat_decls=True) + for hdr in srcfiles: + decls = self.parser.parse(hdr) + if len(decls) == 0: + continue + if hdr.find('misc/python/shadow_') < 0: + if hdr.find('opencv2/') >= 0: + self.code_include.write('#include "{0}"\n'.format(hdr[hdr.rindex('opencv2/'):])) + else: + self.code_include.write('#include "{0}"\n'.format(hdr)) + for decl in decls: + name = decl[0] + if name.startswith('struct') or name.startswith('class'): + p = name.find(' ') + stype = name[:p] + name = name[p + 1:].strip() + self.add_class(stype, name, decl) + elif name.startswith('const'): + self.add_const(name.replace('const ', '').strip(), decl) + elif name.startswith('enum'): + self.add_enum(name.rsplit(' ', 1)[1], decl) + else: + self.add_func(decl) + for (name, classinfo) in self.classes.items(): + if classinfo.base: + chunks = classinfo.base.split('_') + base = '_'.join(chunks) + while base not in self.classes and len(chunks) > 1: + del chunks[-2] + base = '_'.join(chunks) + if base not in self.classes: + print('Generator error: unable to resolve base %s for %s' % (classinfo.base, classinfo.name)) + sys.exit(-1) + base_instance = self.classes[base] + classinfo.base = base + classinfo.isalgorithm |= base_instance.isalgorithm + self.classes[name] = classinfo + processed = dict() + + def process_isalgorithm(classinfo): + if classinfo.isalgorithm or classinfo in processed: + return classinfo.isalgorithm + res = False + if classinfo.base: + res = process_isalgorithm(self.classes[classinfo.base]) + classinfo.isalgorithm |= res + res = classinfo.isalgorithm + processed[classinfo] = True + return res + for (name, classinfo) in self.classes.items(): + process_isalgorithm(classinfo) + classlist = list(self.classes.items()) + classlist.sort() + for (name, classinfo) in classlist: + self.code_types.write('//{}\n'.format(80 * '=')) + self.code_types.write('// {} ({})\n'.format(name, 'Map' if classinfo.ismap else 'Generic')) + self.code_types.write('//{}\n'.format(80 * '=')) + self.code_types.write(classinfo.gen_code(self)) + if classinfo.ismap: + self.code_types.write(gen_template_map_type_cvt.substitute(name=classinfo.name, cname=classinfo.cname)) + else: + mappable_code = '\n'.join([gen_template_mappable.substitute(cname=classinfo.cname, mappable=mappable) for mappable in classinfo.mappables]) + code = gen_template_type_decl.substitute(name=classinfo.name, cname=classinfo.cname if classinfo.issimple else 'Ptr<{}>'.format(classinfo.cname), mappable_code=mappable_code) + self.code_types.write(code) + classlist1 = [(classinfo.decl_idx, name, classinfo) for (name, classinfo) in classlist] + classlist1.sort() + published_types = set() + for (decl_idx, name, classinfo) in classlist1: + if classinfo.ismap: + continue + + def _registerType(classinfo): + if classinfo.decl_idx in published_types: + return self.typing_stubs_generator.find_class_node(classinfo, self.parser.namespaces) + published_types.add(classinfo.decl_idx) + class_node = 
self.typing_stubs_generator.create_class_node(classinfo, self.parser.namespaces) + if classinfo.base and classinfo.base in self.classes: + base_classinfo = self.classes[classinfo.base] + base_node = _registerType(base_classinfo) + class_node.add_base(base_node) + self.code_type_publish.write(classinfo.gen_def(self)) + return class_node + _registerType(classinfo) + for (ns_name, ns) in sorted(self.namespaces.items()): + if ns_name.split('.')[0] != 'cv': + continue + for (name, func) in sorted(ns.funcs.items()): + if func.isconstructor: + continue + code = func.gen_code(self) + self.code_funcs.write(code) + if name not in self.typing_stubs_generator.type_hints_ignored_functions: + self.typing_stubs_generator.create_function_node(func) + self.gen_namespace(ns_name) + self.code_ns_init.write('CVPY_MODULE("{}", {});\n'.format(ns_name[2:], normalize_class_name(ns_name))) + enumlist = list(self.enums.values()) + enumlist.sort() + for name in enumlist: + self.gen_enum_reg(name) + constlist = list(self.consts.items()) + constlist.sort() + for (name, constinfo) in constlist: + self.gen_const_reg(constinfo) + self.typing_stubs_generator.generate(output_path) + self.save(output_path, 'pyopencv_generated_include.h', self.code_include) + self.save(output_path, 'pyopencv_generated_funcs.h', self.code_funcs) + self.save(output_path, 'pyopencv_generated_enums.h', self.code_enums) + self.save(output_path, 'pyopencv_generated_types.h', self.code_type_publish) + self.save(output_path, 'pyopencv_generated_types_content.h', self.code_types) + self.save(output_path, 'pyopencv_generated_modules.h', self.code_ns_init) + self.save(output_path, 'pyopencv_generated_modules_content.h', self.code_ns_reg) + self.save_json(output_path, 'pyopencv_signatures.json', self.py_signatures) +if __name__ == '__main__': + srcfiles = hdr_parser.opencv_hdr_list + dstdir = '/Users/vp/tmp' + if len(sys.argv) > 1: + dstdir = sys.argv[1] + if len(sys.argv) > 2: + with open(sys.argv[2], 'r') as f: + srcfiles = [l.strip() for l in f.readlines()] + generator = PythonWrapperGenerator() + generator.gen(srcfiles, dstdir) + +# File: opencv-master/modules/python/src2/hdr_parser.py +from __future__ import print_function +import os, sys, re, string, io +opencv_hdr_list = ['../../core/include/opencv2/core.hpp', '../../core/include/opencv2/core/mat.hpp', '../../core/include/opencv2/core/ocl.hpp', '../../flann/include/opencv2/flann/miniflann.hpp', '../../ml/include/opencv2/ml.hpp', '../../imgproc/include/opencv2/imgproc.hpp', '../../calib3d/include/opencv2/calib3d.hpp', '../../features2d/include/opencv2/features2d.hpp', '../../video/include/opencv2/video/tracking.hpp', '../../video/include/opencv2/video/background_segm.hpp', '../../objdetect/include/opencv2/objdetect.hpp', '../../imgcodecs/include/opencv2/imgcodecs.hpp', '../../videoio/include/opencv2/videoio.hpp', '../../highgui/include/opencv2/highgui.hpp'] +'' + +class CppHeaderParser(object): + + def __init__(self, generate_umat_decls=False, generate_gpumat_decls=False): + self._generate_umat_decls = generate_umat_decls + self._generate_gpumat_decls = generate_gpumat_decls + self.BLOCK_TYPE = 0 + self.BLOCK_NAME = 1 + self.PROCESS_FLAG = 2 + self.PUBLIC_SECTION = 3 + self.CLASS_DECL = 4 + self.namespaces = set() + + def batch_replace(self, s, pairs): + for (before, after) in pairs: + s = s.replace(before, after) + return s + + def get_macro_arg(self, arg_str, npos): + npos2 = npos3 = arg_str.find('(', npos) + if npos2 < 0: + print('Error: no arguments for the macro at %s:%d' % (self.hname, 
self.lineno)) + sys.exit(-1) + balance = 1 + while 1: + (t, npos3) = self.find_next_token(arg_str, ['(', ')'], npos3 + 1) + if npos3 < 0: + print("Error: no matching ')' in the macro call at %s:%d" % (self.hname, self.lineno)) + sys.exit(-1) + if t == '(': + balance += 1 + if t == ')': + balance -= 1 + if balance == 0: + break + return (arg_str[npos2 + 1:npos3].strip(), npos3) + + def parse_arg(self, arg_str, argno): + modlist = [] + if 'CV_ND' in arg_str: + modlist.append('/ND') + arg_str = arg_str.replace('CV_ND', '') + if 'CV_OUT' in arg_str: + modlist.append('/O') + arg_str = arg_str.replace('CV_OUT', '') + if 'CV_IN_OUT' in arg_str: + modlist.append('/IO') + arg_str = arg_str.replace('CV_IN_OUT', '') + if 'CV_WRAP_FILE_PATH' in arg_str: + modlist.append('/PATH') + arg_str = arg_str.replace('CV_WRAP_FILE_PATH', '') + isarray = False + npos = arg_str.find('CV_CARRAY') + if npos >= 0: + isarray = True + (macro_arg, npos3) = self.get_macro_arg(arg_str, npos) + modlist.append('/A ' + macro_arg) + arg_str = arg_str[:npos] + arg_str[npos3 + 1:] + npos = arg_str.find('CV_CUSTOM_CARRAY') + if npos >= 0: + isarray = True + (macro_arg, npos3) = self.get_macro_arg(arg_str, npos) + modlist.append('/CA ' + macro_arg) + arg_str = arg_str[:npos] + arg_str[npos3 + 1:] + npos = arg_str.find('const') + if npos >= 0: + modlist.append('/C') + npos = arg_str.find('&&') + if npos >= 0: + arg_str = arg_str.replace('&&', '') + modlist.append('/RRef') + npos = arg_str.find('&') + if npos >= 0: + modlist.append('/Ref') + arg_str = arg_str.strip() + word_start = 0 + word_list = [] + npos = -1 + while 1: + npos += 1 + (t, npos) = self.find_next_token(arg_str, [' ', '&', '*', '<', '>', ','], npos) + w = arg_str[word_start:npos].strip() + if w == 'operator': + word_list.append('operator ' + arg_str[npos:].strip()) + break + if w not in ['', 'const']: + word_list.append(w) + if t not in ['', ' ', '&']: + word_list.append(t) + if not t: + break + word_start = npos + 1 + npos = word_start - 1 + arg_type = '' + arg_name = '' + angle_stack = [] + wi = -1 + prev_w = '' + for w in word_list: + wi += 1 + if w == '*': + if prev_w == 'char' and (not isarray): + arg_type = arg_type[:-len('char')] + 'c_string' + else: + arg_type += w + continue + elif w == '<': + arg_type += '_' + angle_stack.append(0) + elif w == ',' or w == '>': + if not angle_stack: + print("Error at %s:%d: argument contains ',' or '>' not within template arguments" % (self.hname, self.lineno)) + sys.exit(-1) + if w == ',': + arg_type += '_and_' + elif w == '>': + if angle_stack[0] == 0: + print('Error at %s:%d: template has no arguments' % (self.hname, self.lineno)) + sys.exit(-1) + if angle_stack[0] > 1: + arg_type += '_end_' + angle_stack[-1:] = [] + elif angle_stack: + arg_type += w + angle_stack[-1] += 1 + elif arg_type == 'struct': + arg_type += ' ' + w + elif arg_type and arg_type != '~': + arg_name = ' '.join(word_list[wi:]) + break + else: + arg_type += w + prev_w = w + counter_str = '' + add_star = False + if '[' in arg_name and (not 'operator' in arg_str): + p1 = arg_name.find('[') + p2 = arg_name.find(']', p1 + 1) + if p2 < 0: + print('Error at %s:%d: no closing ]' % (self.hname, self.lineno)) + sys.exit(-1) + counter_str = arg_name[p1 + 1:p2].strip() + if counter_str == '': + counter_str = '?' 
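+ # Illustration of the C-array handling around this point (hypothetical
+ # declarations): 'float pts[4]' yields counter_str '4' and, below, the
+ # modifier '/A 4' with arg_type promoted to 'float*'; 'float pts[]' has
+ # no explicit length, so counter_str falls back to '?' as set above.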
+ if not isarray: + modlist.append('/A ' + counter_str.strip()) + arg_name = arg_name[:p1] + add_star = True + if not arg_name: + if arg_type.startswith('operator'): + (arg_type, arg_name) = ('', arg_type) + else: + arg_name = 'arg' + str(argno) + argno += 1 + while arg_type.endswith('_end_'): + arg_type = arg_type[:-len('_end_')] + if add_star: + arg_type += '*' + arg_type = self.batch_replace(arg_type, [('std::', ''), ('cv::', ''), ('::', '_')]) + return (arg_type, arg_name, modlist, argno) + + def parse_enum(self, decl_str): + l = decl_str + ll = l.split(',') + if ll[-1].strip() == '': + ll = ll[:-1] + prev_val = '' + prev_val_delta = -1 + decl = [] + for pair in ll: + pv = pair.split('=') + if len(pv) == 1: + prev_val_delta += 1 + val = '' + if prev_val: + val = prev_val + '+' + val += str(prev_val_delta) + else: + prev_val_delta = 0 + prev_val = val = pv[1].strip() + decl.append(['const ' + self.get_dotted_name(pv[0].strip()), val, [], [], None, '']) + return decl + + def parse_class_decl(self, decl_str): + l = decl_str + modlist = [] + if 'CV_EXPORTS_W_MAP' in l: + l = l.replace('CV_EXPORTS_W_MAP', '') + modlist.append('/Map') + if 'CV_EXPORTS_W_SIMPLE' in l: + l = l.replace('CV_EXPORTS_W_SIMPLE', '') + modlist.append('/Simple') + if 'CV_EXPORTS_W_PARAMS' in l: + l = l.replace('CV_EXPORTS_W_PARAMS', '') + modlist.append('/Map') + modlist.append('/Params') + npos = l.find('CV_EXPORTS_AS') + if npos < 0: + npos = l.find('CV_WRAP_AS') + if npos >= 0: + (macro_arg, npos3) = self.get_macro_arg(l, npos) + modlist.append('=' + macro_arg) + l = l[:npos] + l[npos3 + 1:] + l = self.batch_replace(l, [('CV_EXPORTS_W', ''), ('CV_EXPORTS', ''), ('public virtual ', ' '), ('public ', ' '), ('::', '.')]).strip() + ll = re.split('\\s+|\\s*[,:]\\s*', l) + ll = [le for le in ll if le] + classname = ll[1] + bases = ll[2:] + return (classname, bases, modlist) + + def parse_func_decl_no_wrap(self, decl_str, static_method=False, docstring=''): + decl_str = (decl_str or '').strip() + virtual_method = False + explicit_method = False + if decl_str.startswith('explicit'): + decl_str = decl_str[len('explicit'):].lstrip() + explicit_method = True + if decl_str.startswith('virtual'): + decl_str = decl_str[len('virtual'):].lstrip() + virtual_method = True + if decl_str.startswith('static'): + decl_str = decl_str[len('static'):].lstrip() + static_method = True + fdecl = decl_str.replace('CV_OUT', '').replace('CV_IN_OUT', '') + fdecl = fdecl.strip().replace('\t', ' ') + while '  ' in fdecl: + fdecl = fdecl.replace('  ', ' ') + fname = fdecl[:fdecl.find('(')].strip() + fnpos = fname.rfind(' ') + if fnpos < 0: + fnpos = 0 + fname = fname[fnpos:].strip() + rettype = fdecl[:fnpos].strip() + if rettype.endswith('operator'): + fname = ('operator ' + fname).strip() + rettype = rettype[:rettype.rfind('operator')].strip() + if rettype.endswith('::'): + rpos = rettype.rfind(' ') + if rpos >= 0: + fname = rettype[rpos + 1:].strip() + fname + rettype = rettype[:rpos].strip() + else: + fname = rettype + fname + rettype = '' + apos = fdecl.find('(') + if fname.endswith('operator'): + fname += ' ()' + apos = fdecl.find('(', apos + 1) + fname = 'cv.'
+ fname.replace('::', '.') + decl = [fname, rettype, [], [], None, docstring] + implmatch = re.match('(\\(.*?\\))\\s*:\\s*(\\w+\\(.*?\\),?\\s*)+', fdecl[apos:]) + if bool(implmatch): + fdecl = fdecl[:apos] + implmatch.group(1) + args0str = fdecl[apos + 1:fdecl.rfind(')')].strip() + if args0str != '' and args0str != 'void': + args0str = re.sub('\\([^)]*\\)', lambda m: m.group(0).replace(',', '@comma@'), args0str) + args0 = args0str.split(',') + args = [] + narg = '' + for arg in args0: + narg += arg.strip() + balance_paren = narg.count('(') - narg.count(')') + balance_angle = narg.count('<') - narg.count('>') + if balance_paren == 0 and balance_angle == 0: + args.append(narg.strip()) + narg = '' + for arg in args: + dfpos = arg.find('=') + defval = '' + if dfpos >= 0: + defval = arg[dfpos + 1:].strip() + else: + dfpos = arg.find('CV_DEFAULT') + if dfpos >= 0: + (defval, pos3) = self.get_macro_arg(arg, dfpos) + else: + dfpos = arg.find('CV_WRAP_DEFAULT') + if dfpos >= 0: + (defval, pos3) = self.get_macro_arg(arg, dfpos) + if dfpos >= 0: + defval = defval.replace('@comma@', ',') + arg = arg[:dfpos].strip() + pos = len(arg) - 1 + while pos >= 0 and (arg[pos] in '_[]' or arg[pos].isalpha() or arg[pos].isdigit()): + pos -= 1 + if pos >= 0: + aname = arg[pos + 1:].strip() + atype = arg[:pos + 1].strip() + if aname.endswith('&') or aname.endswith('*') or aname in ['int', 'String', 'Mat']: + atype = (atype + ' ' + aname).strip() + aname = '' + else: + atype = arg + aname = '' + if aname.endswith(']'): + bidx = aname.find('[') + atype += aname[bidx:] + aname = aname[:bidx] + decl[3].append([atype, aname, defval, []]) + if static_method: + decl[2].append('/S') + if virtual_method: + decl[2].append('/V') + if explicit_method: + decl[2].append('/E') + if bool(re.match('.*\\)\\s*(const)?\\s*=\\s*0', decl_str)): + decl[2].append('/A') + if bool(re.match('.*\\)\\s*const(\\s*=\\s*0)?', decl_str)): + decl[2].append('/C') + return decl + + def parse_func_decl(self, decl_str, mat='Mat', docstring=''): + if self.wrap_mode: + if not ('CV_EXPORTS_AS' in decl_str or 'CV_EXPORTS_W' in decl_str or 'CV_WRAP' in decl_str): + return [] + if 'CVAPI(' in decl_str and self.wrap_mode: + return [] + top = self.block_stack[-1] + func_modlist = [] + npos = decl_str.find('CV_EXPORTS_AS') + if npos >= 0: + (arg, npos3) = self.get_macro_arg(decl_str, npos) + func_modlist.append('=' + arg) + decl_str = decl_str[:npos] + decl_str[npos3 + 1:] + npos = decl_str.find('CV_WRAP_AS') + if npos >= 0: + (arg, npos3) = self.get_macro_arg(decl_str, npos) + func_modlist.append('=' + arg) + decl_str = decl_str[:npos] + decl_str[npos3 + 1:] + npos = decl_str.find('CV_WRAP_PHANTOM') + if npos >= 0: + (decl_str, _) = self.get_macro_arg(decl_str, npos) + func_modlist.append('/phantom') + npos = decl_str.find('CV_WRAP_MAPPABLE') + if npos >= 0: + (mappable, npos3) = self.get_macro_arg(decl_str, npos) + func_modlist.append('/mappable=' + mappable) + classname = top[1] + return ['.'.join([classname, classname]), None, func_modlist, [], None, None] + virtual_method = False + pure_virtual_method = False + const_method = False + decl_str = self.batch_replace(decl_str, [('static inline', ''), ('inline', ''), ('explicit ', ''), ('CV_EXPORTS_W', ''), ('CV_EXPORTS', ''), ('CV_CDECL', ''), ('CV_WRAP ', ' '), ('CV_INLINE', ''), ('CV_DEPRECATED', ''), ('CV_DEPRECATED_EXTERNAL', ''), ('CV_NODISCARD_STD', '')]).strip() + if decl_str.strip().startswith('virtual'): + virtual_method = True + decl_str = decl_str.replace('virtual', '') + end_tokens = 
decl_str[decl_str.rfind(')'):].split() + const_method = 'const' in end_tokens + pure_virtual_method = '=' in end_tokens and '0' in end_tokens + static_method = False + context = top[0] + if decl_str.startswith('static') and (context == 'class' or context == 'struct'): + decl_str = decl_str[len('static'):].lstrip() + static_method = True + args_begin = decl_str.find('(') + if decl_str.startswith('CVAPI'): + rtype_end = decl_str.find(')', args_begin + 1) + if rtype_end < 0: + print('Error at %d. no terminating ) in CVAPI() macro: %s' % (self.lineno, decl_str)) + sys.exit(-1) + decl_str = decl_str[args_begin + 1:rtype_end] + ' ' + decl_str[rtype_end + 1:] + args_begin = decl_str.find('(') + if args_begin < 0: + print("Error at %d: no args in '%s'" % (self.lineno, decl_str)) + sys.exit(-1) + decl_start = decl_str[:args_begin].strip() + if decl_start.endswith('operator'): + args_begin = decl_str.find('(', args_begin + 1) + if args_begin < 0: + print("Error at %d: no args in '%s'" % (self.lineno, decl_str)) + sys.exit(-1) + decl_start = decl_str[:args_begin].strip() + if decl_start.endswith('()'): + decl_start = decl_start[0:-2].rstrip() + ' ()' + if bool(re.match('^(\\w+::)*(?P<x>\\w+)::~?(?P=x)$', decl_start)): + decl_start = 'void ' + decl_start + (rettype, funcname, modlist, argno) = self.parse_arg(decl_start, -1) + original_type = None + i = decl_start.rfind(funcname) + if i > 0: + original_type = decl_start[:i].replace('&', '').replace('const', '').strip() + if argno >= 0: + classname = top[1] + if rettype == classname or rettype == '~' + classname: + (rettype, funcname) = ('', rettype) + elif bool(re.match('\\w+\\s+\\(\\*\\w+\\)\\s*\\(.*\\)', decl_str)): + return [] + elif bool(re.match('\\w+\\s+\\(\\w+::\\*\\w+\\)\\s*\\(.*\\)', decl_str)): + return [] + elif bool(re.match('[A-Z_]+', decl_start)): + return [] + elif '__declspec' == decl_start: + return [] + elif bool(re.match('\\w+\\s+\\(\\*\\w+\\)\\[\\d+\\]', decl_str)): + return [] + else: + print("Error at %s:%d the function/method name is missing: '%s'" % (self.hname, self.lineno, decl_start)) + sys.exit(-1) + if self.wrap_mode and ('::' in funcname or funcname.startswith('~')): + return [] + funcname = self.get_dotted_name(funcname) + is_arithm_op_func = funcname in {'cv.add', 'cv.subtract', 'cv.absdiff', 'cv.multiply', 'cv.divide'} + if not self.wrap_mode: + decl = self.parse_func_decl_no_wrap(decl_str, static_method, docstring) + decl[0] = funcname + return decl + arg_start = args_begin + 1 + npos = arg_start - 1 + balance = 1 + angle_balance = 0 + args_decls = [] + args = [] + argno = 1 + while balance > 0: + npos += 1 + (t, npos) = self.find_next_token(decl_str, ['(', ')', ',', '<', '>'], npos) + if not t: + print("Error: no closing ')' at %d" % (self.lineno,)) + sys.exit(-1) + if t == '<': + angle_balance += 1 + if t == '>': + angle_balance -= 1 + if t == '(': + balance += 1 + if t == ')': + balance -= 1 + if t == ',' and balance == 1 and (angle_balance == 0) or balance == 0: + a = decl_str[arg_start:npos].strip() + arg_start = npos + 1 + if a: + eqpos = a.find('=') + defval = '' + modlist = [] + if eqpos >= 0: + defval = a[eqpos + 1:].strip() + else: + eqpos = a.find('CV_DEFAULT') + if eqpos >= 0: + (defval, pos3) = self.get_macro_arg(a, eqpos) + else: + eqpos = a.find('CV_WRAP_DEFAULT') + if eqpos >= 0: + (defval, pos3) = self.get_macro_arg(a, eqpos) + if defval == 'NULL': + defval = '0' + if eqpos >= 0: + a = a[:eqpos].strip() + (arg_type, arg_name, modlist, argno) = self.parse_arg(a, argno) + if self.wrap_mode: + vector_mat =
'vector_{}'.format(mat) + vector_mat_template = 'vector<{}>'.format(mat) + if arg_type == 'InputArray': + arg_type = mat + if is_arithm_op_func: + modlist.append('/AOS') + elif arg_type == 'InputOutputArray': + arg_type = mat + modlist.append('/IO') + elif arg_type == 'OutputArray': + arg_type = mat + modlist.append('/O') + elif arg_type == 'InputArrayOfArrays': + arg_type = vector_mat + elif arg_type == 'InputOutputArrayOfArrays': + arg_type = vector_mat + modlist.append('/IO') + elif arg_type == 'OutputArrayOfArrays': + arg_type = vector_mat + modlist.append('/O') + defval = self.batch_replace(defval, [('InputArrayOfArrays', vector_mat_template), ('InputOutputArrayOfArrays', vector_mat_template), ('OutputArrayOfArrays', vector_mat_template), ('InputArray', mat), ('InputOutputArray', mat), ('OutputArray', mat), ('noArray', arg_type)]).strip() + if '/IO' in modlist and '/O' in modlist: + modlist.remove('/O') + if (arg_name.lower() == 'filename' or arg_name.lower() == 'filepath') and '/PATH' not in modlist: + modlist.append('/PATH') + args.append([arg_type, arg_name, defval, modlist]) + npos = arg_start - 1 + if static_method: + func_modlist.append('/S') + if const_method: + func_modlist.append('/C') + if virtual_method: + func_modlist.append('/V') + if pure_virtual_method: + func_modlist.append('/PV') + return [funcname, rettype, func_modlist, args, original_type, docstring] + + def get_dotted_name(self, name): + if not self.block_stack: + return name + if name.startswith('cv.'): + return name + qualified_name = '.' in name or '::' in name + n = '' + for b in self.block_stack: + (block_type, block_name) = (b[self.BLOCK_TYPE], b[self.BLOCK_NAME]) + if block_type in ['file', 'enum']: + continue + if block_type in ['enum struct', 'enum class'] and block_name == name: + continue + if block_type not in ['struct', 'class', 'namespace', 'enum struct', 'enum class']: + print('Error at %d: there are non-valid entries in the current block stack %s' % (self.lineno, self.block_stack)) + sys.exit(-1) + if block_name and (block_type == 'namespace' or not qualified_name): + n += block_name + '.' 
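+ # Example (hypothetical block stack): inside namespace 'cv' and class 'Mat',
+ # get_dotted_name('rows') yields 'cv.Mat.rows'.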
+ n += name.replace('::', '.') + if n.endswith('.Algorithm'): + n = 'cv.Algorithm' + return n + + def parse_stmt(self, stmt, end_token, mat='Mat', docstring=''): + stack_top = self.block_stack[-1] + context = stack_top[self.BLOCK_TYPE] + if stmt.startswith('inline namespace'): + return ('namespace', '', True, None) + stmt_type = '' + if end_token == '{': + stmt_type = 'block' + if context == 'block': + print('Error at %d: should not call parse_stmt inside blocks' % (self.lineno,)) + sys.exit(-1) + if context == 'class' or context == 'struct': + while 1: + colon_pos = stmt.find(':') + if colon_pos < 0: + break + w = stmt[:colon_pos].strip() + if w in ['public', 'protected', 'private']: + if w == 'public' or (not self.wrap_mode and w == 'protected'): + stack_top[self.PUBLIC_SECTION] = True + else: + stack_top[self.PUBLIC_SECTION] = False + stmt = stmt[colon_pos + 1:].strip() + break + if not stack_top[self.PUBLIC_SECTION] or stmt.startswith('template'): + return (stmt_type, '', False, None) + if end_token == '{': + if not self.wrap_mode and stmt.startswith('typedef struct'): + stmt_type = 'struct' + try: + (classname, bases, modlist) = self.parse_class_decl(stmt[len('typedef '):]) + except: + print('Error at %s:%d' % (self.hname, self.lineno)) + exit(1) + if classname.startswith('_Ipl'): + classname = classname[1:] + decl = [stmt_type + ' ' + self.get_dotted_name(classname), '', modlist, [], None, docstring] + if bases: + decl[1] = ': ' + ', '.join([self.get_dotted_name(b).replace('.', '::') for b in bases]) + return (stmt_type, classname, True, decl) + if stmt.startswith('class') or stmt.startswith('struct'): + stmt_type = stmt.split()[0] + if stmt.strip() != stmt_type: + try: + (classname, bases, modlist) = self.parse_class_decl(stmt) + except: + print('Error at %s:%d' % (self.hname, self.lineno)) + exit(1) + decl = [] + if 'CV_EXPORTS_W' in stmt or 'CV_EXPORTS_AS' in stmt or (not self.wrap_mode): + decl = [stmt_type + ' ' + self.get_dotted_name(classname), '', modlist, [], None, docstring] + if bases: + decl[1] = ': ' + ', '.join([self.get_dotted_name(b).replace('.', '::') for b in bases]) + return (stmt_type, classname, True, decl) + if stmt.startswith('enum') or stmt.startswith('namespace'): + stmt = stmt.split(':')[0] + stmt_list = stmt.rsplit(' ', 1) + if len(stmt_list) < 2: + stmt_list.append('') + return (stmt_list[0], stmt_list[1], True, None) + if stmt.startswith('extern') and '"C"' in stmt: + return ('namespace', '', True, None) + if end_token == '}' and context.startswith('enum'): + decl = self.parse_enum(stmt) + name = stack_top[self.BLOCK_NAME] + return (context, name, False, decl) + if end_token == ';' and stmt.startswith('typedef'): + return (stmt_type, '', False, None) + paren_pos = stmt.find('(') + if paren_pos >= 0: + decl = self.parse_func_decl(stmt, mat=mat, docstring=docstring) + return (stmt_type, '', False, decl) + if (context == 'struct' or context == 'class') and end_token == ';' and stmt: + class_decl = stack_top[self.CLASS_DECL] + if 'CV_PROP' in stmt: + var_modlist = [] + if 'CV_PROP_RW' in stmt: + var_modlist.append('/RW') + stmt = self.batch_replace(stmt, [('CV_PROP_RW', ''), ('CV_PROP', '')]).strip() + var_list = stmt.split(',') + (var_type, var_name1, modlist, argno) = self.parse_arg(var_list[0], -1) + var_list = [var_name1] + [i.strip() for i in var_list[1:]] + for v in var_list: + prop_definition = v.split('=') + prop_name = prop_definition[0].strip() + if len(prop_definition) == 1: + prop_default_value = '' + else: + prop_default_value = 
prop_definition[-1] + class_decl[3].append([var_type, prop_name, prop_default_value, var_modlist]) + return (stmt_type, '', False, None) + return (stmt_type, '', False, None) + + def find_next_token(self, s, tlist, p=0): + token = '' + tpos = len(s) + for t in tlist: + pos = s.find(t, p) + if pos < 0: + continue + if pos < tpos: + tpos = pos + token = t + return (token, tpos) + + def parse(self, hname, wmode=True): + self.hname = hname + decls = [] + f = io.open(hname, 'rt', encoding='utf-8') + linelist = list(f.readlines()) + f.close() + SCAN = 0 + COMMENT = 1 + DIRECTIVE = 2 + DOCSTRING = 3 + DIRECTIVE_IF_0 = 4 + state = SCAN + self.block_stack = [['file', hname, True, True, None]] + block_head = '' + docstring = '' + self.lineno = 0 + self.wrap_mode = wmode + depth_if_0 = 0 + for l0 in linelist: + self.lineno += 1 + l = l0.strip() + l = self.batch_replace(l, [('GAPI_EXPORTS', 'CV_EXPORTS'), ('GAPI_EXPORTS_W', 'CV_EXPORTS_W'), ('GAPI_EXPORTS_W_SIMPLE', 'CV_EXPORTS_W_SIMPLE'), ('GAPI_WRAP', 'CV_WRAP'), ('GAPI_PROP', 'CV_PROP'), ('GAPI_PROP_RW', 'CV_PROP_RW'), ('defined(GAPI_STANDALONE)', '0')]) + if state == SCAN and l.startswith('#'): + state = DIRECTIVE + if state == DIRECTIVE: + if l.endswith('\\'): + continue + state = SCAN + l = re.sub('//(.+)?', '', l).strip() + if l in ['#if 0', '#if defined(__OPENCV_BUILD)', '#ifdef __OPENCV_BUILD', '#if !defined(OPENCV_BINDING_PARSER)', '#ifndef OPENCV_BINDING_PARSER']: + state = DIRECTIVE_IF_0 + depth_if_0 = 1 + continue + if state == DIRECTIVE_IF_0: + if l.startswith('#'): + l = l[1:].strip() + if l.startswith('if'): + depth_if_0 += 1 + continue + if l.startswith('endif'): + depth_if_0 -= 1 + if depth_if_0 == 0: + state = SCAN + else: + pass + continue + if state == COMMENT: + pos = l.find('*/') + if pos < 0: + continue + l = l[pos + 2:] + state = SCAN + if state == DOCSTRING: + pos = l.find('*/') + if pos < 0: + docstring += l0 + continue + docstring += l[:pos] + '\n' + l = l[pos + 2:] + state = SCAN + if l.startswith('CV__') or l.startswith('__CV_'): + state = SCAN + continue + if state != SCAN: + print('Error at %d: invalid state = %d' % (self.lineno, state)) + sys.exit(-1) + while 1: + if re.search('=\\s*\\{\\s*\\}', l): + (token, pos) = (';', len(l)) + else: + (token, pos) = self.find_next_token(l, [';', '"', '{', '}', '//', '/*']) + if not token: + block_head += ' ' + l + block_head = block_head.strip() + if len(block_head) > 0 and block_head[-1] == ')' and block_head.startswith('CV_ENUM_FLAGS('): + l = '' + token = ';' + else: + break + if token == '//': + block_head += ' ' + l[:pos] + l = '' + continue + if token == '/*': + block_head += ' ' + l[:pos] + end_pos = l.find('*/', pos + 2) + if len(l) > pos + 2 and l[pos + 2] == '*': + if end_pos < 0: + state = DOCSTRING + docstring = l[pos + 3:] + '\n' + break + else: + docstring = l[pos + 3:end_pos] + elif end_pos < 0: + state = COMMENT + break + l = l[end_pos + 2:] + continue + if token == '"': + pos2 = pos + 1 + while 1: + (t2, pos2) = self.find_next_token(l, ['\\', '"'], pos2) + if t2 == '': + print('Error at %d: no terminating \'"\'' % (self.lineno,)) + sys.exit(-1) + if t2 == '"': + break + pos2 += 2 + block_head += ' ' + l[:pos2 + 1] + l = l[pos2 + 1:] + continue + stmt = (block_head + ' ' + l[:pos]).strip() + stmt = ' '.join(stmt.split()) + stack_top = self.block_stack[-1] + if stmt.startswith('@'): + break + decl = None + if stack_top[self.PROCESS_FLAG]: + docstring = docstring.strip() + (stmt_type, name, parse_flag, decl) = self.parse_stmt(stmt, token, docstring=docstring) + if 
decl: + if stmt_type.startswith('enum'): + decls.append([stmt_type + ' ' + self.get_dotted_name(name), '', [], decl, None, '']) + else: + decls.append(decl) + if self._generate_gpumat_decls and ('cv.cuda' in decl[0] or decl[0] in ['cv.imshow']): + args = decl[3] + has_mat = len(list(filter(lambda x: x[0] in {'Mat', 'vector_Mat'}, args))) > 0 + if has_mat: + (_, _, _, gpumat_decl) = self.parse_stmt(stmt, token, mat='cuda::GpuMat', docstring=docstring) + if gpumat_decl != decl: + decls.append(gpumat_decl) + if self._generate_umat_decls: + args = decl[3] + has_mat = len(list(filter(lambda x: x[0] in {'Mat', 'vector_Mat'}, args))) > 0 + if has_mat: + (_, _, _, umat_decl) = self.parse_stmt(stmt, token, mat='UMat', docstring=docstring) + if umat_decl != decl: + decls.append(umat_decl) + docstring = '' + if stmt_type == 'namespace': + chunks = [block[1] for block in self.block_stack if block[0] == 'namespace'] + [name] + self.namespaces.add('.'.join(filter(lambda c: len(c) > 0, chunks))) + else: + (stmt_type, name, parse_flag) = ('block', '', False) + if token == '{': + if stmt_type == 'class': + public_section = False + else: + public_section = True + self.block_stack.append([stmt_type, name, parse_flag, public_section, decl]) + if token == '}': + if not self.block_stack: + print('Error at %d: the block stack is empty' % (self.lineno,)) + self.block_stack[-1:] = [] + if pos + 1 < len(l) and l[pos + 1] == ';': + pos += 1 + block_head = '' + l = l[pos + 1:] + return decls + + def print_decls(self, decls): + for d in decls: + print(d[0], d[1], ';'.join(d[2])) + for a in d[3]: + print(' ', a[0], a[1], a[2], end='') + if a[3]: + print('; '.join(a[3])) + else: + print() +if __name__ == '__main__': + parser = CppHeaderParser(generate_umat_decls=True, generate_gpumat_decls=True) + decls = [] + for hname in opencv_hdr_list: + decls += parser.parse(hname) + parser.print_decls(decls) + print(len(decls)) + print('namespaces:', ' '.join(sorted(parser.namespaces))) + +# File: opencv-master/modules/python/src2/typing_stubs_generation/__init__.py +from .nodes import NamespaceNode, ClassNode, ClassProperty, EnumerationNode, FunctionNode, ConstantNode, TypeNode, OptionalTypeNode, TupleTypeNode, AliasTypeNode, SequenceTypeNode, AnyTypeNode, AggregatedTypeNode +from .types_conversion import replace_template_parameters_with_placeholders, get_template_instantiation_type, create_type_node +from .ast_utils import SymbolName, ScopeNotFoundError, SymbolNotFoundError, find_scope, find_class_node, create_class_node, create_function_node, resolve_enum_scopes +from .generation import generate_typing_stubs + +# File: opencv-master/modules/python/src2/typing_stubs_generation/api_refinement.py +__all__ = ['apply_manual_api_refinement'] +from typing import cast, Sequence, Callable, Iterable +from .nodes import NamespaceNode, FunctionNode, OptionalTypeNode, TypeNode, ClassProperty, PrimitiveTypeNode, ASTNodeTypeNode, AggregatedTypeNode, CallableTypeNode, AnyTypeNode, TupleTypeNode, UnionTypeNode, ProtocolClassNode, DictTypeNode, ClassTypeNode +from .ast_utils import find_function_node, SymbolName, for_each_function_overload +from .types_conversion import create_type_node + +def apply_manual_api_refinement(root: NamespaceNode) -> None: + refine_highgui_module(root) + refine_cuda_module(root) + export_matrix_type_constants(root) + refine_dnn_module(root) + builtin_exception = root.add_class('Exception') + builtin_exception.is_exported = False + root.add_class('error', (builtin_exception,), ERROR_CLASS_PROPERTIES) + for 
(symbol_name, refine_symbol) in NODES_TO_REFINE.items(): + refine_symbol(root, symbol_name) + version_constant = root.add_constant('__version__', '<unused>') + version_constant._value_type = 'str' + '' + root.add_function('redirectError', [FunctionNode.Arg('onError', OptionalTypeNode(CallableTypeNode('ErrorCallback', [PrimitiveTypeNode.int_(), PrimitiveTypeNode.str_(), PrimitiveTypeNode.str_(), PrimitiveTypeNode.str_(), PrimitiveTypeNode.int_()])))]) + +def export_matrix_type_constants(root: NamespaceNode) -> None: + MAX_PREDEFINED_CHANNELS = 4 + depth_names = ('CV_8U', 'CV_8S', 'CV_16U', 'CV_16S', 'CV_32S', 'CV_32F', 'CV_64F', 'CV_16F') + for (depth_value, depth_name) in enumerate(depth_names): + root.add_constant(depth_name, str(depth_value)) + for c in range(MAX_PREDEFINED_CHANNELS): + root.add_constant(f'{depth_name}C{c + 1}', f'{depth_value + 8 * c}') + root.add_function(f'{depth_name}C', (FunctionNode.Arg('channels', PrimitiveTypeNode.int_()),), FunctionNode.RetType(PrimitiveTypeNode.int_())) + root.add_function('CV_MAKETYPE', (FunctionNode.Arg('depth', PrimitiveTypeNode.int_()), FunctionNode.Arg('channels', PrimitiveTypeNode.int_())), FunctionNode.RetType(PrimitiveTypeNode.int_())) + +def make_optional_arg(arg_name: str) -> Callable[[NamespaceNode, SymbolName], None]: + + def _make_optional_arg(root_node: NamespaceNode, function_symbol_name: SymbolName) -> None: + function = find_function_node(root_node, function_symbol_name) + for overload in function.overloads: + arg_idx = _find_argument_index(overload.arguments, arg_name) + if isinstance(overload.arguments[arg_idx].type_node, OptionalTypeNode): + continue + overload.arguments[arg_idx].type_node = OptionalTypeNode(cast(TypeNode, overload.arguments[arg_idx].type_node)) + return _make_optional_arg + +def refine_cuda_module(root: NamespaceNode) -> None: + + def fix_cudaoptflow_enums_names() -> None: + for class_name in ('NvidiaOpticalFlow_1_0', 'NvidiaOpticalFlow_2_0'): + if class_name not in cuda_root.classes: + continue + opt_flow_class = cuda_root.classes[class_name] + _trim_class_name_from_argument_types(for_each_function_overload(opt_flow_class), class_name) + + def fix_namespace_usage_scope(cuda_ns: NamespaceNode) -> None: + USED_TYPES = ('GpuMat', 'Stream') + + def fix_type_usage(type_node: TypeNode) -> None: + if isinstance(type_node, AggregatedTypeNode): + for item in type_node.items: + fix_type_usage(item) + if isinstance(type_node, ASTNodeTypeNode): + if type_node._typename in USED_TYPES: + type_node._typename = f'cuda_{type_node._typename}' + for overload in for_each_function_overload(cuda_ns): + if overload.return_type is not None: + fix_type_usage(overload.return_type.type_node) + for type_node in [arg.type_node for arg in overload.arguments if arg.type_node is not None]: + fix_type_usage(type_node) + if 'cuda' not in root.namespaces: + return + cuda_root = root.namespaces['cuda'] + fix_cudaoptflow_enums_names() + for ns in [ns for (ns_name, ns) in root.namespaces.items() if ns_name.startswith('cuda')]: + fix_namespace_usage_scope(ns) + +def refine_highgui_module(root: NamespaceNode) -> None: + if 'destroyAllWindows' not in root.functions: + return + '' + root.add_function('createTrackbar', [FunctionNode.Arg('trackbarName', PrimitiveTypeNode.str_()), FunctionNode.Arg('windowName', PrimitiveTypeNode.str_()), FunctionNode.Arg('value', PrimitiveTypeNode.int_()), FunctionNode.Arg('count', PrimitiveTypeNode.int_()), FunctionNode.Arg('onChange', CallableTypeNode('TrackbarCallback', PrimitiveTypeNode.int_('int')))]) + ''
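+ # createButton is declared by hand because its callback signature cannot be
+ # recovered from the headers: per the stub below, the callback receives a
+ # single tuple, either (state,) or (state, user_data). Illustrative usage,
+ # assuming a Qt-enabled OpenCV build:
+ #   cv2.createButton('run', lambda args: print(args[0]))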
root.add_function('createButton', [FunctionNode.Arg('buttonName', PrimitiveTypeNode.str_()), FunctionNode.Arg('onChange', CallableTypeNode('ButtonCallback', UnionTypeNode('onButtonChangeCallbackData', [TupleTypeNode('onButtonChangeCallbackData', [PrimitiveTypeNode.int_()]), TupleTypeNode('onButtonChangeCallbackData', [PrimitiveTypeNode.int_(), AnyTypeNode('void*')])]))), FunctionNode.Arg('userData', OptionalTypeNode(AnyTypeNode('void*')), default_value='None'), FunctionNode.Arg('buttonType', PrimitiveTypeNode.int_(), default_value='0'), FunctionNode.Arg('initialButtonState', PrimitiveTypeNode.int_(), default_value='0')]) + '' + root.add_function('setMouseCallback', [FunctionNode.Arg('windowName', PrimitiveTypeNode.str_()), FunctionNode.Arg('onMouse', CallableTypeNode('MouseCallback', [PrimitiveTypeNode.int_(), PrimitiveTypeNode.int_(), PrimitiveTypeNode.int_(), PrimitiveTypeNode.int_(), OptionalTypeNode(AnyTypeNode('void*'))])), FunctionNode.Arg('param', OptionalTypeNode(AnyTypeNode('void*')), default_value='None')]) + +def refine_dnn_module(root: NamespaceNode) -> None: + if 'dnn' not in root.namespaces: + return + dnn_module = root.namespaces['dnn'] + '' + layer_proto = ProtocolClassNode('LayerProtocol', dnn_module) + layer_proto.add_function('__init__', arguments=[FunctionNode.Arg('params', DictTypeNode('LayerParams', PrimitiveTypeNode.str_(), create_type_node('cv::dnn::DictValue'))), FunctionNode.Arg('blobs', create_type_node('vector<cv::Mat>'))]) + layer_proto.add_function('getMemoryShapes', arguments=[FunctionNode.Arg('inputs', create_type_node('vector<vector<int>>'))], return_type=FunctionNode.RetType(create_type_node('vector<vector<int>>'))) + layer_proto.add_function('forward', arguments=[FunctionNode.Arg('inputs', create_type_node('vector<cv::Mat>'))], return_type=FunctionNode.RetType(create_type_node('vector<cv::Mat>'))) + '' + root.add_function('dnn_registerLayer', arguments=[FunctionNode.Arg('layerTypeName', PrimitiveTypeNode.str_()), FunctionNode.Arg('layerClass', ClassTypeNode(ASTNodeTypeNode(layer_proto.export_name, f'dnn.{layer_proto.export_name}')))]) + '' + root.add_function('dnn_unregisterLayer', arguments=[FunctionNode.Arg('layerTypeName', PrimitiveTypeNode.str_())]) + +def _trim_class_name_from_argument_types(overloads: Iterable[FunctionNode.Overload], class_name: str) -> None: + separator = f'{class_name}_' + for overload in overloads: + for arg in [arg for arg in overload.arguments if arg.type_node is not None]: + ast_node = cast(ASTNodeTypeNode, arg.type_node) + if class_name in ast_node.ctype_name: + fixed_name = ast_node._typename.split(separator)[-1] + ast_node._typename = fixed_name + +def _find_argument_index(arguments: Sequence[FunctionNode.Arg], name: str) -> int: + for (i, arg) in enumerate(arguments): + if arg.name == name: + return i + raise RuntimeError(f"Failed to find argument with name: '{name}' in {arguments}") +NODES_TO_REFINE = {SymbolName(('cv',), (), 'resize'): make_optional_arg('dsize'), SymbolName(('cv',), (), 'calcHist'): make_optional_arg('mask'), SymbolName(('cv',), (), 'floodFill'): make_optional_arg('mask')} +ERROR_CLASS_PROPERTIES = (ClassProperty('code', PrimitiveTypeNode.int_(), False), ClassProperty('err', PrimitiveTypeNode.str_(), False), ClassProperty('file', PrimitiveTypeNode.str_(), False), ClassProperty('func', PrimitiveTypeNode.str_(), False), ClassProperty('line', PrimitiveTypeNode.int_(), False), ClassProperty('msg', PrimitiveTypeNode.str_(), False)) + +# File: opencv-master/modules/python/src2/typing_stubs_generation/ast_utils.py +from typing import NamedTuple, Sequence,
Tuple, Union, List, Dict, Callable, Optional, Generator, cast +import keyword +from .nodes import ASTNode, NamespaceNode, ClassNode, FunctionNode, EnumerationNode, ClassProperty, OptionalTypeNode, TupleTypeNode +from .types_conversion import create_type_node + +class ScopeNotFoundError(Exception): + pass + +class SymbolNotFoundError(Exception): + pass + +class SymbolName(NamedTuple): + namespaces: Tuple[str, ...] + classes: Tuple[str, ...] + name: str + + def __str__(self) -> str: + return '(namespace="{}", classes="{}", name="{}")'.format('::'.join(self.namespaces), '::'.join(self.classes), self.name) + + def __repr__(self) -> str: + return str(self) + + @classmethod + def parse(cls, full_symbol_name: str, known_namespaces: Sequence[str], symbol_parts_delimiter: str='.') -> 'SymbolName': + chunks = full_symbol_name.split(symbol_parts_delimiter) + (namespaces, name) = (chunks[:-1], chunks[-1]) + classes: List[str] = [] + while len(namespaces) > 0 and '.'.join(namespaces) not in known_namespaces: + classes.insert(0, namespaces.pop()) + return SymbolName(tuple(namespaces), tuple(classes), name) + +def find_scope(root: NamespaceNode, symbol_name: SymbolName, create_missing_namespaces: bool=True) -> Union[NamespaceNode, ClassNode]: + assert isinstance(root, NamespaceNode), 'Wrong hierarchy root type: {}'.format(type(root)) + assert symbol_name.namespaces[0] == root.name, "Trying to find scope for '{}' with root namespace different from: '{}'".format(symbol_name, root.name) + scope: Union[NamespaceNode, ClassNode] = root + for namespace in symbol_name.namespaces[1:]: + if namespace not in scope.namespaces: + if not create_missing_namespaces: + raise ScopeNotFoundError("Can't find a scope for '{}', with '{}', because namespace '{}' is not created yet and `create_missing_namespaces` flag is set to False".format(symbol_name.name, symbol_name, namespace)) + scope = scope.add_namespace(namespace) + else: + scope = scope.namespaces[namespace] + for class_name in symbol_name.classes: + if class_name not in scope.classes: + raise ScopeNotFoundError("Can't find a scope for '{}', with '{}', because '{}' class is not registered yet".format(symbol_name.name, symbol_name, class_name)) + scope = scope.classes[class_name] + return scope + +def find_class_node(root: NamespaceNode, class_symbol: SymbolName, create_missing_namespaces: bool=False) -> ClassNode: + scope = find_scope(root, class_symbol, create_missing_namespaces) + if class_symbol.name not in scope.classes: + raise SymbolNotFoundError("Can't find {} in its scope".format(class_symbol)) + return scope.classes[class_symbol.name] + +def find_function_node(root: NamespaceNode, function_symbol: SymbolName, create_missing_namespaces: bool=False) -> FunctionNode: + scope = find_scope(root, function_symbol, create_missing_namespaces) + if function_symbol.name not in scope.functions: + raise SymbolNotFoundError("Can't find {} in its scope".format(function_symbol)) + return scope.functions[function_symbol.name] + +def create_function_node_in_scope(scope: Union[NamespaceNode, ClassNode], func_info) -> FunctionNode: + + def prepare_overload_arguments_and_return_type(variant): + arguments = [] + for (i, (_, argno)) in enumerate(variant.py_arglist): + arg_info = variant.args[argno] + type_node = create_type_node(arg_info.tp) + default_value = None + if len(arg_info.defval): + default_value = arg_info.defval + if variant.is_arg_optional(i): + if arg_info.py_outputarg: + type_node = OptionalTypeNode(type_node) + default_value = 'None' + elif arg_info.isbig() and 
'None' not in type_node.typename: + type_node = OptionalTypeNode(type_node) + arguments.append(FunctionNode.Arg(arg_info.export_name, type_node=type_node, default_value=default_value)) + if func_info.isconstructor: + return (arguments, None) + if len(variant.py_outlist) > 1: + ret_types = [] + if variant.py_outlist[0][1] == -1: + ret_types.append(create_type_node(variant.rettype)) + outlist = variant.py_outlist[1:] + else: + outlist = variant.py_outlist + for (_, argno) in outlist: + assert argno >= 0, f'Logic Error! Outlist contains function return type: {outlist}' + ret_types.append(create_type_node(variant.args[argno].tp)) + return (arguments, FunctionNode.RetType(TupleTypeNode('return_type', ret_types))) + if len(variant.py_outlist) == 1: + if variant.rettype: + return (arguments, FunctionNode.RetType(create_type_node(variant.rettype))) + ret_type = variant.args[variant.py_outlist[0][1]].tp + return (arguments, FunctionNode.RetType(create_type_node(ret_type))) + return (arguments, None) + function_node = FunctionNode(func_info.name) + function_node.parent = scope + if func_info.isconstructor: + function_node.export_name = '__init__' + for variant in func_info.variants: + (arguments, ret_type) = prepare_overload_arguments_and_return_type(variant) + if isinstance(scope, ClassNode): + if func_info.is_static: + if ret_type is not None and ret_type.typename.endswith(scope.name): + function_node.is_classmethod = True + arguments.insert(0, FunctionNode.Arg('cls')) + else: + function_node.is_static = True + else: + arguments.insert(0, FunctionNode.Arg('self')) + function_node.add_overload(arguments, ret_type) + return function_node + +def create_function_node(root: NamespaceNode, func_info) -> FunctionNode: + func_symbol_name = SymbolName(func_info.namespace.split('.') if len(func_info.namespace) else (), func_info.classname.split('.') if len(func_info.classname) else (), func_info.name) + return create_function_node_in_scope(find_scope(root, func_symbol_name), func_info) + +def create_class_node_in_scope(scope: Union[NamespaceNode, ClassNode], symbol_name: SymbolName, class_info) -> ClassNode: + properties = [] + for property in class_info.props: + export_property_name = property.name + if keyword.iskeyword(export_property_name): + export_property_name += '_' + properties.append(ClassProperty(name=export_property_name, type_node=create_type_node(property.tp), is_readonly=property.readonly)) + class_node = scope.add_class(symbol_name.name, properties=properties) + class_node.export_name = class_info.export_name + if class_info.constructor is not None: + create_function_node_in_scope(class_node, class_info.constructor) + for method in class_info.methods.values(): + create_function_node_in_scope(class_node, method) + return class_node + +def create_class_node(root: NamespaceNode, class_info, namespaces: Sequence[str]) -> ClassNode: + symbol_name = SymbolName.parse(class_info.full_original_name, namespaces) + scope = find_scope(root, symbol_name) + return create_class_node_in_scope(scope, symbol_name, class_info) + +def resolve_enum_scopes(root: NamespaceNode, enums: Dict[SymbolName, EnumerationNode]): + for (symbol_name, enum_node) in enums.items(): + if symbol_name.classes: + try: + scope = find_scope(root, symbol_name) + except ScopeNotFoundError: + for (i, class_name) in enumerate(symbol_name.classes): + scope = find_scope(root, SymbolName(symbol_name.namespaces, classes=symbol_name.classes[:i], name=class_name)) + if class_name in scope.classes: + continue + class_node = 
scope.add_class(class_name) + class_node.is_exported = False + scope = find_scope(root, symbol_name) + else: + scope = find_scope(root, symbol_name) + enum_node.parent = scope + +def get_enclosing_namespace(node: ASTNode, class_node_callback: Optional[Callable[[ClassNode], None]]=None) -> NamespaceNode: + parent_node = node.parent + while not isinstance(parent_node, NamespaceNode): + assert parent_node is not None, "Can't find enclosing namespace for '{}' known as: '{}'".format(node.full_export_name, node.native_name) + if class_node_callback: + class_node_callback(cast(ClassNode, parent_node)) + parent_node = parent_node.parent + return parent_node + +def get_enum_module_and_export_name(enum_node: EnumerationNode) -> Tuple[str, str]: + enum_export_name = enum_node.export_name + + def update_full_export_name(class_node: ClassNode) -> None: + nonlocal enum_export_name + enum_export_name = class_node.export_name + '_' + enum_export_name + namespace_node = get_enclosing_namespace(enum_node, update_full_export_name) + return (enum_export_name, namespace_node.full_export_name) + +def for_each_class(node: Union[NamespaceNode, ClassNode]) -> Generator[ClassNode, None, None]: + for cls in node.classes.values(): + yield cls + if len(cls.classes): + yield from for_each_class(cls) + +def for_each_function(node: Union[NamespaceNode, ClassNode], traverse_class_nodes: bool=True) -> Generator[FunctionNode, None, None]: + yield from node.functions.values() + if traverse_class_nodes: + for cls in for_each_class(node): + yield from for_each_function(cls) + +def for_each_function_overload(node: Union[NamespaceNode, ClassNode], traverse_class_nodes: bool=True) -> Generator[FunctionNode.Overload, None, None]: + for func in for_each_function(node, traverse_class_nodes): + yield from func.overloads +if __name__ == '__main__': + import doctest + doctest.testmod() + +# File: opencv-master/modules/python/src2/typing_stubs_generation/generation.py +__all__ = ('generate_typing_stubs',) +from io import StringIO +from pathlib import Path +import re +from typing import Callable, NamedTuple, Union, Set, Dict, Collection, Tuple, List +import warnings +from .ast_utils import get_enclosing_namespace, get_enum_module_and_export_name, for_each_function_overload, for_each_class +from .predefined_types import PREDEFINED_TYPES +from .api_refinement import apply_manual_api_refinement +from .nodes import ASTNode, ASTNodeType, NamespaceNode, ClassNode, FunctionNode, EnumerationNode, ConstantNode, ProtocolClassNode +from .nodes.type_node import TypeNode, AliasTypeNode, AliasRefTypeNode, AggregatedTypeNode, ASTNodeTypeNode, ConditionalAliasTypeNode, PrimitiveTypeNode + +def generate_typing_stubs(root: NamespaceNode, output_path: Path): + apply_manual_api_refinement(root) + root.resolve_type_nodes() + _generate_typing_module(root, output_path) + _populate_reexported_symbols(root) + _generate_typing_stubs(root, output_path) + +def _generate_typing_stubs(root: NamespaceNode, output_path: Path) -> None: + output_path = Path(output_path) / root.export_name + output_path.mkdir(parents=True, exist_ok=True) + required_imports = _collect_required_imports(root) + output_stream = StringIO() + output_stream.write('__all__: list[str] = []\n\n') + _write_required_imports(required_imports, output_stream) + _write_reexported_symbols_section(root, output_stream) + has_enums = _generate_section_stub(StubSection('# Enumerations', ASTNodeType.Enumeration), root, output_stream, 0) + for class_node in root.classes.values(): + if 
_generate_enums_from_classes_tree(class_node, output_stream, indent=0): + has_enums = True + if has_enums: + output_stream.write('\n') + for section in STUB_SECTIONS: + _generate_section_stub(section, root, output_stream, 0) + (output_path / '__init__.pyi').write_text(output_stream.getvalue()) + for ns in root.namespaces.values(): + _generate_typing_stubs(ns, output_path) + +class StubSection(NamedTuple): + name: str + node_type: ASTNodeType +STUB_SECTIONS = (StubSection('# Constants', ASTNodeType.Constant), StubSection('# Classes', ASTNodeType.Class), StubSection('# Functions', ASTNodeType.Function)) + +def _generate_section_stub(section: StubSection, node: ASTNode, output_stream: StringIO, indent: int) -> bool: + if section.node_type not in node._children: + return False + children = node._children[section.node_type] + if len(children) == 0: + return False + output_stream.write(' ' * indent) + output_stream.write(section.name) + output_stream.write('\n') + stub_generator = NODE_TYPE_TO_STUB_GENERATOR[section.node_type] + children = filter(lambda c: c.is_exported, children.values()) + if hasattr(section.node_type, 'weight'): + children = sorted(children, key=lambda child: getattr(child, 'weight')) + for child in children: + stub_generator(child, output_stream, indent) + output_stream.write('\n') + return True + +def _generate_class_stub(class_node: ClassNode, output_stream: StringIO, indent: int=0) -> None: + class_module = get_enclosing_namespace(class_node) + class_module_name = class_module.full_export_name + if len(class_node.bases) > 0: + bases = [] + for base in class_node.bases: + base_module = get_enclosing_namespace(base) + if base_module != class_module: + bases.append(base.full_export_name) + else: + bases.append(base.export_name) + inheritance_str = f"({', '.join(bases)})" + elif isinstance(class_node, ProtocolClassNode): + inheritance_str = '(Protocol)' + else: + inheritance_str = '' + output_stream.write('{indent}class {name}{bases}:\n'.format(indent=' ' * indent, name=class_node.export_name, bases=inheritance_str)) + has_content = len(class_node.properties) > 0 + for property in class_node.properties: + if property.is_readonly: + template = '{indent}@property\n{indent}def {name}(self) -> {type}: ...\n' + else: + template = '{indent}{name}: {type}\n' + output_stream.write(template.format(indent=' ' * (indent + 4), name=property.name, type=property.relative_typename(class_module_name))) + if len(class_node.properties) > 0: + output_stream.write('\n') + for section in STUB_SECTIONS: + if _generate_section_stub(section, class_node, output_stream, indent + 4): + has_content = True + if not has_content: + output_stream.write(' ' * (indent + 4)) + output_stream.write('...\n\n') + +def _generate_constant_stub(constant_node: ConstantNode, output_stream: StringIO, indent: int=0, extra_export_prefix: str='', generate_uppercase_version: bool=True) -> Tuple[str, ...]: + + def write_constant_to_stream(export_name: str) -> None: + output_stream.write('{indent}{name}: {value_type}\n'.format(name=export_name, value_type=constant_node.value_type, indent=' ' * indent)) + export_name = extra_export_prefix + constant_node.export_name + write_constant_to_stream(export_name) + if generate_uppercase_version: + if re.match('^__.*__$', export_name) is not None: + return (export_name,) + uppercase_name = re.sub('([a-z])([A-Z])', '\\1_\\2', export_name).upper() + if export_name != uppercase_name: + write_constant_to_stream(uppercase_name) + return (export_name, uppercase_name) + return (export_name,) 
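+# Illustrative output of _generate_constant_stub for a hypothetical constant
+# exported as 'videoCaptureAPIs' (camelCase names also get an upper-snake-case twin):
+#   videoCaptureAPIs: int
+#   VIDEO_CAPTURE_APIS: int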
+ +def _generate_enumeration_stub(enumeration_node: EnumerationNode, output_stream: StringIO, indent: int=0, extra_export_prefix: str='') -> None: + entries_extra_prefix = extra_export_prefix + if enumeration_node.is_scoped: + entries_extra_prefix += enumeration_node.export_name + '_' + generated_constants_entries: List[str] = [] + for entry in enumeration_node.constants.values(): + generated_constants_entries.extend(_generate_constant_stub(entry, output_stream, indent, entries_extra_prefix)) + if enumeration_node.export_name.endswith('<unnamed>'): + output_stream.write('\n') + return + output_stream.write('{indent}{export_prefix}{name} = int\n{indent}"""One of [{entries}]"""\n\n'.format(export_prefix=extra_export_prefix, name=enumeration_node.export_name, entries=', '.join(generated_constants_entries), indent=' ' * indent)) + +def _generate_function_stub(function_node: FunctionNode, output_stream: StringIO, indent: int=0) -> None: + if not function_node.overloads: + warnings.warn('Function node "{}" exported as "{}" has no overloads'.format(function_node.full_name, function_node.full_export_name)) + return + decorators = [] + if function_node.is_classmethod: + decorators.append(' ' * indent + '@classmethod') + elif function_node.is_static: + decorators.append(' ' * indent + '@staticmethod') + if len(function_node.overloads) > 1: + decorators.append(' ' * indent + '@_typing.overload') + function_module = get_enclosing_namespace(function_node) + function_module_name = function_module.full_export_name + for overload in function_node.overloads: + annotated_args = [] + for arg in overload.arguments: + annotated_arg = arg.name + typename = arg.relative_typename(function_module_name) + if typename is not None: + annotated_arg += ': ' + typename + if arg.default_value is not None: + annotated_arg += ' = ...'
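+ # Defaults are rendered as '...' per stub-file convention; the concrete
+ # default value only exists in the runtime module.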
+ annotated_args.append(annotated_arg) + if overload.return_type is not None: + ret_type = overload.return_type.relative_typename(function_module_name) + else: + ret_type = 'None' + output_stream.write('{decorators}{indent}def {name}({args}) -> {ret_type}: ...\n'.format(decorators='\n'.join(decorators) + '\n' if len(decorators) > 0 else '', name=function_node.export_name, args=', '.join(annotated_args), ret_type=ret_type, indent=' ' * indent)) + output_stream.write('\n') + +def _generate_enums_from_classes_tree(class_node: ClassNode, output_stream: StringIO, indent: int=0, class_name_prefix: str='') -> bool: + class_name_prefix = class_node.export_name + '_' + class_name_prefix + has_content = len(class_node.enumerations) > 0 + for enum_node in class_node.enumerations.values(): + _generate_enumeration_stub(enum_node, output_stream, indent, class_name_prefix) + for cls in class_node.classes.values(): + if _generate_enums_from_classes_tree(cls, output_stream, indent, class_name_prefix): + has_content = True + return has_content + +def check_overload_presence(node: Union[NamespaceNode, ClassNode]) -> bool: + for func_node in node.functions.values(): + if len(func_node.overloads) > 1: + return True + return False + +def _collect_required_imports(root: NamespaceNode) -> Collection[str]: + + def _add_required_usage_imports(type_node: TypeNode, imports: Set[str]): + for required_import in type_node.required_usage_imports: + imports.add(required_import) + required_imports: Set[str] = set() + has_overload = check_overload_presence(root) + has_protocol = False + for cls in for_each_class(root): + if not has_overload and check_overload_presence(cls): + has_overload = True + required_imports.add('import typing as _typing') + for prop in cls.properties: + _add_required_usage_imports(prop.type_node, required_imports) + for base in cls.bases: + base_namespace = get_enclosing_namespace(base) + if base_namespace != root: + required_imports.add('import ' + base_namespace.full_export_name) + if isinstance(cls, ProtocolClassNode): + has_protocol = True + if has_overload: + required_imports.add('import typing as _typing') + for overload in for_each_function_overload(root): + for arg in filter(lambda a: a.type_node is not None, overload.arguments): + _add_required_usage_imports(arg.type_node, required_imports) + if overload.return_type is not None: + _add_required_usage_imports(overload.return_type.type_node, required_imports) + root_import = 'import ' + root.full_export_name + if root_import in required_imports: + required_imports.remove(root_import) + if has_protocol: + required_imports.add('import sys') + ordered_required_imports = sorted(required_imports) + if has_protocol: + ordered_required_imports.append('if sys.version_info >= (3, 8):\n from typing import Protocol\nelse:\n from typing_extensions import Protocol') + return ordered_required_imports + +def _populate_reexported_symbols(root: NamespaceNode) -> None: + + def _reexport_submodule(ns: NamespaceNode) -> None: + for submodule in ns.namespaces.values(): + ns.reexported_submodules.append(submodule.export_name) + _reexport_submodule(submodule) + _reexport_submodule(root) + root.reexported_submodules.append('typing') + root.reexported_submodules_symbols['mat_wrapper'].append('Mat') + +def _write_reexported_symbols_section(module: NamespaceNode, output_stream: StringIO) -> None: + parent_name = module.full_export_name + for submodule in sorted(module.reexported_submodules): + output_stream.write('from {0} import {1} as {1}\n'.format(parent_name, 
submodule)) + for (submodule, symbols) in sorted(module.reexported_submodules_symbols.items(), key=lambda kv: kv[0]): + for symbol in symbols: + output_stream.write('from {0}.{1} import {2} as {2}\n'.format(parent_name, submodule, symbol)) + if len(module.reexported_submodules) or len(module.reexported_submodules_symbols): + output_stream.write('\n\n') + +def _write_required_imports(required_imports: Collection[str], output_stream: StringIO) -> None: + for required_import in required_imports: + output_stream.write(required_import) + output_stream.write('\n') + if len(required_imports): + output_stream.write('\n\n') + +def _generate_typing_module(root: NamespaceNode, output_path: Path) -> None: + + def register_alias_links_from_aggregated_type(type_node: TypeNode) -> None: + assert isinstance(type_node, AggregatedTypeNode), f"Provided type node '{type_node.ctype_name}' is not an aggregated type" + for item in filter(lambda i: isinstance(i, AliasRefTypeNode), type_node): + type_node = PREDEFINED_TYPES[item.ctype_name] + if isinstance(type_node, AliasTypeNode): + register_alias(type_node) + elif isinstance(type_node, ConditionalAliasTypeNode): + conditional_type_nodes[type_node.ctype_name] = type_node + + def create_alias_for_enum_node(enum_node_alias: AliasTypeNode) -> ConditionalAliasTypeNode: + enum_node = enum_node_alias.ast_node + assert enum_node.node_type == ASTNodeType.Enumeration, f'{enum_node} has wrong node type. Expected type: Enumeration.' + (enum_export_name, enum_module_name) = get_enum_module_and_export_name(enum_node) + return ConditionalAliasTypeNode(enum_export_name, '_typing.TYPE_CHECKING', positive_branch_type=enum_node_alias, negative_branch_type=PrimitiveTypeNode.int_(enum_export_name), condition_required_imports=('import typing as _typing',)) + + def register_alias(alias_node: AliasTypeNode) -> None: + typename = alias_node.typename + if typename in aliases: + return + for required_import in alias_node.required_definition_imports: + required_imports.add(required_import) + if isinstance(alias_node.value, AggregatedTypeNode): + register_alias_links_from_aggregated_type(alias_node.value) + for (i, item) in enumerate(alias_node.value.items): + if not isinstance(item, ASTNodeTypeNode) or item.ast_node is None: + continue + if item.ast_node.node_type != ASTNodeType.Enumeration: + continue + enum_node = create_alias_for_enum_node(item) + alias_node.value.items[i] = enum_node + conditional_type_nodes[enum_node.ctype_name] = enum_node + if isinstance(alias_node.value, ASTNodeTypeNode) and alias_node.value.ast_node == ASTNodeType.Enumeration: + enum_node = create_alias_for_enum_node(alias_node.ast_node) + conditional_type_nodes[enum_node.ctype_name] = enum_node + return + aliases[typename] = alias_node.value.full_typename.replace(root.export_name + '.typing.', '') + if alias_node.doc is not None: + aliases[typename] += f'\n"""{alias_node.doc}"""' + output_path = Path(output_path) / root.export_name / 'typing' + output_path.mkdir(parents=True, exist_ok=True) + required_imports: Set[str] = set() + aliases: Dict[str, str] = {} + conditional_type_nodes: Dict[str, ConditionalAliasTypeNode] = {} + TypeNode.compatible_to_runtime_usage = True + for node in PREDEFINED_TYPES.values(): + node.resolve(root) + if isinstance(node, AliasTypeNode): + register_alias(node) + elif isinstance(node, ConditionalAliasTypeNode): + conditional_type_nodes[node.ctype_name] = node + for node in conditional_type_nodes.values(): + for required_import in node.required_definition_imports: + 
required_imports.add(required_import) + output_stream = StringIO() + output_stream.write('__all__ = [\n') + for alias_name in aliases: + output_stream.write(f' "{alias_name}",\n') + output_stream.write(']\n\n') + _write_required_imports(required_imports, output_stream) + for (_, type_node) in conditional_type_nodes.items(): + output_stream.write(f'if {type_node.condition}:\n ') + output_stream.write(f'{type_node.typename} = {type_node.positive_branch_type.full_typename}\nelse:\n') + output_stream.write(f' {type_node.typename} = {type_node.negative_branch_type.full_typename}\n\n\n') + for (alias_name, alias_type) in aliases.items(): + output_stream.write(f'{alias_name} = {alias_type}\n') + TypeNode.compatible_to_runtime_usage = False + (output_path / '__init__.py').write_text(output_stream.getvalue()) +StubGenerator = Callable[[ASTNode, StringIO, int], None] +NODE_TYPE_TO_STUB_GENERATOR = {ASTNodeType.Class: _generate_class_stub, ASTNodeType.Constant: _generate_constant_stub, ASTNodeType.Enumeration: _generate_enumeration_stub, ASTNodeType.Function: _generate_function_stub} + +# File: opencv-master/modules/python/src2/typing_stubs_generation/nodes/__init__.py +from .node import ASTNode, ASTNodeType +from .namespace_node import NamespaceNode +from .class_node import ClassNode, ClassProperty, ProtocolClassNode +from .function_node import FunctionNode +from .enumeration_node import EnumerationNode +from .constant_node import ConstantNode +from .type_node import TypeNode, OptionalTypeNode, UnionTypeNode, NoneTypeNode, TupleTypeNode, ASTNodeTypeNode, AliasTypeNode, SequenceTypeNode, AnyTypeNode, AggregatedTypeNode, NDArrayTypeNode, AliasRefTypeNode, PrimitiveTypeNode, CallableTypeNode, DictTypeNode, ClassTypeNode + +# File: opencv-master/modules/python/src2/typing_stubs_generation/nodes/class_node.py +from typing import Type, Sequence, NamedTuple, Optional, Tuple, Dict +import itertools +import weakref +from .node import ASTNode, ASTNodeType +from .function_node import FunctionNode +from .enumeration_node import EnumerationNode +from .constant_node import ConstantNode +from .type_node import TypeNode, TypeResolutionError + +class ClassProperty(NamedTuple): + name: str + type_node: TypeNode + is_readonly: bool + + @property + def typename(self) -> str: + return self.type_node.full_typename + + def resolve_type_nodes(self, root: ASTNode) -> None: + try: + self.type_node.resolve(root) + except TypeResolutionError as e: + raise TypeResolutionError('Failed to resolve "{}" property'.format(self.name)) from e + + def relative_typename(self, full_node_name: str) -> str: + return self.type_node.relative_typename(full_node_name) + +class ClassNode(ASTNode): + + def __init__(self, name: str, parent: Optional[ASTNode]=None, export_name: Optional[str]=None, bases: Sequence['weakref.ProxyType[ClassNode]']=(), properties: Sequence[ClassProperty]=()) -> None: + super().__init__(name, parent, export_name) + self.bases = list(bases) + self.properties = properties + + @property + def weight(self) -> int: + return 1 + sum((base.weight for base in self.bases)) + + @property + def children_types(self) -> Tuple[ASTNodeType, ...]: + return (ASTNodeType.Class, ASTNodeType.Function, ASTNodeType.Enumeration, ASTNodeType.Constant) + + @property + def node_type(self) -> ASTNodeType: + return ASTNodeType.Class + + @property + def classes(self) -> Dict[str, 'ClassNode']: + return self._children[ASTNodeType.Class] + + @property + def functions(self) -> Dict[str, FunctionNode]: + return self._children[ASTNodeType.Function] + + 
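+ # Children are bucketed by ASTNodeType in self._children; these properties
+ # are typed views over the corresponding buckets.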
@property + def enumerations(self) -> Dict[str, EnumerationNode]: + return self._children[ASTNodeType.Enumeration] + + @property + def constants(self) -> Dict[str, ConstantNode]: + return self._children[ASTNodeType.Constant] + + def add_class(self, name: str, bases: Sequence['weakref.ProxyType[ClassNode]']=(), properties: Sequence[ClassProperty]=()) -> 'ClassNode': + return self._add_child(ClassNode, name, bases=bases, properties=properties) + + def add_function(self, name: str, arguments: Sequence[FunctionNode.Arg]=(), return_type: Optional[FunctionNode.RetType]=None, is_static: bool=False) -> FunctionNode: + arguments = list(arguments) + if return_type is not None: + is_classmethod = return_type.typename == self.name + else: + is_classmethod = False + if not is_static: + arguments.insert(0, FunctionNode.Arg('self')) + elif is_classmethod: + is_static = False + arguments.insert(0, FunctionNode.Arg('cls')) + return self._add_child(FunctionNode, name, arguments=arguments, return_type=return_type, is_static=is_static, is_classmethod=is_classmethod) + + def add_enumeration(self, name: str) -> EnumerationNode: + return self._add_child(EnumerationNode, name) + + def add_constant(self, name: str, value: str) -> ConstantNode: + return self._add_child(ConstantNode, name, value=value) + + def add_base(self, base_class_node: 'ClassNode') -> None: + self.bases.append(weakref.proxy(base_class_node)) + + def resolve_type_nodes(self, root: ASTNode) -> None: + errors = [] + for child in itertools.chain(self.properties, self.functions.values(), self.classes.values()): + try: + try: + child.resolve_type_nodes(self) + except TypeResolutionError: + child.resolve_type_nodes(root) + except TypeResolutionError as e: + errors.append(str(e)) + if len(errors) > 0: + raise TypeResolutionError('Failed to resolve "{}" class against "{}". 
Errors: {}'.format(self.full_export_name, root.full_export_name, errors)) + +class ProtocolClassNode(ClassNode): + + def __init__(self, name: str, parent: Optional[ASTNode]=None, export_name: Optional[str]=None, properties: Sequence[ClassProperty]=()) -> None: + super().__init__(name, parent, export_name, bases=(), properties=properties) + +# File: opencv-master/modules/python/src2/typing_stubs_generation/nodes/constant_node.py +from typing import Optional, Tuple +from .node import ASTNode, ASTNodeType + +class ConstantNode(ASTNode): + + def __init__(self, name: str, value: str, parent: Optional[ASTNode]=None, export_name: Optional[str]=None) -> None: + super().__init__(name, parent, export_name) + self.value = value + self._value_type = 'int' + + @property + def children_types(self) -> Tuple[ASTNodeType, ...]: + return () + + @property + def node_type(self) -> ASTNodeType: + return ASTNodeType.Constant + + @property + def value_type(self) -> str: + return self._value_type + + def __str__(self) -> str: + return "Constant('{}' exported as '{}': {})".format(self.name, self.export_name, self.value) + +# File: opencv-master/modules/python/src2/typing_stubs_generation/nodes/enumeration_node.py +from typing import Type, Tuple, Optional, Dict +from .node import ASTNode, ASTNodeType +from .constant_node import ConstantNode + +class EnumerationNode(ASTNode): + + def __init__(self, name: str, is_scoped: bool=False, parent: Optional[ASTNode]=None, export_name: Optional[str]=None) -> None: + super().__init__(name, parent, export_name) + self.is_scoped = is_scoped + + @property + def children_types(self) -> Tuple[ASTNodeType, ...]: + return (ASTNodeType.Constant,) + + @property + def node_type(self) -> ASTNodeType: + return ASTNodeType.Enumeration + + @property + def constants(self) -> Dict[str, ConstantNode]: + return self._children[ASTNodeType.Constant] + + def add_constant(self, name: str, value: str) -> ConstantNode: + return self._add_child(ConstantNode, name, value=value) + +# File: opencv-master/modules/python/src2/typing_stubs_generation/nodes/function_node.py +from typing import NamedTuple, Sequence, Optional, Tuple, List +from .node import ASTNode, ASTNodeType +from .type_node import TypeNode, NoneTypeNode, TypeResolutionError + +class FunctionNode(ASTNode): + + class Arg: + + def __init__(self, name: str, type_node: Optional[TypeNode]=None, default_value: Optional[str]=None) -> None: + self.name = name + self.type_node = type_node + self.default_value = default_value + + @property + def typename(self) -> Optional[str]: + return getattr(self.type_node, 'full_typename', None) + + def relative_typename(self, root: str) -> Optional[str]: + if self.type_node is not None: + return self.type_node.relative_typename(root) + return None + + def __str__(self) -> str: + return f'Arg(name={self.name}, type_node={self.type_node}, default_value={self.default_value})' + + def __repr__(self) -> str: + return str(self) + + class RetType: + + def __init__(self, type_node: TypeNode=NoneTypeNode('void')) -> None: + self.type_node = type_node + + @property + def typename(self) -> str: + return self.type_node.full_typename + + def relative_typename(self, root: str) -> Optional[str]: + return self.type_node.relative_typename(root) + + def __str__(self) -> str: + return f'RetType(type_node={self.type_node})' + + def __repr__(self) -> str: + return str(self) + + class Overload(NamedTuple): + arguments: Sequence['FunctionNode.Arg'] = () + return_type: Optional['FunctionNode.RetType'] = None + + def __init__(self, 
name: str, arguments: Optional[Sequence['FunctionNode.Arg']]=None, return_type: Optional['FunctionNode.RetType']=None, is_static: bool=False, is_classmethod: bool=False, parent: Optional[ASTNode]=None, export_name: Optional[str]=None) -> None: + super().__init__(name, parent, export_name) + self.overloads: List[FunctionNode.Overload] = [] + self.is_static = is_static + self.is_classmethod = is_classmethod + if arguments is not None: + self.add_overload(arguments, return_type) + + @property + def node_type(self) -> ASTNodeType: + return ASTNodeType.Function + + @property + def children_types(self) -> Tuple[ASTNodeType, ...]: + return () + + def add_overload(self, arguments: Sequence['FunctionNode.Arg']=(), return_type: Optional['FunctionNode.RetType']=None): + self.overloads.append(FunctionNode.Overload(arguments, return_type)) + + def resolve_type_nodes(self, root: ASTNode): + + def has_unresolved_type_node(item) -> bool: + return item.type_node is not None and (not item.type_node.is_resolved) + errors = [] + for overload in self.overloads: + for arg in filter(has_unresolved_type_node, overload.arguments): + try: + arg.type_node.resolve(root) + except TypeResolutionError as e: + errors.append('Failed to resolve "{}" argument: {}'.format(arg.name, e)) + if overload.return_type is not None and has_unresolved_type_node(overload.return_type): + try: + overload.return_type.type_node.resolve(root) + except TypeResolutionError as e: + errors.append('Failed to resolve return type: {}'.format(e)) + if len(errors) > 0: + raise TypeResolutionError('Failed to resolve "{}" function against "{}". Errors: {}'.format(self.full_export_name, root.full_export_name, ', '.join(('[{}]: {}'.format(i, e) for (i, e) in enumerate(errors))))) + +# File: opencv-master/modules/python/src2/typing_stubs_generation/nodes/namespace_node.py +import itertools +import weakref +from collections import defaultdict +from typing import Dict, List, Optional, Sequence, Tuple +from .class_node import ClassNode, ClassProperty +from .constant_node import ConstantNode +from .enumeration_node import EnumerationNode +from .function_node import FunctionNode +from .node import ASTNode, ASTNodeType +from .type_node import TypeResolutionError + +class NamespaceNode(ASTNode): + + def __init__(self, name: str, parent: Optional[ASTNode]=None, export_name: Optional[str]=None) -> None: + super().__init__(name, parent, export_name) + self.reexported_submodules: List[str] = [] + '' + self.reexported_submodules_symbols: Dict[str, List[str]] = defaultdict(list) + '' + + @property + def node_type(self) -> ASTNodeType: + return ASTNodeType.Namespace + + @property + def children_types(self) -> Tuple[ASTNodeType, ...]: + return (ASTNodeType.Namespace, ASTNodeType.Class, ASTNodeType.Function, ASTNodeType.Enumeration, ASTNodeType.Constant) + + @property + def namespaces(self) -> Dict[str, 'NamespaceNode']: + return self._children[ASTNodeType.Namespace] + + @property + def classes(self) -> Dict[str, ClassNode]: + return self._children[ASTNodeType.Class] + + @property + def functions(self) -> Dict[str, FunctionNode]: + return self._children[ASTNodeType.Function] + + @property + def enumerations(self) -> Dict[str, EnumerationNode]: + return self._children[ASTNodeType.Enumeration] + + @property + def constants(self) -> Dict[str, ConstantNode]: + return self._children[ASTNodeType.Constant] + + def add_namespace(self, name: str) -> 'NamespaceNode': + return self._add_child(NamespaceNode, name) + + def add_class(self, name: str, bases: 
Sequence['weakref.ProxyType[ClassNode]']=(), properties: Sequence[ClassProperty]=()) -> 'ClassNode': + return self._add_child(ClassNode, name, bases=bases, properties=properties) + + def add_function(self, name: str, arguments: Sequence[FunctionNode.Arg]=(), return_type: Optional[FunctionNode.RetType]=None) -> FunctionNode: + return self._add_child(FunctionNode, name, arguments=arguments, return_type=return_type) + + def add_enumeration(self, name: str) -> EnumerationNode: + return self._add_child(EnumerationNode, name) + + def add_constant(self, name: str, value: str) -> ConstantNode: + return self._add_child(ConstantNode, name, value=value) + + def resolve_type_nodes(self, root: Optional[ASTNode]=None) -> None: + errors = [] + for child in itertools.chain(self.functions.values(), self.classes.values(), self.namespaces.values()): + try: + try: + child.resolve_type_nodes(self) + except TypeResolutionError: + if root is not None: + child.resolve_type_nodes(root) + else: + raise + except TypeResolutionError as e: + errors.append(str(e)) + if len(errors) > 0: + raise TypeResolutionError('Failed to resolve "{}" namespace against "{}". Errors: {}'.format(self.full_export_name, root if root is None else root.full_export_name, errors)) + +# File: opencv-master/modules/python/src2/typing_stubs_generation/nodes/node.py +import abc +import enum +import itertools +from typing import Iterator, Type, TypeVar, Dict, Optional, Tuple, DefaultDict +from collections import defaultdict +import weakref +ASTNodeSubtype = TypeVar('ASTNodeSubtype', bound='ASTNode') +NodeType = Type['ASTNode'] +NameToNode = Dict[str, ASTNodeSubtype] + +class ASTNodeType(enum.Enum): + Namespace = enum.auto() + Class = enum.auto() + Function = enum.auto() + Enumeration = enum.auto() + Constant = enum.auto() + +class ASTNode: + + def __init__(self, name: str, parent: Optional['ASTNode']=None, export_name: Optional[str]=None) -> None: + FORBIDDEN_SYMBOLS = ';,*&#/|\\@!()[]^% ' + for forbidden_symbol in FORBIDDEN_SYMBOLS: + assert forbidden_symbol not in name, "Invalid node identifier '{}' - contains 1 or more forbidden symbols: ({})".format(name, FORBIDDEN_SYMBOLS) + assert ':' not in name, "Name '{}' contains C++ scope symbols (':'). Convert the name to Python style and create appropriate parent nodes".format(name) + assert '.' not in name, "Trying to create a node with '.' symbols in its name ({}). 
Dots are supposed to be a scope delimiters, so create all nodes in ('{}') and add '{}' as a last child node".format(name, '->'.join(name.split('.')[:-1]), name.rsplit('.', maxsplit=1)[-1]) + self.__name = name + self.export_name = name if export_name is None else export_name + self._parent: Optional['ASTNode'] = None + self.parent = parent + self.is_exported = True + self._children: DefaultDict[ASTNodeType, NameToNode] = defaultdict(dict) + + def __str__(self) -> str: + return "{}('{}' exported as '{}')".format(self.node_type.name, self.name, self.export_name) + + def __repr__(self) -> str: + return str(self) + + @abc.abstractproperty + def children_types(self) -> Tuple[ASTNodeType, ...]: + pass + + @abc.abstractproperty + def node_type(self) -> ASTNodeType: + pass + + def node_type_name(self) -> str: + return f'{self.node_type.name}::{self.name}' + + @property + def name(self) -> str: + return self.__name + + @property + def native_name(self) -> str: + return self.full_name.replace('.', '::') + + @property + def full_name(self) -> str: + return self._construct_full_name('name') + + @property + def full_export_name(self) -> str: + return self._construct_full_name('export_name') + + @property + def parent(self) -> Optional['ASTNode']: + return self._parent + + @parent.setter + def parent(self, value: Optional['ASTNode']) -> None: + assert value is None or isinstance(value, ASTNode), 'ASTNode.parent should be None or another ASTNode, but got: {}'.format(type(value)) + if value is not None: + value.__check_child_before_add(self, self.name) + if self._parent is not None: + self._parent._children[self.node_type].pop(self.name) + if value is None: + self._parent = None + return + self._parent = weakref.proxy(value) + value._children[self.node_type][self.name] = self + + def __check_child_before_add(self, child: ASTNodeSubtype, name: str) -> None: + assert len(self.children_types) > 0, f"Trying to add child node '{child.node_type_name}' to node '{self.node_type_name}' that can't have children nodes" + assert child.node_type in self.children_types, "Trying to add child node '{}' to node '{}' that supports only ({}) as its children types".format(child.node_type_name, self.node_type_name, ','.join((t.name for t in self.children_types))) + if self._find_child(child.node_type, name) is not None: + raise ValueError(f"Node '{self.node_type_name}' already has a child '{child.node_type_name}'") + + def _add_child(self, child_type: Type[ASTNodeSubtype], name: str, **kwargs) -> ASTNodeSubtype: + return child_type(name, parent=self, **kwargs) + + def _find_child(self, child_type: ASTNodeType, name: str) -> Optional[ASTNodeSubtype]: + if child_type not in self._children: + return None + return self._children[child_type].get(name, None) + + def _construct_full_name(self, property_name: str) -> str: + + def get_name(node: ASTNode) -> str: + return getattr(node, property_name) + assert property_name in ('name', 'export_name'), 'Invalid name property' + name_parts = [get_name(self)] + parent = self.parent + while parent is not None: + name_parts.append(get_name(parent)) + parent = parent.parent + return '.'.join(reversed(name_parts)) + + def __iter__(self) -> Iterator['ASTNode']: + return iter(itertools.chain.from_iterable((node for children_nodes in self._children.values() for node in children_nodes.values()))) + +# File: opencv-master/modules/python/src2/typing_stubs_generation/nodes/type_node.py +from typing import Sequence, Generator, Tuple, Optional, Union +import weakref +import abc +from .node import 
ASTNode, ASTNodeType + +class TypeResolutionError(Exception): + pass + +class TypeNode(abc.ABC): + compatible_to_runtime_usage = False + '' + + def __init__(self, ctype_name: str) -> None: + self.ctype_name = ctype_name + + @abc.abstractproperty + def typename(self) -> str: + pass + + @property + def full_typename(self) -> str: + return self.typename + + @property + def required_definition_imports(self) -> Generator[str, None, None]: + yield from () + + @property + def required_usage_imports(self) -> Generator[str, None, None]: + yield from () + + @property + def is_resolved(self) -> bool: + return True + + def relative_typename(self, module_full_export_name: str) -> str: + return self.full_typename + + def resolve(self, root: ASTNode) -> None: + pass + +class NoneTypeNode(TypeNode): + + @property + def typename(self) -> str: + return 'None' + +class AnyTypeNode(TypeNode): + + @property + def typename(self) -> str: + return '_typing.Any' + + @property + def required_usage_imports(self) -> Generator[str, None, None]: + yield 'import typing as _typing' + +class PrimitiveTypeNode(TypeNode): + + def __init__(self, ctype_name: str, typename: Optional[str]=None) -> None: + super().__init__(ctype_name) + self._typename = typename if typename is not None else ctype_name + + @property + def typename(self) -> str: + return self._typename + + @classmethod + def int_(cls, ctype_name: Optional[str]=None): + if ctype_name is None: + ctype_name = 'int' + return PrimitiveTypeNode(ctype_name, typename='int') + + @classmethod + def float_(cls, ctype_name: Optional[str]=None): + if ctype_name is None: + ctype_name = 'float' + return PrimitiveTypeNode(ctype_name, typename='float') + + @classmethod + def bool_(cls, ctype_name: Optional[str]=None): + if ctype_name is None: + ctype_name = 'bool' + return PrimitiveTypeNode(ctype_name, typename='bool') + + @classmethod + def str_(cls, ctype_name: Optional[str]=None): + if ctype_name is None: + ctype_name = 'string' + return PrimitiveTypeNode(ctype_name, 'str') + +class AliasRefTypeNode(TypeNode): + + def __init__(self, alias_ctype_name: str, alias_export_name: Optional[str]=None): + super().__init__(alias_ctype_name) + if alias_export_name is None: + self.alias_export_name = alias_ctype_name + else: + self.alias_export_name = alias_export_name + + @property + def typename(self) -> str: + return self.alias_export_name + + @property + def full_typename(self) -> str: + return 'cv2.typing.' + self.typename + +class AliasTypeNode(TypeNode): + + def __init__(self, ctype_name: str, value: TypeNode, export_name: Optional[str]=None, doc: Optional[str]=None) -> None: + super().__init__(ctype_name) + self.value = value + self._export_name = export_name + self.doc = doc + + @property + def typename(self) -> str: + if self._export_name is not None: + return self._export_name + return self.ctype_name + + @property + def full_typename(self) -> str: + return 'cv2.typing.' 
+ self.typename + + @property + def required_definition_imports(self) -> Generator[str, None, None]: + return self.value.required_usage_imports + + @property + def required_usage_imports(self) -> Generator[str, None, None]: + yield 'import cv2.typing' + + @property + def is_resolved(self) -> bool: + return self.value.is_resolved + + def resolve(self, root: ASTNode): + try: + self.value.resolve(root) + except TypeResolutionError as e: + raise TypeResolutionError('Failed to resolve alias "{}" exposed as "{}"'.format(self.ctype_name, self.typename)) from e + + @classmethod + def int_(cls, ctype_name: str, export_name: Optional[str]=None, doc: Optional[str]=None): + return cls(ctype_name, PrimitiveTypeNode.int_(), export_name, doc) + + @classmethod + def float_(cls, ctype_name: str, export_name: Optional[str]=None, doc: Optional[str]=None): + return cls(ctype_name, PrimitiveTypeNode.float_(), export_name, doc) + + @classmethod + def array_ref_(cls, ctype_name: str, array_ref_name: str, shape: Optional[Tuple[int, ...]], dtype: Optional[str]=None, export_name: Optional[str]=None, doc: Optional[str]=None): + if doc is None: + doc = f'NDArray(shape={shape}, dtype={dtype})' + else: + doc += f'. NDArray(shape={shape}, dtype={dtype})' + return cls(ctype_name, AliasRefTypeNode(array_ref_name), export_name, doc) + + @classmethod + def union_(cls, ctype_name: str, items: Tuple[TypeNode, ...], export_name: Optional[str]=None, doc: Optional[str]=None): + return cls(ctype_name, UnionTypeNode(ctype_name, items), export_name, doc) + + @classmethod + def optional_(cls, ctype_name: str, item: TypeNode, export_name: Optional[str]=None, doc: Optional[str]=None): + return cls(ctype_name, OptionalTypeNode(item), export_name, doc) + + @classmethod + def sequence_(cls, ctype_name: str, item: TypeNode, export_name: Optional[str]=None, doc: Optional[str]=None): + return cls(ctype_name, SequenceTypeNode(ctype_name, item), export_name, doc) + + @classmethod + def tuple_(cls, ctype_name: str, items: Tuple[TypeNode, ...], export_name: Optional[str]=None, doc: Optional[str]=None): + return cls(ctype_name, TupleTypeNode(ctype_name, items), export_name, doc) + + @classmethod + def class_(cls, ctype_name: str, class_name: str, export_name: Optional[str]=None, doc: Optional[str]=None): + return cls(ctype_name, ASTNodeTypeNode(class_name), export_name, doc) + + @classmethod + def callable_(cls, ctype_name: str, arg_types: Union[TypeNode, Sequence[TypeNode]], ret_type: TypeNode=NoneTypeNode('void'), export_name: Optional[str]=None, doc: Optional[str]=None): + return cls(ctype_name, CallableTypeNode(ctype_name, arg_types, ret_type), export_name, doc) + + @classmethod + def ref_(cls, ctype_name: str, alias_ctype_name: str, alias_export_name: Optional[str]=None, export_name: Optional[str]=None, doc: Optional[str]=None): + return cls(ctype_name, AliasRefTypeNode(alias_ctype_name, alias_export_name), export_name, doc) + + @classmethod + def dict_(cls, ctype_name: str, key_type: TypeNode, value_type: TypeNode, export_name: Optional[str]=None, doc: Optional[str]=None): + return cls(ctype_name, DictTypeNode(ctype_name, key_type, value_type), export_name, doc) + +class ConditionalAliasTypeNode(TypeNode): + + def __init__(self, ctype_name: str, condition: str, positive_branch_type: TypeNode, negative_branch_type: TypeNode, export_name: Optional[str]=None, condition_required_imports: Sequence[str]=()) -> None: + super().__init__(ctype_name) + self.condition = condition + self.positive_branch_type = positive_branch_type + 
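+        # Both branch types are rebound to this alias' ctype_name below, so
+        # either branch can stand in for the aliased C type during resolution.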
self.positive_branch_type.ctype_name = self.ctype_name + self.negative_branch_type = negative_branch_type + self.negative_branch_type.ctype_name = self.ctype_name + self._export_name = export_name + self._condition_required_imports = condition_required_imports + + @property + def typename(self) -> str: + if self._export_name is not None: + return self._export_name + return self.ctype_name + + @property + def full_typename(self) -> str: + return 'cv2.typing.' + self.typename + + @property + def required_definition_imports(self) -> Generator[str, None, None]: + yield from self.positive_branch_type.required_usage_imports + yield from self.negative_branch_type.required_usage_imports + yield from self._condition_required_imports + + @property + def required_usage_imports(self) -> Generator[str, None, None]: + yield 'import cv2.typing' + + @property + def is_resolved(self) -> bool: + return self.positive_branch_type.is_resolved and self.negative_branch_type.is_resolved + + def resolve(self, root: ASTNode): + try: + self.positive_branch_type.resolve(root) + self.negative_branch_type.resolve(root) + except TypeResolutionError as e: + raise TypeResolutionError('Failed to resolve alias "{}" exposed as "{}"'.format(self.ctype_name, self.typename)) from e + + @classmethod + def numpy_array_(cls, ctype_name: str, export_name: Optional[str]=None, shape: Optional[Tuple[int, ...]]=None, dtype: Optional[str]=None): + return cls(ctype_name, '_typing.TYPE_CHECKING', NDArrayTypeNode(ctype_name, shape, dtype), NDArrayTypeNode(ctype_name, shape, dtype, use_numpy_generics=False), condition_required_imports=('import typing as _typing',)) + +class NDArrayTypeNode(TypeNode): + + def __init__(self, ctype_name: str, shape: Optional[Tuple[int, ...]]=None, dtype: Optional[str]=None, use_numpy_generics: bool=True) -> None: + super().__init__(ctype_name) + self.shape = shape + self.dtype = dtype + self._use_numpy_generics = use_numpy_generics + + @property + def typename(self) -> str: + if self._use_numpy_generics: + dtype = self.dtype if self.dtype is not None else 'numpy.generic' + return f'numpy.ndarray[_typing.Any, numpy.dtype[{dtype}]]' + return 'numpy.ndarray' + + @property + def required_usage_imports(self) -> Generator[str, None, None]: + yield 'import numpy' + yield 'import typing as _typing' + +class ASTNodeTypeNode(TypeNode): + + def __init__(self, ctype_name: str, typename: Optional[str]=None, module_name: Optional[str]=None) -> None: + super().__init__(ctype_name) + self._typename = typename if typename is not None else ctype_name + self._module_name = module_name + self._ast_node: Optional[weakref.ProxyType[ASTNode]] = None + + @property + def ast_node(self): + return self._ast_node + + @property + def typename(self) -> str: + if self._ast_node is None: + return self._typename + typename = self._ast_node.export_name + if self._ast_node.node_type is not ASTNodeType.Enumeration: + return typename + parent = self._ast_node.parent + while parent.node_type is ASTNodeType.Class: + typename = parent.export_name + '_' + typename + parent = parent.parent + return typename + + @property + def full_typename(self) -> str: + if self._ast_node is not None: + if self._ast_node.node_type is not ASTNodeType.Enumeration: + return self._ast_node.full_export_name + typename = self._ast_node.export_name + parent = self._ast_node.parent + while parent.node_type is ASTNodeType.Class: + typename = parent.export_name + '_' + typename + parent = parent.parent + return parent.full_export_name + '.' 
+ typename + if self._module_name is not None: + return self._module_name + '.' + self._typename + return self._typename + + @property + def required_usage_imports(self) -> Generator[str, None, None]: + if self._module_name is None: + assert self._ast_node is not None, "Can't find a module for class '{}' exported as '{}'".format(self.ctype_name, self.typename) + module = self._ast_node.parent + while module.node_type is not ASTNodeType.Namespace: + module = module.parent + yield ('import ' + module.full_export_name) + else: + yield ('import ' + self._module_name) + + @property + def is_resolved(self) -> bool: + return self._ast_node is not None or self._module_name is not None + + def resolve(self, root: ASTNode): + if self.is_resolved: + return + node = _resolve_symbol(root, self.typename) + if node is None: + raise TypeResolutionError('Failed to resolve "{}" exposed as "{}"'.format(self.ctype_name, self.typename)) + self._ast_node = weakref.proxy(node) + + def relative_typename(self, module: str) -> str: + assert self._ast_node is not None or self._module_name is not None, "'{}' exported as '{}' is not resolved yet".format(self.ctype_name, self.typename) + if self._module_name is None: + type_module = self._ast_node.parent + while type_module.node_type is not ASTNodeType.Namespace: + type_module = type_module.parent + module_name = type_module.full_export_name + else: + module_name = self._module_name + if module_name != module: + return self.full_typename + return self.full_typename[len(module_name) + 1:] + +class AggregatedTypeNode(TypeNode): + + def __init__(self, ctype_name: str, items: Sequence[TypeNode]) -> None: + super().__init__(ctype_name) + self.items = list(items) + + @property + def is_resolved(self) -> bool: + return all((item.is_resolved for item in self.items)) + + def resolve(self, root: ASTNode) -> None: + errors = [] + for item in filter(lambda item: not item.is_resolved, self): + try: + item.resolve(root) + except TypeResolutionError as e: + errors.append(str(e)) + if len(errors) > 0: + raise TypeResolutionError('Failed to resolve one of "{}" items. 
Errors: {}'.format(self.full_typename, errors)) + + def __iter__(self): + return iter(self.items) + + def __len__(self) -> int: + return len(self.items) + + @property + def required_definition_imports(self) -> Generator[str, None, None]: + for item in self: + yield from item.required_definition_imports + + @property + def required_usage_imports(self) -> Generator[str, None, None]: + for item in self: + yield from item.required_usage_imports + +class ContainerTypeNode(AggregatedTypeNode): + + @property + def typename(self) -> str: + return self.type_format.format(self.types_separator.join((item.typename for item in self))) + + @property + def full_typename(self) -> str: + return self.type_format.format(self.types_separator.join((item.full_typename for item in self))) + + def relative_typename(self, module: str) -> str: + return self.type_format.format(self.types_separator.join((item.relative_typename(module) for item in self))) + + @property + def required_definition_imports(self) -> Generator[str, None, None]: + yield 'import typing as _typing' + yield from super().required_definition_imports + + @property + def required_usage_imports(self) -> Generator[str, None, None]: + if TypeNode.compatible_to_runtime_usage: + yield 'import typing as _typing' + yield from super().required_usage_imports + + @abc.abstractproperty + def type_format(self) -> str: + pass + + @abc.abstractproperty + def types_separator(self) -> str: + pass + +class SequenceTypeNode(ContainerTypeNode): + + def __init__(self, ctype_name: str, item: TypeNode) -> None: + super().__init__(ctype_name, (item,)) + + @property + def type_format(self) -> str: + return '_typing.Sequence[{}]' + + @property + def types_separator(self) -> str: + return ', ' + +class TupleTypeNode(ContainerTypeNode): + + @property + def type_format(self) -> str: + if TypeNode.compatible_to_runtime_usage: + return '_typing.Tuple[{}]' + return 'tuple[{}]' + + @property + def types_separator(self) -> str: + return ', ' + +class UnionTypeNode(ContainerTypeNode): + + @property + def type_format(self) -> str: + if TypeNode.compatible_to_runtime_usage: + return '_typing.Union[{}]' + return '{}' + + @property + def types_separator(self) -> str: + if TypeNode.compatible_to_runtime_usage: + return ', ' + return ' | ' + +class OptionalTypeNode(ContainerTypeNode): + + def __init__(self, value: TypeNode) -> None: + super().__init__(value.ctype_name, (value,)) + + @property + def type_format(self) -> str: + if TypeNode.compatible_to_runtime_usage: + return '_typing.Optional[{}]' + return '{} | None' + + @property + def types_separator(self) -> str: + return ', ' + +class DictTypeNode(ContainerTypeNode): + + def __init__(self, ctype_name: str, key_type: TypeNode, value_type: TypeNode) -> None: + super().__init__(ctype_name, (key_type, value_type)) + + @property + def key_type(self) -> TypeNode: + return self.items[0] + + @property + def value_type(self) -> TypeNode: + return self.items[1] + + @property + def type_format(self) -> str: + if TypeNode.compatible_to_runtime_usage: + return '_typing.Dict[{}]' + return 'dict[{}]' + + @property + def types_separator(self) -> str: + return ', ' + +class CallableTypeNode(AggregatedTypeNode): + + def __init__(self, ctype_name: str, arg_types: Union[TypeNode, Sequence[TypeNode]], ret_type: TypeNode=NoneTypeNode('void')) -> None: + if isinstance(arg_types, TypeNode): + super().__init__(ctype_name, (arg_types, ret_type)) + else: + super().__init__(ctype_name, (*arg_types, ret_type)) + + @property + def arg_types(self) -> 
Sequence[TypeNode]: + return self.items[:-1] + + @property + def ret_type(self) -> TypeNode: + return self.items[-1] + + @property + def typename(self) -> str: + return '_typing.Callable[[{}], {}]'.format(', '.join((arg.typename for arg in self.arg_types)), self.ret_type.typename) + + @property + def full_typename(self) -> str: + return '_typing.Callable[[{}], {}]'.format(', '.join((arg.full_typename for arg in self.arg_types)), self.ret_type.full_typename) + + def relative_typename(self, module: str) -> str: + return '_typing.Callable[[{}], {}]'.format(', '.join((arg.relative_typename(module) for arg in self.arg_types)), self.ret_type.relative_typename(module)) + + @property + def required_definition_imports(self) -> Generator[str, None, None]: + yield 'import typing as _typing' + yield from super().required_definition_imports + + @property + def required_usage_imports(self) -> Generator[str, None, None]: + yield 'import typing as _typing' + yield from super().required_usage_imports + +class ClassTypeNode(ContainerTypeNode): + + def __init__(self, value: TypeNode) -> None: + super().__init__(value.ctype_name, (value,)) + + @property + def type_format(self) -> str: + return '_typing.Type[{}]' + + @property + def types_separator(self) -> str: + return ', ' + +def _resolve_symbol(root: Optional[ASTNode], full_symbol_name: str) -> Optional[ASTNode]: + + def search_down_symbol(scope: Optional[ASTNode], scope_sep: str) -> Optional[ASTNode]: + parts = full_symbol_name.split(scope_sep, maxsplit=1) + while len(parts) == 2: + scope = _resolve_symbol(scope, parts[0]) + if scope is None: + return None + node = _resolve_symbol(scope, parts[1]) + if node is not None: + return node + parts = parts[1].split(scope_sep, maxsplit=1) + return None + assert root is not None, "Can't resolve symbol '{}' from NONE root".format(full_symbol_name) + for attr in filter(lambda attr: hasattr(root, attr), ('namespaces', 'classes', 'enumerations')): + nodes_dict = getattr(root, attr) + node = nodes_dict.get(full_symbol_name, None) + if node is not None: + return node + for scope_sep in ('_', '.'): + node = search_down_symbol(root, scope_sep) + if node is not None: + return node + return None + +# File: opencv-master/modules/python/src2/typing_stubs_generation/predefined_types.py +from .nodes.type_node import AliasTypeNode, AliasRefTypeNode, PrimitiveTypeNode, ASTNodeTypeNode, NDArrayTypeNode, NoneTypeNode, SequenceTypeNode, TupleTypeNode, UnionTypeNode, AnyTypeNode, ConditionalAliasTypeNode +_PREDEFINED_TYPES = (PrimitiveTypeNode.int_('int'), PrimitiveTypeNode.int_('uchar'), PrimitiveTypeNode.int_('unsigned'), PrimitiveTypeNode.int_('int64'), PrimitiveTypeNode.int_('uint8_t'), PrimitiveTypeNode.int_('int8_t'), PrimitiveTypeNode.int_('int32_t'), PrimitiveTypeNode.int_('uint32_t'), PrimitiveTypeNode.int_('size_t'), PrimitiveTypeNode.float_('float'), PrimitiveTypeNode.float_('double'), PrimitiveTypeNode.bool_('bool'), PrimitiveTypeNode.str_('string'), PrimitiveTypeNode.str_('char'), PrimitiveTypeNode.str_('String'), PrimitiveTypeNode.str_('c_string'), ConditionalAliasTypeNode.numpy_array_('NumPyArrayNumeric', dtype='numpy.integer[_typing.Any] | numpy.floating[_typing.Any]'), ConditionalAliasTypeNode.numpy_array_('NumPyArrayFloat32', dtype='numpy.float32'), ConditionalAliasTypeNode.numpy_array_('NumPyArrayFloat64', dtype='numpy.float64'), NoneTypeNode('void'), AliasTypeNode.int_('void*', 'IntPointer', 'Represents an arbitrary pointer'), AliasTypeNode.union_('Mat', items=(ASTNodeTypeNode('Mat', 
module_name='cv2.mat_wrapper'), AliasRefTypeNode('NumPyArrayNumeric')), export_name='MatLike'), AliasTypeNode.sequence_('MatShape', PrimitiveTypeNode.int_()), AliasTypeNode.sequence_('Size', PrimitiveTypeNode.int_(), doc='Required length is 2'), AliasTypeNode.sequence_('Size2f', PrimitiveTypeNode.float_(), doc='Required length is 2'), AliasTypeNode.sequence_('Scalar', PrimitiveTypeNode.float_(), doc='Required length is at most 4'), AliasTypeNode.sequence_('Point', PrimitiveTypeNode.int_(), doc='Required length is 2'), AliasTypeNode.ref_('Point2i', 'Point'), AliasTypeNode.sequence_('Point2f', PrimitiveTypeNode.float_(), doc='Required length is 2'), AliasTypeNode.sequence_('Point2d', PrimitiveTypeNode.float_(), doc='Required length is 2'), AliasTypeNode.sequence_('Point3i', PrimitiveTypeNode.int_(), doc='Required length is 3'), AliasTypeNode.sequence_('Point3f', PrimitiveTypeNode.float_(), doc='Required length is 3'), AliasTypeNode.sequence_('Point3d', PrimitiveTypeNode.float_(), doc='Required length is 3'), AliasTypeNode.sequence_('Range', PrimitiveTypeNode.int_(), doc='Required length is 2'), AliasTypeNode.sequence_('Rect', PrimitiveTypeNode.int_(), doc='Required length is 4'), AliasTypeNode.sequence_('Rect2i', PrimitiveTypeNode.int_(), doc='Required length is 4'), AliasTypeNode.sequence_('Rect2f', PrimitiveTypeNode.float_(), doc='Required length is 4'), AliasTypeNode.sequence_('Rect2d', PrimitiveTypeNode.float_(), doc='Required length is 4'), AliasTypeNode.dict_('Moments', PrimitiveTypeNode.str_('Moments::key'), PrimitiveTypeNode.float_('Moments::value')), AliasTypeNode.tuple_('RotatedRect', items=(AliasRefTypeNode('Point2f'), AliasRefTypeNode('Size2f'), PrimitiveTypeNode.float_()), doc='Any type providing sequence protocol is supported'), AliasTypeNode.tuple_('TermCriteria', items=(ASTNodeTypeNode('TermCriteria.Type'), PrimitiveTypeNode.int_(), PrimitiveTypeNode.float_()), doc='Any type providing sequence protocol is supported'), AliasTypeNode.sequence_('Vec2i', PrimitiveTypeNode.int_(), doc='Required length is 2'), AliasTypeNode.sequence_('Vec2f', PrimitiveTypeNode.float_(), doc='Required length is 2'), AliasTypeNode.sequence_('Vec2d', PrimitiveTypeNode.float_(), doc='Required length is 2'), AliasTypeNode.sequence_('Vec3i', PrimitiveTypeNode.int_(), doc='Required length is 3'), AliasTypeNode.sequence_('Vec3f', PrimitiveTypeNode.float_(), doc='Required length is 3'), AliasTypeNode.sequence_('Vec3d', PrimitiveTypeNode.float_(), doc='Required length is 3'), AliasTypeNode.sequence_('Vec4i', PrimitiveTypeNode.int_(), doc='Required length is 4'), AliasTypeNode.sequence_('Vec4f', PrimitiveTypeNode.float_(), doc='Required length is 4'), AliasTypeNode.sequence_('Vec4d', PrimitiveTypeNode.float_(), doc='Required length is 4'), AliasTypeNode.sequence_('Vec6f', PrimitiveTypeNode.float_(), doc='Required length is 6'), AliasTypeNode.class_('FeatureDetector', 'Feature2D', export_name='FeatureDetector'), AliasTypeNode.class_('DescriptorExtractor', 'Feature2D', export_name='DescriptorExtractor'), AliasTypeNode.class_('FeatureExtractor', 'Feature2D', export_name='FeatureExtractor'), AliasTypeNode.union_('GProtoArg', items=(AliasRefTypeNode('Scalar'), ASTNodeTypeNode('GMat'), ASTNodeTypeNode('GOpaqueT'), ASTNodeTypeNode('GArrayT'))), SequenceTypeNode('GProtoArgs', AliasRefTypeNode('GProtoArg')), AliasTypeNode.sequence_('GProtoInputArgs', AliasRefTypeNode('GProtoArg')), AliasTypeNode.sequence_('GProtoOutputArgs', AliasRefTypeNode('GProtoArg')), AliasTypeNode.union_('GRunArg', items=(AliasRefTypeNode('Mat', 
'MatLike'), AliasRefTypeNode('Scalar'), ASTNodeTypeNode('GOpaqueT'), ASTNodeTypeNode('GArrayT'), SequenceTypeNode('GRunArg', AnyTypeNode('GRunArg')), NoneTypeNode('GRunArg'))), AliasTypeNode.optional_('GOptRunArg', AliasRefTypeNode('GRunArg')), AliasTypeNode.union_('GMetaArg', items=(ASTNodeTypeNode('GMat'), AliasRefTypeNode('Scalar'), ASTNodeTypeNode('GOpaqueT'), ASTNodeTypeNode('GArrayT'))), AliasTypeNode.union_('Prim', items=(ASTNodeTypeNode('gapi.wip.draw.Text'), ASTNodeTypeNode('gapi.wip.draw.Circle'), ASTNodeTypeNode('gapi.wip.draw.Image'), ASTNodeTypeNode('gapi.wip.draw.Line'), ASTNodeTypeNode('gapi.wip.draw.Rect'), ASTNodeTypeNode('gapi.wip.draw.Mosaic'), ASTNodeTypeNode('gapi.wip.draw.Poly'))), SequenceTypeNode('Prims', AliasRefTypeNode('Prim')), AliasTypeNode.array_ref_('Matx33f', array_ref_name='NumPyArrayFloat32', shape=(3, 3), dtype='numpy.float32'), AliasTypeNode.array_ref_('Matx33d', array_ref_name='NumPyArrayFloat64', shape=(3, 3), dtype='numpy.float64'), AliasTypeNode.array_ref_('Matx44f', array_ref_name='NumPyArrayFloat32', shape=(4, 4), dtype='numpy.float32'), AliasTypeNode.array_ref_('Matx44d', array_ref_name='NumPyArrayFloat64', shape=(4, 4), dtype='numpy.float64'), NDArrayTypeNode('vector', dtype='numpy.uint8'), NDArrayTypeNode('vector_uchar', dtype='numpy.uint8'), TupleTypeNode('GMat2', items=(ASTNodeTypeNode('GMat'), ASTNodeTypeNode('GMat'))), ASTNodeTypeNode('GOpaque', 'GOpaqueT'), ASTNodeTypeNode('GArray', 'GArrayT'), AliasTypeNode.union_('GTypeInfo', items=(ASTNodeTypeNode('GMat'), AliasRefTypeNode('Scalar'), ASTNodeTypeNode('GOpaqueT'), ASTNodeTypeNode('GArrayT'))), SequenceTypeNode('GCompileArgs', ASTNodeTypeNode('GCompileArg')), SequenceTypeNode('GTypesInfo', AliasRefTypeNode('GTypeInfo')), SequenceTypeNode('GRunArgs', AliasRefTypeNode('GRunArg')), SequenceTypeNode('GMetaArgs', AliasRefTypeNode('GMetaArg')), SequenceTypeNode('GOptRunArgs', AliasRefTypeNode('GOptRunArg')), AliasTypeNode.callable_('detail_ExtractArgsCallback', arg_types=SequenceTypeNode('GTypesInfo', AliasRefTypeNode('GTypeInfo')), ret_type=SequenceTypeNode('GRunArgs', AliasRefTypeNode('GRunArg')), export_name='ExtractArgsCallback'), AliasTypeNode.callable_('detail_ExtractMetaCallback', arg_types=SequenceTypeNode('GTypesInfo', AliasRefTypeNode('GTypeInfo')), ret_type=SequenceTypeNode('GMetaArgs', AliasRefTypeNode('GMetaArg')), export_name='ExtractMetaCallback'), AliasTypeNode.class_('LayerId', 'DictValue'), AliasTypeNode.dict_('LayerParams', key_type=PrimitiveTypeNode.str_(), value_type=UnionTypeNode('DictValue', items=(PrimitiveTypeNode.int_(), PrimitiveTypeNode.float_(), PrimitiveTypeNode.str_()))), PrimitiveTypeNode.int_('cvflann_flann_distance_t'), PrimitiveTypeNode.int_('flann_flann_distance_t'), PrimitiveTypeNode.int_('cvflann_flann_algorithm_t'), PrimitiveTypeNode.int_('flann_flann_algorithm_t'), AliasTypeNode.dict_('flann_IndexParams', key_type=PrimitiveTypeNode.str_(), value_type=UnionTypeNode('flann_IndexParams::value', items=(PrimitiveTypeNode.bool_(), PrimitiveTypeNode.int_(), PrimitiveTypeNode.float_(), PrimitiveTypeNode.str_())), export_name='IndexParams'), AliasTypeNode.dict_('flann_SearchParams', key_type=PrimitiveTypeNode.str_(), value_type=UnionTypeNode('flann_IndexParams::value', items=(PrimitiveTypeNode.bool_(), PrimitiveTypeNode.int_(), PrimitiveTypeNode.float_(), PrimitiveTypeNode.str_())), export_name='SearchParams'), AliasTypeNode.dict_('map_string_and_string', PrimitiveTypeNode.str_('map_string_and_string::key'), PrimitiveTypeNode.str_('map_string_and_string::value')), 
AliasTypeNode.dict_('map_string_and_int', PrimitiveTypeNode.str_('map_string_and_int::key'), PrimitiveTypeNode.int_('map_string_and_int::value')), AliasTypeNode.dict_('map_string_and_vector_size_t', PrimitiveTypeNode.str_('map_string_and_vector_size_t::key'), SequenceTypeNode('map_string_and_vector_size_t::value', PrimitiveTypeNode.int_('size_t'))), AliasTypeNode.dict_('map_string_and_vector_float', PrimitiveTypeNode.str_('map_string_and_vector_float::key'), SequenceTypeNode('map_string_and_vector_float::value', PrimitiveTypeNode.float_())), AliasTypeNode.dict_('map_int_and_double', PrimitiveTypeNode.int_('map_int_and_double::key'), PrimitiveTypeNode.float_('map_int_and_double::value'))) +PREDEFINED_TYPES = dict(zip((t.ctype_name for t in _PREDEFINED_TYPES), _PREDEFINED_TYPES)) + +# File: opencv-master/modules/python/src2/typing_stubs_generation/types_conversion.py +from typing import Tuple, List, Optional +from .predefined_types import PREDEFINED_TYPES +from .nodes.type_node import TypeNode, UnionTypeNode, SequenceTypeNode, ASTNodeTypeNode, TupleTypeNode + +def replace_template_parameters_with_placeholders(string: str) -> Tuple[str, Tuple[str, ...]]: + template_brackets_indices = [] + template_instantiations_count = 0 + template_start_index = 0 + for (i, c) in enumerate(string): + if c == '<': + template_instantiations_count += 1 + if template_instantiations_count == 1: + template_start_index = i + 1 + elif c == '>': + template_instantiations_count -= 1 + assert template_instantiations_count >= 0, "Provided string is ill-formed. There are more '>' than '<'." + if template_instantiations_count == 0: + template_brackets_indices.append((template_start_index, i)) + assert template_instantiations_count == 0, "Provided string is ill-formed. There are more '<' than '>'." 
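+    # Replacement happens right-to-left so the recorded indices stay valid.
+    # For example (illustrative input): 'vector<Point<int>>' becomes
+    # ('vector<{}>', ('Point<int>',)): the outermost template argument is cut
+    # out and a '{}' placeholder is left in its place.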
+ template_args: List[str] = [] + for (i, j) in reversed(template_brackets_indices): + template_args.insert(0, string[i:j]) + string = string[:i] + '{}' + string[j:] + return (string, tuple(template_args)) + +def get_template_instantiation_type(typename: str) -> str: + (_, args) = replace_template_parameters_with_placeholders(typename) + if len(args) == 0: + raise ValueError("typename ('{}') doesn't contain template instantiations".format(typename)) + if len(args) > 1: + raise ValueError("typename ('{}') contains more than 1 template instantiation".format(typename)) + return args[0] + +def normalize_ctype_name(typename: str) -> str: + for prefix_to_remove in ('cv', 'std'): + if typename.startswith(prefix_to_remove): + typename = typename[len(prefix_to_remove):] + typename = typename.replace('::', '_').lstrip('_') + if typename.endswith('&'): + typename = typename[:-1] + typename = typename.strip() + if typename == 'void*': + return typename + if is_pointer_type(typename): + for suffix in ('*', '_Ptr', 'Ptr'): + if typename.endswith(suffix): + return typename[:-len(suffix)] + if _is_template_instantiation(typename): + return normalize_ctype_name(get_template_instantiation_type(typename)) + return typename.split('_', maxsplit=1)[-1] + if typename.startswith('GArray_') or typename.startswith('GArray<'): + return 'GArrayT' + if typename.startswith('GOpaque_') or typename.startswith('GOpaque<'): + return 'GOpaqueT' + if typename == 'GStreamerPipeline' or typename.startswith('GStreamerSource'): + return 'gst_' + typename + return typename + +def is_tuple_type(typename: str) -> bool: + return typename.startswith('tuple') or typename.startswith('pair') + +def is_sequence_type(typename: str) -> bool: + return typename.startswith('vector') + +def is_pointer_type(typename: str) -> bool: + return typename.endswith('Ptr') or typename.endswith('*') or typename.startswith('Ptr') + +def is_union_type(typename: str) -> bool: + return typename.startswith('util_variant') + +def _is_template_instantiation(typename: str) -> bool: + if '<' in typename: + assert '>' in typename, "Wrong template class instantiation: {}. 
'>' is missing".format(typename) + return True + return False + +def create_type_nodes_from_template_arguments(template_args_str: str) -> List[TypeNode]: + type_nodes = [] + (template_args_str, templated_args_types) = replace_template_parameters_with_placeholders(template_args_str) + template_index = 0 + for template_arg in template_args_str.split(','): + template_arg = template_arg.strip() + if _is_template_instantiation(template_arg): + template_arg = template_arg.format(templated_args_types[template_index]) + template_index += 1 + type_nodes.append(create_type_node(template_arg)) + return type_nodes + +def create_type_node(typename: str, original_ctype_name: Optional[str]=None) -> TypeNode: + if original_ctype_name is None: + original_ctype_name = typename + typename = normalize_ctype_name(typename.strip()) + type_node = PREDEFINED_TYPES.get(typename) + if type_node is not None: + type_node.ctype_name = original_ctype_name + return type_node + for alias in PREDEFINED_TYPES.values(): + if alias.typename == typename: + return alias + if is_union_type(typename): + union_types = get_template_instantiation_type(typename) + return UnionTypeNode(original_ctype_name, items=create_type_nodes_from_template_arguments(union_types)) + if is_sequence_type(typename): + if _is_template_instantiation(typename): + inner_sequence_type = create_type_node(get_template_instantiation_type(typename)) + else: + inner_sequence_type = create_type_node(typename.split('_', 1)[-1]) + return SequenceTypeNode(original_ctype_name, inner_sequence_type) + if is_tuple_type(typename): + tuple_types = get_template_instantiation_type(typename) + return TupleTypeNode(original_ctype_name, items=create_type_nodes_from_template_arguments(tuple_types)) + return ASTNodeTypeNode(original_ctype_name, typename) +if __name__ == '__main__': + import doctest + doctest.testmod() + +# File: opencv-master/modules/python/src2/typing_stubs_generator.py +"""""" +import sys +import warnings +if sys.version_info >= (3, 6): + from contextlib import contextmanager + from typing import Dict, Set, Any, Sequence, Generator, Union + import traceback + from pathlib import Path + from typing_stubs_generation import generate_typing_stubs, NamespaceNode, EnumerationNode, SymbolName, ClassNode, create_function_node, create_class_node, find_class_node, resolve_enum_scopes + import functools + + class FailuresWrapper: + + def __init__(self, exceptions_as_warnings=True): + self.has_failure = False + self.exceptions_as_warnings = exceptions_as_warnings + + def wrap_exceptions_as_warnings(self, original_func=None, ret_type_on_failure=None): + + def parametrized_wrapper(func): + + @functools.wraps(func) + def wrapped_func(*args, **kwargs): + if self.has_failure: + if ret_type_on_failure is None: + return None + return ret_type_on_failure() + try: + ret_type = func(*args, **kwargs) + except Exception: + self.has_failure = True + warnings.warn('Typing stubs generation has failed.\n{}'.format(traceback.format_exc())) + if ret_type_on_failure is None: + return None + return ret_type_on_failure() + return ret_type + if self.exceptions_as_warnings: + return wrapped_func + else: + return original_func + if original_func: + return parametrized_wrapper(original_func) + return parametrized_wrapper + + @contextmanager + def delete_on_failure(self, file_path): + if not self.has_failure and (not file_path.is_file()): + file_path.parent.mkdir(parents=True, exist_ok=True) + file_path.touch() + try: + yield + finally: + if self.has_failure and file_path.is_file(): + 
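+                    # Generation failed part-way: remove the py.typed marker so a
+                    # partially written stubs package is not mistaken for a complete one.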
file_path.unlink() + failures_wrapper = FailuresWrapper(exceptions_as_warnings=True) + + class ClassNodeStub: + + def add_base(self, base_node): + pass + + class TypingStubsGenerator: + + def __init__(self): + self.cv_root = NamespaceNode('cv', export_name='cv2') + self.exported_enums = {} + self.type_hints_ignored_functions = set() + + @failures_wrapper.wrap_exceptions_as_warnings + def add_enum(self, symbol_name, is_scoped_enum, entries): + if symbol_name in self.exported_enums: + assert symbol_name.name == '', 'Trying to export 2 enums with same symbol name: {}'.format(symbol_name) + enumeration_node = self.exported_enums[symbol_name] + else: + enumeration_node = EnumerationNode(symbol_name.name, is_scoped_enum) + self.exported_enums[symbol_name] = enumeration_node + for (entry_name, entry_value) in entries.items(): + enumeration_node.add_constant(entry_name, entry_value) + + @failures_wrapper.wrap_exceptions_as_warnings + def add_ignored_function_name(self, function_name): + self.type_hints_ignored_functions.add(function_name) + + @failures_wrapper.wrap_exceptions_as_warnings + def create_function_node(self, func_info): + create_function_node(self.cv_root, func_info) + + @failures_wrapper.wrap_exceptions_as_warnings(ret_type_on_failure=ClassNodeStub) + def find_class_node(self, class_info, namespaces): + return find_class_node(self.cv_root, SymbolName.parse(class_info.full_original_name, namespaces), create_missing_namespaces=True) + + @failures_wrapper.wrap_exceptions_as_warnings(ret_type_on_failure=ClassNodeStub) + def create_class_node(self, class_info, namespaces): + return create_class_node(self.cv_root, class_info, namespaces) + + def generate(self, output_path): + output_path = Path(output_path) + py_typed_path = output_path / self.cv_root.export_name / 'py.typed' + with failures_wrapper.delete_on_failure(py_typed_path): + self._generate(output_path) + + @failures_wrapper.wrap_exceptions_as_warnings + def _generate(self, output_path): + resolve_enum_scopes(self.cv_root, self.exported_enums) + generate_typing_stubs(self.cv_root, output_path) +else: + + class ClassNode: + + def add_base(self, base_node): + pass + + class TypingStubsGenerator: + + def __init__(self): + self.type_hints_ignored_functions = set() + print('WARNING! Typing stubs can be generated only with Python 3.6 or higher. 
Current version {}'.format(sys.version_info)) + + def add_enum(self, symbol_name, is_scoped_enum, entries): + pass + + def add_ignored_function_name(self, function_name): + pass + + def create_function_node(self, func_info): + pass + + def create_class_node(self, class_info, namespaces): + return ClassNode() + + def find_class_node(self, class_info, namespaces): + return ClassNode() + + def generate(self, output_path): + pass + +# File: opencv-master/modules/ts/misc/chart.py +"""""" +from __future__ import print_function +import testlog_parser, sys, os, xml, re +from table_formatter import * +from optparse import OptionParser +cvsize_re = re.compile('^\\d+x\\d+$') +cvtype_re = re.compile('^(CV_)(8U|8S|16U|16S|32S|32F|64F)(C\\d{1,3})?$') + +def keyselector(a): + if cvsize_re.match(a): + size = [int(d) for d in a.split('x')] + return size[0] * size[1] + elif cvtype_re.match(a): + if a.startswith('CV_'): + a = a[3:] + depth = 7 + if a[0] == '8': + depth = (0, 1)[a[1] == 'S'] + elif a[0] == '1': + depth = (2, 3)[a[2] == 'S'] + elif a[2] == 'S': + depth = 4 + elif a[0] == '3': + depth = 5 + elif a[0] == '6': + depth = 6 + cidx = a.find('C') + if cidx < 0: + channels = 1 + else: + channels = int(a[a.index('C') + 1:]) + return (channels - 1 & 511) + (depth << 9) + return a +convert = lambda text: int(text) if text.isdigit() else text +alphanum_keyselector = lambda key: [convert(c) for c in re.split('([0-9]+)', str(keyselector(key)))] + +def getValueParams(test): + param = test.get('value_param') + if not param: + return [] + if param.startswith('('): + param = param[1:] + if param.endswith(')'): + param = param[:-1] + args = [] + prev_pos = 0 + start = 0 + balance = 0 + while True: + idx = param.find(',', prev_pos) + if idx < 0: + break + idxlb = param.find('(', prev_pos, idx) + while idxlb >= 0: + balance += 1 + idxlb = param.find('(', idxlb + 1, idx) + idxrb = param.find(')', prev_pos, idx) + while idxrb >= 0: + balance -= 1 + idxrb = param.find(')', idxrb + 1, idx) + assert balance >= 0 + if balance == 0: + args.append(param[start:idx].strip()) + start = idx + 1 + prev_pos = idx + 1 + args.append(param[start:].strip()) + return args + +def nextPermutation(indexes, lists, x, y): + idx = len(indexes) - 1 + while idx >= 0: + while idx == x or idx == y: + idx -= 1 + if idx < 0: + return False + v = indexes[idx] + 1 + if v < len(lists[idx]): + indexes[idx] = v + return True + else: + indexes[idx] = 0 + idx -= 1 + return False + +def getTestWideName(sname, indexes, lists, x, y): + name = sname + '::(' + for i in range(len(indexes)): + if i > 0: + name += ', ' + if i == x: + name += 'X' + elif i == y: + name += 'Y' + else: + name += lists[i][indexes[i]] + return str(name + ')') + +def getTest(stests, x, y, row, col): + for pair in stests: + if pair[1][x] == row and pair[1][y] == col: + return pair[0] + return None +if __name__ == '__main__': + parser = OptionParser() + parser.add_option('-o', '--output', dest='format', help="output results in text format (can be 'txt', 'html' or 'auto' - default)", metavar='FMT', default='auto') + parser.add_option('-u', '--units', dest='units', help='units for output values (s, ms (default), us, ns or ticks)', metavar='UNITS', default='ms') + parser.add_option('-m', '--metric', dest='metric', help='output metric', metavar='NAME', default='gmean') + parser.add_option('-x', '', dest='x', help='argument number for rows', metavar='ROW', default=1) + parser.add_option('-y', '', dest='y', help='argument number for columns', metavar='COL', default=0) + 
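+    # -x/-y choose which of a test's value parameters span the table rows and
+    # columns; every other parameter is fixed per table and iterated with
+    # nextPermutation(). A typical invocation (hypothetical log name):
+    #   python chart.py -m gmean -u ms -x 1 -y 0 core_perf.xml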
+    parser.add_option('-f', '--filter', dest='filter', help='regex to filter tests', metavar='REGEX', default=None)
+    (options, args) = parser.parse_args()
+    if len(args) != 1:
+        print('Usage:\n', os.path.basename(sys.argv[0]), '<log_name>.xml', file=sys.stderr)
+        exit(1)
+    options.generateHtml = detectHtmlOutputType(options.format)
+    if options.metric not in metrix_table:
+        options.metric = 'gmean'
+    if options.metric.endswith('%'):
+        options.metric = options.metric[:-1]
+    getter = metrix_table[options.metric][1]
+    tests = testlog_parser.parseLogFile(args[0])
+    if options.filter:
+        expr = re.compile(options.filter)
+        tests = [(t, getValueParams(t)) for t in tests if expr.search(str(t))]
+    else:
+        tests = [(t, getValueParams(t)) for t in tests]
+    args[0] = os.path.basename(args[0])
+    if not tests:
+        print('Error - no tests matched', file=sys.stderr)
+        exit(1)
+    argsnum = len(tests[0][1])
+    sname = tests[0][0].shortName()
+    arglists = []
+    for i in range(argsnum):
+        arglists.append({})
+    names = set()
+    names1 = set()
+    for pair in tests:
+        sn = pair[0].shortName()
+        if len(pair[1]) > 1:
+            names.add(sn)
+        else:
+            names1.add(sn)
+        if sn == sname:
+            if len(pair[1]) != argsnum:
+                print('Error - unable to create chart tables for functions having different argument numbers', file=sys.stderr)
+                sys.exit(1)
+            for i in range(argsnum):
+                arglists[i][pair[1][i]] = 1
+    if names1 or len(names) != 1:
+        print('Error - unable to create tables for functions from different test suits:', file=sys.stderr)
+        i = 1
+        for name in sorted(names):
+            print('%4s: %s' % (i, name), file=sys.stderr)
+            i += 1
+        if names1:
+            print('Other suits in this log (can not be chosen):', file=sys.stderr)
+            for name in sorted(names1):
+                print('%4s: %s' % (i, name), file=sys.stderr)
+                i += 1
+        sys.exit(1)
+    if argsnum < 2:
+        print('Error - tests from %s have less than 2 parameters' % sname, file=sys.stderr)
+        exit(1)
+    for i in range(argsnum):
+        arglists[i] = sorted([str(key) for key in arglists[i].keys()], key=alphanum_keyselector)
+    if options.generateHtml and options.format != 'moinwiki':
+        htmlPrintHeader(sys.stdout, 'Report %s for %s' % (args[0], sname))
+    indexes = [0] * argsnum
+    x = int(options.x)
+    y = int(options.y)
+    if x == y or x < 0 or y < 0 or (x >= argsnum) or (y >= argsnum):
+        x = 1
+        y = 0
+    while True:
+        stests = []
+        for pair in tests:
+            t = pair[0]
+            v = pair[1]
+            for i in range(argsnum):
+                if i != x and i != y:
+                    if v[i] != arglists[i][indexes[i]]:
+                        t = None
+                        break
+            if t:
+                stests.append(pair)
+        tbl = table(metrix_table[options.metric][0] + ' for\n' + getTestWideName(sname, indexes, arglists, x, y))
+        tbl.newColumn('x', 'X\\Y')
+        for col in arglists[y]:
+            tbl.newColumn(col, col, align='center')
+        for row in arglists[x]:
+            tbl.newRow()
+            tbl.newCell('x', row)
+            for col in arglists[y]:
+                case = getTest(stests, x, y, row, col)
+                if case:
+                    status = case.get('status')
+                    if status != 'run':
+                        tbl.newCell(col, status, color='red')
+                    else:
+                        val = getter(case, None, options.units)
+                        if isinstance(val, float):
+                            tbl.newCell(col, '%.2f %s' % (val, options.units), val)
+                        else:
+                            tbl.newCell(col, val, val)
+                else:
+                    tbl.newCell(col, '-')
+        if options.generateHtml:
+            tbl.htmlPrintTable(sys.stdout, options.format == 'moinwiki')
+        else:
+            tbl.consolePrintTable(sys.stdout)
+        if not nextPermutation(indexes, arglists, x, y):
+            break
+    if options.generateHtml and options.format != 'moinwiki':
+        htmlPrintFooter(sys.stdout)
+
+# File: opencv-master/modules/ts/misc/color.py
+""""""
+import math, os, sys
+webcolors = {'indianred': '#cd5c5c', 
'lightcoral': '#f08080', 'salmon': '#fa8072', 'darksalmon': '#e9967a', 'lightsalmon': '#ffa07a', 'red': '#ff0000', 'crimson': '#dc143c', 'firebrick': '#b22222', 'darkred': '#8b0000', 'pink': '#ffc0cb', 'lightpink': '#ffb6c1', 'hotpink': '#ff69b4', 'deeppink': '#ff1493', 'mediumvioletred': '#c71585', 'palevioletred': '#db7093', 'lightsalmon': '#ffa07a', 'coral': '#ff7f50', 'tomato': '#ff6347', 'orangered': '#ff4500', 'darkorange': '#ff8c00', 'orange': '#ffa500', 'gold': '#ffd700', 'yellow': '#ffff00', 'lightyellow': '#ffffe0', 'lemonchiffon': '#fffacd', 'lightgoldenrodyellow': '#fafad2', 'papayawhip': '#ffefd5', 'moccasin': '#ffe4b5', 'peachpuff': '#ffdab9', 'palegoldenrod': '#eee8aa', 'khaki': '#f0e68c', 'darkkhaki': '#bdb76b', 'lavender': '#e6e6fa', 'thistle': '#d8bfd8', 'plum': '#dda0dd', 'violet': '#ee82ee', 'orchid': '#da70d6', 'fuchsia': '#ff00ff', 'magenta': '#ff00ff', 'mediumorchid': '#ba55d3', 'mediumpurple': '#9370db', 'blueviolet': '#8a2be2', 'darkviolet': '#9400d3', 'darkorchid': '#9932cc', 'darkmagenta': '#8b008b', 'purple': '#800080', 'indigo': '#4b0082', 'darkslateblue': '#483d8b', 'slateblue': '#6a5acd', 'mediumslateblue': '#7b68ee', 'greenyellow': '#adff2f', 'chartreuse': '#7fff00', 'lawngreen': '#7cfc00', 'lime': '#00ff00', 'limegreen': '#32cd32', 'palegreen': '#98fb98', 'lightgreen': '#90ee90', 'mediumspringgreen': '#00fa9a', 'springgreen': '#00ff7f', 'mediumseagreen': '#3cb371', 'seagreen': '#2e8b57', 'forestgreen': '#228b22', 'green': '#008000', 'darkgreen': '#006400', 'yellowgreen': '#9acd32', 'olivedrab': '#6b8e23', 'olive': '#808000', 'darkolivegreen': '#556b2f', 'mediumaquamarine': '#66cdaa', 'darkseagreen': '#8fbc8f', 'lightseagreen': '#20b2aa', 'darkcyan': '#008b8b', 'teal': '#008080', 'aqua': '#00ffff', 'cyan': '#00ffff', 'lightcyan': '#e0ffff', 'paleturquoise': '#afeeee', 'aquamarine': '#7fffd4', 'turquoise': '#40e0d0', 'mediumturquoise': '#48d1cc', 'darkturquoise': '#00ced1', 'cadetblue': '#5f9ea0', 'steelblue': '#4682b4', 'lightsteelblue': '#b0c4de', 'powderblue': '#b0e0e6', 'lightblue': '#add8e6', 'skyblue': '#87ceeb', 'lightskyblue': '#87cefa', 'deepskyblue': '#00bfff', 'dodgerblue': '#1e90ff', 'cornflowerblue': '#6495ed', 'royalblue': '#4169e1', 'blue': '#0000ff', 'mediumblue': '#0000cd', 'darkblue': '#00008b', 'navy': '#000080', 'midnightblue': '#191970', 'cornsilk': '#fff8dc', 'blanchedalmond': '#ffebcd', 'bisque': '#ffe4c4', 'navajowhite': '#ffdead', 'wheat': '#f5deb3', 'burlywood': '#deb887', 'tan': '#d2b48c', 'rosybrown': '#bc8f8f', 'sandybrown': '#f4a460', 'goldenrod': '#daa520', 'darkgoldenrod': '#b8860b', 'peru': '#cd853f', 'chocolate': '#d2691e', 'saddlebrown': '#8b4513', 'sienna': '#a0522d', 'brown': '#a52a2a', 'maroon': '#800000', 'white': '#ffffff', 'snow': '#fffafa', 'honeydew': '#f0fff0', 'mintcream': '#f5fffa', 'azure': '#f0ffff', 'aliceblue': '#f0f8ff', 'ghostwhite': '#f8f8ff', 'whitesmoke': '#f5f5f5', 'seashell': '#fff5ee', 'beige': '#f5f5dc', 'oldlace': '#fdf5e6', 'floralwhite': '#fffaf0', 'ivory': '#fffff0', 'antiquewhite': '#faebd7', 'linen': '#faf0e6', 'lavenderblush': '#fff0f5', 'mistyrose': '#ffe4e1', 'gainsboro': '#dcdcdc', 'lightgrey': '#d3d3d3', 'silver': '#c0c0c0', 'darkgray': '#a9a9a9', 'gray': '#808080', 'dimgray': '#696969', 'lightslategray': '#778899', 'slategray': '#708090', 'darkslategray': '#2f4f4f', 'black': '#000000'} +if os.name == 'nt': + consoleColors = ['#000000', '#000080', '#008000', '#008080', '#800000', '#800080', '#808000', '#C0C0C0', '#808080', '#0000FF', '#00FF00', '#00FFFF', '#FF0000', '#FF00FF', '#FFFF00', 
'#FFFFFF'] +else: + consoleColors = ['#2e3436', '#cc0000', '#4e9a06', '#c4a000', '#3465a4', '#75507b', '#06989a', '#d3d7cf', '#ffffff', '#555753', '#ef2929', '#8ae234', '#fce94f', '#729fcf', '#ad7fa8', '#34e2e2', '#eeeeec'] + +def RGB2LAB(r, g, b): + if max(r, g, b): + r /= 255.0 + g /= 255.0 + b /= 255.0 + X = (0.412453 * r + 0.35758 * g + 0.180423 * b) / 0.950456 + Y = 0.212671 * r + 0.71516 * g + 0.072169 * b + Z = (0.019334 * r + 0.119193 * g + 0.950227 * b) / 1.088754 + T = 0.008856 + if X > T: + fX = math.pow(X, 1.0 / 3.0) + else: + fX = 7.787 * X + 16.0 / 116.0 + if Y > T: + Y3 = math.pow(Y, 1.0 / 3.0) + fY = Y3 + L = 116.0 * Y3 - 16.0 + else: + fY = 7.787 * Y + 16.0 / 116.0 + L = 903.3 * Y + if Z > T: + fZ = math.pow(Z, 1.0 / 3.0) + else: + fZ = 7.787 * Z + 16.0 / 116.0 + a = 500.0 * (fX - fY) + b = 200.0 * (fY - fZ) + return (L, a, b) + +def colorDistance(r1, g1, b1=None, r2=None, g2=None, b2=None): + if type(r1) == tuple and type(g1) == tuple and (b1 is None) and (r2 is None) and (g2 is None) and (b2 is None): + (l1, a1, b1) = RGB2LAB(*r1) + (l2, a2, b2) = RGB2LAB(*g1) + else: + (l1, a1, b1) = RGB2LAB(r1, g1, b1) + (l2, a2, b2) = RGB2LAB(r2, g2, b2) + dl = l1 - l2 + C1 = math.sqrt(a1 * a1 + b1 * b1) + C2 = math.sqrt(a2 * a2 + b2 * b2) + dC = C1 - C2 + da = a1 - a2 + db = b1 - b2 + dH = math.sqrt(max(0, da * da + db * db - dC * dC)) + Kl = 1 + K1 = 0.045 + K2 = 0.015 + s1 = dl / Kl + s2 = dC / (1.0 + K1 * C1) + s3 = dH / (1.0 + K2 * C1) + return math.sqrt(s1 * s1 + s2 * s2 + s3 * s3) + +def parseHexColor(col): + if len(col) != 4 and len(col) != 7 and (not col.startswith('#')): + return (0, 0, 0) + if len(col) == 4: + r = col[1] * 2 + g = col[2] * 2 + b = col[3] * 2 + else: + r = col[1:3] + g = col[3:5] + b = col[5:7] + return (int(r, 16), int(g, 16), int(b, 16)) + +def getColor(col): + if isinstance(col, str): + if col.lower() in webcolors: + return parseHexColor(webcolors[col.lower()]) + else: + return parseHexColor(col) + else: + return col + +def getNearestConsoleColor(col): + color = getColor(col) + minidx = 0 + mindist = colorDistance(color, getColor(consoleColors[0])) + for i in range(len(consoleColors)): + dist = colorDistance(color, getColor(consoleColors[i])) + if dist < mindist: + mindist = dist + minidx = i + return minidx +if os.name == 'nt': + import msvcrt + from ctypes import windll, Structure, c_short, c_ushort, byref + SHORT = c_short + WORD = c_ushort + + class COORD(Structure): + _fields_ = [('X', SHORT), ('Y', SHORT)] + + class SMALL_RECT(Structure): + _fields_ = [('Left', SHORT), ('Top', SHORT), ('Right', SHORT), ('Bottom', SHORT)] + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + _fields_ = [('dwSize', COORD), ('dwCursorPosition', COORD), ('wAttributes', WORD), ('srWindow', SMALL_RECT), ('dwMaximumWindowSize', COORD)] + + class winConsoleColorizer(object): + + def __init__(self, stream): + self.handle = msvcrt.get_osfhandle(stream.fileno()) + self.default_attrs = 7 + self.stream = stream + + def get_text_attr(self): + csbi = CONSOLE_SCREEN_BUFFER_INFO() + windll.kernel32.GetConsoleScreenBufferInfo(self.handle, byref(csbi)) + return csbi.wAttributes + + def set_text_attr(self, color): + windll.kernel32.SetConsoleTextAttribute(self.handle, color) + + def write(self, *text, **attrs): + if not text: + return + color = attrs.get('color', None) + if color: + col = getNearestConsoleColor(color) + self.stream.flush() + self.set_text_attr(col) + self.stream.write(' '.join([str(t) for t in text])) + if color: + self.stream.flush() + 
self.set_text_attr(self.default_attrs) + +class dummyColorizer(object): + + def __init__(self, stream): + self.stream = stream + + def write(self, *text, **attrs): + if text: + self.stream.write(' '.join([str(t) for t in text])) + +class asciiSeqColorizer(object): + RESET_SEQ = '\x1b[0m' + ITALIC_SEQ = '\x1b[3m' + UNDERLINE_SEQ = '\x1b[4m' + STRIKEOUT_SEQ = '\x1b[9m' + COLOR_SEQ0 = '\x1b[00;%dm' + COLOR_SEQ1 = '\x1b[01;%dm' + + def __init__(self, stream): + self.stream = stream + + def get_seq(self, code): + if code > 8: + return self.__class__.COLOR_SEQ1 % (30 + code - 9) + else: + return self.__class__.COLOR_SEQ0 % (30 + code) + + def write(self, *text, **attrs): + if not text: + return + color = attrs.get('color', None) + if color: + col = getNearestConsoleColor(color) + self.stream.write(self.get_seq(col)) + self.stream.write(' '.join([str(t) for t in text])) + if color: + self.stream.write(self.__class__.RESET_SEQ) + +def getColorizer(stream): + if stream.isatty(): + if os.name == 'nt': + return winConsoleColorizer(stream) + else: + return asciiSeqColorizer(stream) + else: + return dummyColorizer(stream) + +# File: opencv-master/modules/ts/misc/concatlogs.py +"""""" +from optparse import OptionParser +import glob, sys, os, re +if __name__ == '__main__': + parser = OptionParser() + parser.add_option('-o', '--output', dest='output', help='output file name', metavar='FILENAME', default=None) + (options, args) = parser.parse_args() + if not options.output: + sys.stderr.write('Error: output file name is not provided') + exit(-1) + files = [] + for arg in args: + if '*' in arg or '?' in arg: + files.extend([os.path.abspath(f) for f in glob.glob(arg)]) + else: + files.append(os.path.abspath(arg)) + html = None + for f in sorted(files): + try: + fobj = open(f) + if not fobj: + continue + text = fobj.read() + if not html: + html = text + continue + idx1 = text.find('<body>') + len('<body>') + idx2 = html.rfind('</body>') + html = html[:idx2] + re.sub('[ \\t\\n\\r]+', ' ', text[idx1:]) + except: + pass + if html: + idx1 = text.find('<title>') + len('<title>') + idx2 = html.find('</title>') + html = html[:idx1] + 'OpenCV performance testing report' + html[idx2:] + open(options.output, 'w').write(html) + else: + sys.stderr.write('Error: no input data') + exit(-1) + +# File: opencv-master/modules/ts/misc/report.py +"""""" +from __future__ import print_function +import testlog_parser, sys, os, xml, re, glob +from table_formatter import * +from optparse import OptionParser +if __name__ == '__main__': + parser = OptionParser() + parser.add_option('-o', '--output', dest='format', help="output results in text format (can be 'txt', 'html' or 'auto' - default)", metavar='FMT', default='auto') + parser.add_option('-u', '--units', dest='units', help='units for output values (s, ms (default), us, ns or ticks)', metavar='UNITS', default='ms') + parser.add_option('-c', '--columns', dest='columns', help='comma-separated list of columns to show', metavar='COLS', default='') + parser.add_option('-f', '--filter', dest='filter', help='regex to filter tests', metavar='REGEX', default=None) + parser.add_option('', '--show-all', action='store_true', dest='showall', default=False, help='also include empty and "notrun" lines') + (options, args) = parser.parse_args() + if len(args) < 1: + print('Usage:\n', os.path.basename(sys.argv[0]), '<log_name>.xml', file=sys.stderr) + exit(0) + options.generateHtml = detectHtmlOutputType(options.format) + files = [] + files1 = [] + for arg in args: + if '*' in arg or '?'
in arg: + files1.extend([os.path.abspath(f) for f in glob.glob(arg)]) + else: + files.append(os.path.abspath(arg)) + seen = set() + files = [x for x in files if x not in seen and (not seen.add(x))] + files.extend(set(files1) - set(files)) + args = files + tests = [] + files = [] + for arg in set(args): + try: + cases = testlog_parser.parseLogFile(arg) + if cases: + files.append(os.path.basename(arg)) + tests.extend(cases) + except: + pass + if options.filter: + expr = re.compile(options.filter) + tests = [t for t in tests if expr.search(str(t))] + tbl = table(', '.join(files)) + if options.columns: + metrics = [s.strip() for s in options.columns.split(',')] + metrics = [m for m in metrics if m and (not m.endswith('%')) and (m in metrix_table)] + else: + metrics = None + if not metrics: + metrics = ['name', 'samples', 'outliers', 'min', 'median', 'gmean', 'mean', 'stddev'] + if 'name' not in metrics: + metrics.insert(0, 'name') + for m in metrics: + if m == 'name': + tbl.newColumn(m, metrix_table[m][0]) + else: + tbl.newColumn(m, metrix_table[m][0], align='center') + needNewRow = True + for case in sorted(tests, key=lambda x: str(x)): + if needNewRow: + tbl.newRow() + if not options.showall: + needNewRow = False + status = case.get('status') + if status != 'run': + if status != 'notrun': + needNewRow = True + for m in metrics: + if m == 'name': + tbl.newCell(m, str(case)) + else: + tbl.newCell(m, status, color='red') + else: + needNewRow = True + for m in metrics: + val = metrix_table[m][1](case, None, options.units) + if isinstance(val, float): + tbl.newCell(m, '%.2f %s' % (val, options.units), val) + else: + tbl.newCell(m, val, val) + if not needNewRow: + tbl.trimLastRow() + if options.generateHtml: + if options.format == 'moinwiki': + tbl.htmlPrintTable(sys.stdout, True) + else: + htmlPrintHeader(sys.stdout, 'Report %s tests from %s' % (len(tests), ', '.join(files))) + tbl.htmlPrintTable(sys.stdout) + htmlPrintFooter(sys.stdout) + else: + tbl.consolePrintTable(sys.stdout) + +# File: opencv-master/modules/ts/misc/run.py +"""""" +import os +import argparse +import logging +import datetime +from run_utils import Err, CMakeCache, log, execute +from run_suite import TestSuite +from run_android import AndroidTestSuite +epilog = '\nNOTE:\nAdditional options starting with "--gtest_" and "--perf_" will be passed directly to the test executables.\n' +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='OpenCV test runner script', epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('build_path', nargs='?', default='.', help='Path to build directory (should contain CMakeCache.txt, default is current) or to directory with tests (all platform checks will be disabled in this case)') + parser.add_argument('-t', '--tests', metavar='MODULES', default='', help='Comma-separated list of modules to test (example: -t core,imgproc,java)') + parser.add_argument('-b', '--blacklist', metavar='MODULES', default='', help='Comma-separated list of modules to exclude from test (example: -b java)') + parser.add_argument('-a', '--accuracy', action='store_true', default=False, help='Look for accuracy tests instead of performance tests') + parser.add_argument('--check', action='store_true', default=False, help="Shortcut for '--perf_min_samples=1 --perf_force_samples=1'") + parser.add_argument('-w', '--cwd', metavar='PATH', default='.', help='Working directory for tests (default is current)') + parser.add_argument('--list', action='store_true', default=False, 
help='List available tests (executables)') + parser.add_argument('--list_short', action='store_true', default=False, help='List available tests (aliases)') + parser.add_argument('--list_short_main', action='store_true', default=False, help='List available tests (main repository, aliases)') + parser.add_argument('--configuration', metavar='CFG', default=None, help='Force Debug or Release configuration (for Visual Studio and Java tests build)') + parser.add_argument('-n', '--dry_run', action='store_true', help='Do not run the tests') + parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Print more debug information') + parser.add_argument('--valgrind', action='store_true', default=False, help='Run C++ tests in valgrind') + parser.add_argument('--valgrind_supp', metavar='FILE', action='append', help='Path to valgrind suppression file (example: --valgrind_supp opencv/platforms/scripts/valgrind.supp)') + parser.add_argument('--valgrind_opt', metavar='OPT', action='append', default=[], help='Add command line option to valgrind (example: --valgrind_opt=--leak-check=full)') + parser.add_argument('--qemu', default='', help='Specify qemu binary and base parameters') + parser.add_argument('--android', action='store_true', default=False, help='Android: force all tests to run on device') + parser.add_argument('--android_sdk', metavar='PATH', help='Android: path to SDK to use adb and aapt tools') + parser.add_argument('--android_test_data_path', metavar='PATH', default='/sdcard/opencv_testdata/', help='Android: path to testdata on device') + parser.add_argument('--android_env', action='append', help='Android: add environment variable (NAME=VALUE)') + parser.add_argument('--android_propagate_opencv_env', action='store_true', default=False, help='Android: propagate OPENCV* environment variables') + parser.add_argument('--serial', metavar='serial number', default='', help='Android: directs command to the USB device or emulator with the given serial number') + parser.add_argument('--package', metavar='package', default='', help='Java: run JUnit tests for specified module or Android package') + parser.add_argument('--java_test_exclude', metavar='java_test_exclude', default='', help='Java: Filter out specific JUnit tests') + parser.add_argument('--trace', action='store_true', default=False, help='Trace: enable OpenCV tracing') + parser.add_argument('--trace_dump', metavar='trace_dump', default=-1, help='Trace: dump highlight calls (specify max entries count, 0 - dump all)') + (args, other_args) = parser.parse_known_args() + log.setLevel(logging.DEBUG if args.verbose else logging.INFO) + test_args = [a for a in other_args if a.startswith('--perf_') or a.startswith('--test_') or a.startswith('--gtest_')] + bad_args = [a for a in other_args if a not in test_args] + if len(bad_args) > 0: + log.error('Error: Bad arguments: %s', bad_args) + exit(1) + args.mode = 'test' if args.accuracy else 'perf' + android_env = [] + if args.android_env: + android_env.extend([entry.split('=', 1) for entry in args.android_env]) + if args.android_propagate_opencv_env: + android_env.extend([entry for entry in os.environ.items() if entry[0].startswith('OPENCV')]) + android_env = dict(android_env) + if args.android_test_data_path: + android_env['OPENCV_TEST_DATA_PATH'] = args.android_test_data_path + if args.valgrind: + try: + ver = execute(['valgrind', '--version'], silent=True) + log.debug('Using %s', ver) + except OSError as e: + log.error('Failed to run valgrind: %s', e) + exit(1) + if 
len(args.build_path) != 1: + test_args = [a for a in test_args if not a.startswith('--gtest_output=')] + if args.check: + if not [a for a in test_args if a.startswith('--perf_min_samples=')]: + test_args.extend(['--perf_min_samples=1']) + if not [a for a in test_args if a.startswith('--perf_force_samples=')]: + test_args.extend(['--perf_force_samples=1']) + if not [a for a in test_args if a.startswith('--perf_verify_sanity')]: + test_args.extend(['--perf_verify_sanity']) + if bool(os.environ.get('BUILD_PRECOMMIT', None)): + test_args.extend(['--skip_unstable=1']) + ret = 0 + logs = [] + stamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S') + path = args.build_path + try: + if not os.path.isdir(path): + raise Err('Not a directory (should contain CMakeCache.txt to test executables)') + cache = CMakeCache(args.configuration) + fname = os.path.join(path, 'CMakeCache.txt') + if os.path.isfile(fname): + log.debug('Reading cmake cache file: %s', fname) + cache.read(path, fname) + else: + log.debug('Assuming folder contains tests: %s', path) + cache.setDummy(path) + if args.android or cache.getOS() == 'android': + log.debug('Creating Android test runner') + suite = AndroidTestSuite(args, cache, stamp, android_env) + else: + log.debug('Creating native test runner') + suite = TestSuite(args, cache, stamp) + if args.list or args.list_short or args.list_short_main: + suite.listTests(args.list_short or args.list_short_main, args.list_short_main) + else: + log.debug("Running tests in '%s', working dir: '%s'", path, args.cwd) + + def parseTests(s): + return [o.strip() for o in s.split(',') if o] + (logs, ret) = suite.runTests(parseTests(args.tests), parseTests(args.blacklist), args.cwd, test_args) + except Err as e: + log.error("ERROR: test path '%s' ==> %s", path, e.msg) + ret = -1 + if logs: + log.warning('Collected: %s', logs) + if ret != 0: + log.error('ERROR: some tests have failed') + exit(ret) + +# File: opencv-master/modules/ts/misc/run_android.py +"""""" +import os +import re +import getpass +from run_utils import Err, log, execute, isColorEnabled, hostos +from run_suite import TestSuite + +def exe(program): + return program + '.exe' if hostos == 'nt' else program + +class ApkInfo: + + def __init__(self): + self.pkg_name = None + self.pkg_target = None + self.pkg_runner = None + + def forcePackage(self, package): + if package: + if package.startswith('.'): + self.pkg_target += package + else: + self.pkg_target = package + +class Tool: + + def __init__(self): + self.cmd = [] + + def run(self, args=[], silent=False): + cmd = self.cmd[:] + cmd.extend(args) + return execute(self.cmd + args, silent) + +class Adb(Tool): + + def __init__(self, sdk_dir): + Tool.__init__(self) + exe_path = os.path.join(sdk_dir, exe('platform-tools/adb')) + if not os.path.isfile(exe_path) or not os.access(exe_path, os.X_OK): + exe_path = None + if not exe_path: + exe_path = 'adb' + self.cmd = [exe_path] + + def init(self, serial): + if not serial: + serial = self.detectSerial() + if serial: + self.cmd.extend(['-s', serial]) + + def detectSerial(self): + adb_res = self.run(['devices'], silent=True) + connected_devices = re.findall('^[^\\n]+[ \\t]+device\\r?$', adb_res, re.MULTILINE) + if not connected_devices: + raise Err('Can not find Android device') + elif len(connected_devices) != 1: + raise Err('Too many (%s) devices are connected. 
Please specify single device using --serial option:\n\n%s', len(connected_devices), adb_res) + else: + return connected_devices[0].split('\t')[0] + + def getOSIdentifier(self): + return 'Android' + self.run(['shell', 'getprop ro.build.version.release'], silent=True).strip() + +class Aapt(Tool): + + def __init__(self, sdk_dir): + Tool.__init__(self) + aapt_fn = exe('aapt') + aapt = None + for (r, ds, fs) in os.walk(os.path.join(sdk_dir, 'build-tools')): + if aapt_fn in fs: + aapt = os.path.join(r, aapt_fn) + break + if not aapt: + raise Err('Can not find aapt tool: %s', aapt_fn) + self.cmd = [aapt] + + def dump(self, exe): + res = ApkInfo() + output = self.run(['dump', 'xmltree', exe, 'AndroidManifest.xml'], silent=True) + if not output: + raise Err('Can not dump manifest from %s', exe) + tags = re.split('[ ]+E: ', output) + manifest_tag = [t for t in tags if t.startswith('manifest ')] + if not manifest_tag: + raise Err('Can not read package name from: %s', exe) + res.pkg_name = re.search('^[ ]+A: package=\\"(?P<pkg>.*?)\\" \\(Raw: \\"(?P=pkg)\\"\\)\\r?$', manifest_tag[0], flags=re.MULTILINE).group('pkg') + instrumentation_tag = [t for t in tags if t.startswith('instrumentation ')] + if not instrumentation_tag: + raise Err('Can not find instrumentation details in: %s', exe) + res.pkg_runner = re.search('^[ ]+A: android:name\\(0x[0-9a-f]{8}\\)=\\"(?P<runner>.*?)\\" \\(Raw: \\"(?P=runner)\\"\\)\\r?$', instrumentation_tag[0], flags=re.MULTILINE).group('runner') + res.pkg_target = re.search('^[ ]+A: android:targetPackage\\(0x[0-9a-f]{8}\\)=\\"(?P<pkg>.*?)\\" \\(Raw: \\"(?P=pkg)\\"\\)\\r?$', instrumentation_tag[0], flags=re.MULTILINE).group('pkg') + if not res.pkg_name or not res.pkg_runner or (not res.pkg_target): + raise Err('Can not find instrumentation details in: %s', exe) + return res + +class AndroidTestSuite(TestSuite): + + def __init__(self, options, cache, id, android_env={}): + TestSuite.__init__(self, options, cache, id) + sdk_dir = options.android_sdk or os.environ.get('ANDROID_SDK', False) or os.path.dirname(os.path.dirname(self.cache.android_executable)) + log.debug('Detecting Android tools in directory: %s', sdk_dir) + self.adb = Adb(sdk_dir) + self.aapt = Aapt(sdk_dir) + self.env = android_env + + def isTest(self, fullpath): + if os.path.isfile(fullpath): + if fullpath.endswith('.apk') or os.access(fullpath, os.X_OK): + return True + return False + + def getOS(self): + return self.adb.getOSIdentifier() + + def checkPrerequisites(self): + self.adb.init(self.options.serial) + + def runTest(self, module, path, logfile, workingDir, args=[]): + args = args[:] + exe = os.path.abspath(path) + if exe.endswith('.apk'): + info = self.aapt.dump(exe) + if not info: + raise Err('Can not read info from test package: %s', exe) + info.forcePackage(self.options.package) + self.adb.run(['uninstall', info.pkg_name]) + output = self.adb.run(['install', exe], silent=True) + if not (output and 'Success' in output): + raise Err('Can not install package: %s', exe) + params = ['-e package %s' % info.pkg_target] + ret = self.adb.run(['shell', 'am instrument -w %s %s/%s' % (' '.join(params), info.pkg_name, info.pkg_runner)]) + return (None, ret) + else: + device_dir = getpass.getuser().replace(' ', '') + '_' + self.options.mode + '/' + if isColorEnabled(args): + args.append('--gtest_color=yes') + tempdir = '/data/local/tmp/' + android_dir = tempdir + device_dir + exename = os.path.basename(exe) + android_exe = android_dir + exename + self.adb.run(['push', exe, android_exe]) + self.adb.run(['shell', 'chmod 777 ' +
android_exe]) + env_pieces = ['export %s=%s' % (a, b) for (a, b) in self.env.items()] + pieces = ['cd %s' % android_dir, './%s %s' % (exename, ' '.join(args))] + log.warning('Run: %s' % ' && '.join(pieces)) + ret = self.adb.run(['shell', ' && '.join(env_pieces + pieces)]) + hostlogpath = os.path.join(workingDir, logfile) + self.adb.run(['pull', android_dir + logfile, hostlogpath]) + self.adb.run(['shell', 'rm ' + android_dir + logfile]) + self.adb.run(['shell', 'rm ' + tempdir + '__opencv_temp.*'], silent=True) + if os.path.isfile(hostlogpath): + return (hostlogpath, ret) + return (None, ret) +if __name__ == '__main__': + log.error('This is utility file, please execute run.py script') + +# File: opencv-master/modules/ts/misc/run_long.py +"""""" +from __future__ import print_function +import xml.etree.ElementTree as ET +from glob import glob +from pprint import PrettyPrinter as PP +LONG_TESTS_DEBUG_VALGRIND = [('calib3d', 'Calib3d_InitUndistortRectifyMap.accuracy', 2017.22), ('dnn', 'Reproducibility*', 1000), ('dnn', '*RCNN*', 1000), ('dnn', '*RFCN*', 1000), ('dnn', '*EAST*', 1000), ('dnn', '*VGG16*', 1000), ('dnn', '*ZFNet*', 1000), ('dnn', '*ResNet101_DUC_HDC*', 1000), ('dnn', '*LResNet100E_IR*', 1000), ('dnn', '*read_yolo_voc_stream*', 1000), ('dnn', '*eccv16*', 1000), ('dnn', '*OpenPose*', 1000), ('dnn', '*SSD/*', 1000), ('gapi', 'Fluid.MemoryConsumptionDoesNotGrowOnReshape', 1000000), ('face', 'CV_Face_FacemarkLBF.test_workflow', 10000.0), ('features2d', 'Features2d/DescriptorImage.no_crash/3', 1000), ('features2d', 'Features2d/DescriptorImage.no_crash/4', 1000), ('features2d', 'Features2d/DescriptorImage.no_crash/5', 1000), ('features2d', 'Features2d/DescriptorImage.no_crash/6', 1000), ('features2d', 'Features2d/DescriptorImage.no_crash/7', 1000), ('imgcodecs', 'Imgcodecs_Png.write_big', 1000), ('imgcodecs', 'Imgcodecs_Tiff.decode_tile16384x16384', 1000), ('ml', 'ML_RTrees.regression', 1423.47), ('optflow', 'DenseOpticalFlow_DeepFlow.ReferenceAccuracy', 1360.95), ('optflow', 'DenseOpticalFlow_DeepFlow_perf.perf/0', 1881.59), ('optflow', 'DenseOpticalFlow_DeepFlow_perf.perf/1', 5608.75), ('optflow', 'DenseOpticalFlow_GlobalPatchColliderDCT.ReferenceAccuracy', 5433.84), ('optflow', 'DenseOpticalFlow_GlobalPatchColliderWHT.ReferenceAccuracy', 5232.73), ('optflow', 'DenseOpticalFlow_SimpleFlow.ReferenceAccuracy', 1542.1), ('photo', 'Photo_Denoising.speed', 1484.87), ('photo', 'Photo_DenoisingColoredMulti.regression', 2447.11), ('rgbd', 'Rgbd_Normals.compute', 1156.32), ('shape', 'Hauss.regression', 2625.72), ('shape', 'ShapeEMD_SCD.regression', 61913.7), ('shape', 'Shape_SCD.regression', 3311.46), ('tracking', 'AUKF.br_mean_squared_error', 10764.6), ('tracking', 'UKF.br_mean_squared_error', 5228.27), ('tracking', '*DistanceAndOverlap*/1', 1000.0), ('tracking', '*DistanceAndOverlap*/2', 1000.0), ('videoio', 'videoio/videoio_ffmpeg.write_big*', 1000), ('videoio', 'videoio_ffmpeg.parallel', 1000), ('videoio', '*videocapture_acceleration*', 1000), ('videoio', '*videowriter_acceleration*', 1000), ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_BoostDesc_LBGM.regression', 1124.51), ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG120.regression', 2198.1), ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG48.regression', 1958.52), ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG64.regression', 2113.12), ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG80.regression', 2167.16), ('xfeatures2d', 
'Features2d_ScaleInvariance_Descriptor_BoostDesc_LBGM.regression', 1511.39), ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG120.regression', 1222.07), ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG48.regression', 1059.14), ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG64.regression', 1163.41), ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG80.regression', 1179.06), ('ximgproc', 'L0SmoothTest.SplatSurfaceAccuracy', 6382.26), ('ximgproc', 'perf*/1*:perf*/2*:perf*/3*:perf*/4*:perf*/5*:perf*/6*:perf*/7*:perf*/8*:perf*/9*', 1000.0), ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.MultiThreadReproducibility/5', 1086.33), ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.MultiThreadReproducibility/7', 1405.05), ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.SplatSurfaceAccuracy/5', 1253.07), ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.SplatSurfaceAccuracy/7', 1599.98), ('ximgproc', '*MultiThreadReproducibility*/1:*MultiThreadReproducibility*/2:*MultiThreadReproducibility*/3:*MultiThreadReproducibility*/4:*MultiThreadReproducibility*/5:*MultiThreadReproducibility*/6:*MultiThreadReproducibility*/7:*MultiThreadReproducibility*/8:*MultiThreadReproducibility*/9:*MultiThreadReproducibility*/1*', 1000.0), ('ximgproc', '*AdaptiveManifoldRefImplTest*/1:*AdaptiveManifoldRefImplTest*/2:*AdaptiveManifoldRefImplTest*/3', 1000.0), ('ximgproc', '*JointBilateralFilterTest_NaiveRef*', 1000.0), ('ximgproc', '*RollingGuidanceFilterTest_BilateralRef*/1*:*RollingGuidanceFilterTest_BilateralRef*/2*:*RollingGuidanceFilterTest_BilateralRef*/3*', 1000.0), ('ximgproc', '*JointBilateralFilterTest_NaiveRef*', 1000.0)] + +def longTestFilter(data, module=None): + res = ['*', '-'] + [v for (m, v, _time) in data if module is None or m == module] + return '--gtest_filter={}'.format(':'.join(res)) + +def parseOneFile(filename, timeLimit): + tree = ET.parse(filename) + root = tree.getroot() + + def guess(s, delims): + for delim in delims: + tmp = s.partition(delim) + if len(tmp[1]) != 0: + return tmp[0] + return None + module = guess(filename, ['_posix_', '_nt_', '__']) or root.get('cv_module_name') + if not module: + return (None, None) + res = [] + for elem in root.findall('.//testcase'): + key = '{}.{}'.format(elem.get('classname'), elem.get('name')) + val = elem.get('time') + if float(val) >= timeLimit: + res.append((module, key, float(val))) + return (module, res) +if __name__ == '__main__': + LIMIT = 1000 + res = [] + xmls = glob('*.xml') + for xml in xmls: + print('Parsing file', xml, '...') + (module, testinfo) = parseOneFile(xml, LIMIT) + if not module: + print('SKIP') + continue + res.extend(testinfo) + print('========= RESULTS =========') + PP(indent=4, width=100).pprint(sorted(res)) + +# File: opencv-master/modules/ts/misc/run_suite.py +"""""" +import os +import re +import sys +from run_utils import Err, log, execute, getPlatformVersion, isColorEnabled, TempEnvDir +from run_long import LONG_TESTS_DEBUG_VALGRIND, longTestFilter + +class TestSuite(object): + + def __init__(self, options, cache, id): + self.options = options + self.cache = cache + self.nameprefix = 'opencv_' + self.options.mode + '_' + self.tests = self.cache.gatherTests(self.nameprefix + '*', self.isTest) + self.id = id + + def getOS(self): + return getPlatformVersion() or self.cache.getOS() + + def getLogName(self, app): + return self.getAlias(app) + '_' + str(self.id) + '.xml' + + def listTests(self, short=False, main=False): + if len(self.tests) == 0: + raise Err('No tests found') + for 
t in self.tests: + if short: + t = self.getAlias(t) + if not main or self.cache.isMainModule(t): + log.info('%s', t) + + def getAlias(self, fname): + return sorted(self.getAliases(fname), key=len)[0] + + def getAliases(self, fname): + + def getCuts(fname, prefix): + noext = re.sub('\\.(exe|apk)$', '', fname) + nopref = fname + if fname.startswith(prefix): + nopref = fname[len(prefix):] + noprefext = noext + if noext.startswith(prefix): + noprefext = noext[len(prefix):] + return (noext, nopref, noprefext) + res = [fname] + fname = os.path.basename(fname) + res.append(fname) + for s in getCuts(fname, self.nameprefix): + res.append(s) + if self.cache.build_type == 'Debug' and 'Visual Studio' in self.cache.cmake_generator: + res.append(re.sub('d$', '', s)) + log.debug('Aliases: %s', set(res)) + return set(res) + + def getTest(self, name): + for t in self.tests: + if name in self.getAliases(t): + return t + raise Err('Can not find test: %s', name) + + def getTestList(self, white, black): + res = [t for t in white or self.tests if self.getAlias(t) not in black] + if len(res) == 0: + raise Err('No tests found') + return set(res) + + def isTest(self, fullpath): + if fullpath in ['java', 'python2', 'python3']: + return self.options.mode == 'test' + if not os.path.isfile(fullpath): + return False + if self.cache.getOS() == 'nt' and (not fullpath.endswith('.exe')): + return False + return os.access(fullpath, os.X_OK) + + def wrapCommand(self, module, cmd, env): + if self.options.valgrind: + res = ['valgrind'] + supp = self.options.valgrind_supp or [] + for f in supp: + if os.path.isfile(f): + res.append('--suppressions=%s' % f) + else: + print('WARNING: Valgrind suppression file is missing, SKIP: %s' % f) + res.extend(self.options.valgrind_opt) + has_gtest_filter = next((True for x in cmd if x.startswith('--gtest_filter=')), False) + return res + cmd + ([longTestFilter(LONG_TESTS_DEBUG_VALGRIND, module)] if not has_gtest_filter else []) + elif self.options.qemu: + import shlex + res = shlex.split(self.options.qemu) + for (name, value) in [entry for entry in os.environ.items() if entry[0].startswith('OPENCV') and (not entry[0] in env)]: + res += ['-E', '"{}={}"'.format(name, value)] + for (name, value) in env.items(): + res += ['-E', '"{}={}"'.format(name, value)] + return res + ['--'] + cmd + return cmd + + def tryCommand(self, cmd, workingDir): + try: + if 0 == execute(cmd, cwd=workingDir): + return True + except: + pass + return False + + def runTest(self, module, path, logfile, workingDir, args=[]): + args = args[:] + exe = os.path.abspath(path) + if module == 'java': + cmd = [self.cache.ant_executable, '-Dopencv.build.type=%s' % self.cache.build_type] + if self.options.package: + cmd += ['-Dopencv.test.package=%s' % self.options.package] + if self.options.java_test_exclude: + cmd += ['-Dopencv.test.exclude=%s' % self.options.java_test_exclude] + cmd += ['buildAndTest'] + ret = execute(cmd, cwd=self.cache.java_test_dir) + return (None, ret) + elif module in ['python2', 'python3']: + executable = os.getenv('OPENCV_PYTHON_BINARY', None) + if executable is None or module == 'python{}'.format(sys.version_info[0]): + executable = sys.executable + if executable is None: + executable = path + if not self.tryCommand([executable, '--version'], workingDir): + executable = 'python' + cmd = [executable, self.cache.opencv_home + '/modules/python/test/test.py', '--repo', self.cache.opencv_home, '-v'] + args + module_suffix = '' if 'Visual Studio' not in self.cache.cmake_generator else '/' + 
self.cache.build_type + env = {} + env['PYTHONPATH'] = self.cache.opencv_build + '/lib' + module_suffix + os.pathsep + os.getenv('PYTHONPATH', '') + if self.cache.getOS() == 'nt': + env['PATH'] = self.cache.opencv_build + '/bin' + module_suffix + os.pathsep + os.getenv('PATH', '') + else: + env['LD_LIBRARY_PATH'] = self.cache.opencv_build + '/bin' + os.pathsep + os.getenv('LD_LIBRARY_PATH', '') + ret = execute(cmd, cwd=workingDir, env=env) + return (None, ret) + else: + if isColorEnabled(args): + args.append('--gtest_color=yes') + env = {} + if not self.options.valgrind and self.options.trace: + env['OPENCV_TRACE'] = '1' + env['OPENCV_TRACE_LOCATION'] = 'OpenCVTrace-{}'.format(self.getLogBaseName(exe)) + env['OPENCV_TRACE_SYNC_OPENCL'] = '1' + tempDir = TempEnvDir('OPENCV_TEMP_PATH', '__opencv_temp.') + tempDir.init() + cmd = self.wrapCommand(module, [exe] + args, env) + log.warning('Run: %s' % ' '.join(cmd)) + ret = execute(cmd, cwd=workingDir, env=env) + try: + if not self.options.valgrind and self.options.trace and (int(self.options.trace_dump) >= 0): + import trace_profiler + trace = trace_profiler.Trace(env['OPENCV_TRACE_LOCATION'] + '.txt') + trace.process() + trace.dump(max_entries=int(self.options.trace_dump)) + except: + import traceback + traceback.print_exc() + pass + tempDir.clean() + hostlogpath = os.path.join(workingDir, logfile) + if os.path.isfile(hostlogpath): + return (hostlogpath, ret) + return (None, ret) + + def runTests(self, tests, black, workingDir, args=[]): + args = args[:] + logs = [] + test_list = self.getTestList(tests, black) + if len(test_list) != 1: + args = [a for a in args if not a.startswith('--gtest_output=')] + ret = 0 + for test in test_list: + more_args = [] + exe = self.getTest(test) + if exe in ['java', 'python2', 'python3']: + logname = None + else: + userlog = [a for a in args if a.startswith('--gtest_output=')] + if len(userlog) == 0: + logname = self.getLogName(exe) + more_args.append('--gtest_output=xml:' + logname) + else: + logname = userlog[0][userlog[0].find(':') + 1:] + log.debug('Running the test: %s (%s) ==> %s in %s', exe, args + more_args, logname, workingDir) + if self.options.dry_run: + (logfile, r) = (None, 0) + else: + (logfile, r) = self.runTest(test, exe, logname, workingDir, args + more_args) + log.debug('Test returned: %s ==> %s', r, logfile) + if r != 0: + ret = r + if logfile: + logs.append(os.path.relpath(logfile, workingDir)) + return (logs, ret) +if __name__ == '__main__': + log.error('This is utility file, please execute run.py script') + +# File: opencv-master/modules/ts/misc/run_utils.py +"""""" +import sys +import os +import platform +import re +import tempfile +import glob +import logging +import shutil +from subprocess import check_call, check_output, CalledProcessError, STDOUT + +def initLogger(): + logger = logging.getLogger('run.py') + logger.setLevel(logging.DEBUG) + ch = logging.StreamHandler(sys.stderr) + ch.setFormatter(logging.Formatter('%(message)s')) + logger.addHandler(ch) + return logger +log = initLogger() +hostos = os.name + +class Err(Exception): + + def __init__(self, msg, *args): + self.msg = msg % args + +def execute(cmd, silent=False, cwd='.', env=None): + try: + log.debug('Run: %s', cmd) + if env is not None: + for k in env: + log.debug(' Environ: %s=%s', k, env[k]) + new_env = os.environ.copy() + new_env.update(env) + env = new_env + if sys.platform == 'darwin': + if env is None: + env = os.environ.copy() + if 'DYLD_LIBRARY_PATH' in env: + env['OPENCV_SAVED_DYLD_LIBRARY_PATH'] = 
env['DYLD_LIBRARY_PATH'] + if silent: + return check_output(cmd, stderr=STDOUT, cwd=cwd, env=env).decode('latin-1') + else: + return check_call(cmd, cwd=cwd, env=env) + except CalledProcessError as e: + if silent: + log.debug('Process returned: %d', e.returncode) + return e.output.decode('latin-1') + else: + log.error('Process returned: %d', e.returncode) + return e.returncode + +def isColorEnabled(args): + usercolor = [a for a in args if a.startswith('--gtest_color=')] + return len(usercolor) == 0 and sys.stdout.isatty() and (hostos != 'nt') + +def getPlatformVersion(): + mv = platform.mac_ver() + if mv[0]: + return 'Darwin' + mv[0] + else: + wv = platform.win32_ver() + if wv[0]: + return 'Windows' + wv[0] + else: + lv = platform.linux_distribution() + if lv[0]: + return lv[0] + lv[1] + return None +parse_patterns = ({'name': 'cmake_home', 'default': None, 'pattern': re.compile('^CMAKE_HOME_DIRECTORY:\\w+=(.+)$')}, {'name': 'opencv_home', 'default': None, 'pattern': re.compile('^OpenCV_SOURCE_DIR:\\w+=(.+)$')}, {'name': 'opencv_build', 'default': None, 'pattern': re.compile('^OpenCV_BINARY_DIR:\\w+=(.+)$')}, {'name': 'tests_dir', 'default': None, 'pattern': re.compile('^EXECUTABLE_OUTPUT_PATH:\\w+=(.+)$')}, {'name': 'build_type', 'default': 'Release', 'pattern': re.compile('^CMAKE_BUILD_TYPE:\\w+=(.*)$')}, {'name': 'android_abi', 'default': None, 'pattern': re.compile('^ANDROID_ABI:\\w+=(.*)$')}, {'name': 'android_executable', 'default': None, 'pattern': re.compile('^ANDROID_EXECUTABLE:\\w+=(.*android.*)$')}, {'name': 'ant_executable', 'default': None, 'pattern': re.compile('^ANT_EXECUTABLE:\\w+=(.*ant.*)$')}, {'name': 'java_test_dir', 'default': None, 'pattern': re.compile('^OPENCV_JAVA_TEST_DIR:\\w+=(.*)$')}, {'name': 'is_x64', 'default': 'OFF', 'pattern': re.compile('^CUDA_64_BIT_DEVICE_CODE:\\w+=(ON)$')}, {'name': 'cmake_generator', 'default': None, 'pattern': re.compile('^CMAKE_GENERATOR:\\w+=(.+)$')}, {'name': 'python2', 'default': None, 'pattern': re.compile('^BUILD_opencv_python2:\\w+=(.*)$')}, {'name': 'python3', 'default': None, 'pattern': re.compile('^BUILD_opencv_python3:\\w+=(.*)$')}) + +class CMakeCache: + + def __init__(self, cfg=None): + self.setDefaultAttrs() + self.main_modules = [] + if cfg: + self.build_type = cfg + + def setDummy(self, path): + self.tests_dir = os.path.normpath(path) + + def read(self, path, fname): + rx = re.compile('^OPENCV_MODULE_opencv_(\\w+)_LOCATION:INTERNAL=(.*)$') + module_paths = {} + with open(fname, 'rt') as cachefile: + for l in cachefile.readlines(): + ll = l.strip() + if not ll or ll.startswith('#'): + continue + for p in parse_patterns: + match = p['pattern'].match(ll) + if match: + value = match.groups()[0] + if value and (not value.endswith('-NOTFOUND')): + setattr(self, p['name'], value) + match = rx.search(ll) + if match: + module_paths[match.group(1)] = match.group(2) + if not self.tests_dir: + self.tests_dir = path + else: + rel = os.path.relpath(self.tests_dir, self.opencv_build) + self.tests_dir = os.path.join(path, rel) + self.tests_dir = os.path.normpath(self.tests_dir) + if 'Visual Studio' in self.cmake_generator: + self.tests_dir = os.path.join(self.tests_dir, self.build_type) + for (module, path) in module_paths.items(): + rel = os.path.relpath(path, self.opencv_home) + if '..' 
not in rel: + self.main_modules.append(module) + + def setDefaultAttrs(self): + for p in parse_patterns: + setattr(self, p['name'], p['default']) + + def gatherTests(self, mask, isGood=None): + if self.tests_dir and os.path.isdir(self.tests_dir): + d = os.path.abspath(self.tests_dir) + files = glob.glob(os.path.join(d, mask)) + if not self.getOS() == 'android' and self.withJava(): + files.append('java') + if self.withPython2(): + files.append('python2') + if self.withPython3(): + files.append('python3') + return [f for f in files if isGood(f)] + return [] + + def isMainModule(self, name): + return name in self.main_modules + ['python2', 'python3'] + + def withJava(self): + return self.ant_executable and self.java_test_dir and os.path.exists(self.java_test_dir) + + def withPython2(self): + return self.python2 == 'ON' + + def withPython3(self): + return self.python3 == 'ON' + + def getOS(self): + if self.android_executable: + return 'android' + else: + return hostos + +class TempEnvDir: + + def __init__(self, envname, prefix): + self.envname = envname + self.prefix = prefix + self.saved_name = None + self.new_name = None + + def init(self): + self.saved_name = os.environ.get(self.envname) + self.new_name = tempfile.mkdtemp(prefix=self.prefix, dir=self.saved_name or None) + os.environ[self.envname] = self.new_name + + def clean(self): + if self.saved_name: + os.environ[self.envname] = self.saved_name + else: + del os.environ[self.envname] + try: + shutil.rmtree(self.new_name) + except: + pass +if __name__ == '__main__': + log.error('This is utility file, please execute run.py script') + +# File: opencv-master/modules/ts/misc/summary.py +"""""" +from __future__ import print_function +import testlog_parser, sys, os, xml, glob, re +from table_formatter import * +from optparse import OptionParser +numeric_re = re.compile('(\\d+)') +cvtype_re = re.compile('(8U|8S|16U|16S|32S|32F|64F)C(\\d{1,3})') +cvtypes = {'8U': 0, '8S': 1, '16U': 2, '16S': 3, '32S': 4, '32F': 5, '64F': 6} +convert = lambda text: int(text) if text.isdigit() else text +keyselector = lambda a: cvtype_re.sub(lambda match: ' ' + str(cvtypes.get(match.group(1), 7) + (int(match.group(2)) - 1) * 8) + ' ', a) +alphanum_keyselector = lambda key: [convert(c) for c in numeric_re.split(keyselector(key))] + +def getSetName(tset, idx, columns, short=True): + if columns and len(columns) > idx: + prefix = columns[idx] + else: + prefix = None + if short and prefix: + return prefix + name = tset[0].replace('.xml', '').replace('_', '\n') + if prefix: + return prefix + '\n' + '-' * int(len(max(prefix.split('\n'), key=len)) * 1.5) + '\n' + name + return name +if __name__ == '__main__': + if len(sys.argv) < 2: + print('Usage:\n', os.path.basename(sys.argv[0]), '.xml [.xml ...]', file=sys.stderr) + exit(0) + parser = OptionParser() + parser.add_option('-o', '--output', dest='format', help="output results in text format (can be 'txt', 'html', 'markdown', 'tabs' or 'auto' - default)", metavar='FMT', default='auto') + parser.add_option('-m', '--metric', dest='metric', help='output metric', metavar='NAME', default='gmean') + parser.add_option('-u', '--units', dest='units', help='units for output values (s, ms (default), us, ns or ticks)', metavar='UNITS', default='ms') + parser.add_option('-f', '--filter', dest='filter', help='regex to filter tests', metavar='REGEX', default=None) + parser.add_option('', '--module', dest='module', default=None, metavar='NAME', help='module prefix for test names') + parser.add_option('', '--columns', dest='columns', 
default=None, metavar='NAMES', help='comma-separated list of column aliases') + parser.add_option('', '--no-relatives', action='store_false', dest='calc_relatives', default=True, help='do not output relative values') + parser.add_option('', '--with-cycles-reduction', action='store_true', dest='calc_cr', default=False, help='output cycle reduction percentages') + parser.add_option('', '--with-score', action='store_true', dest='calc_score', default=False, help='output automatic classification of speedups') + parser.add_option('', '--progress', action='store_true', dest='progress_mode', default=False, help='enable progress mode') + parser.add_option('', '--regressions', dest='regressions', default=None, metavar='LIST', help='comma-separated custom regressions map: "[r][c]#current-#reference" (indexes of columns are 0-based, "r" - reverse flag, "c" - color flag for base data)') + parser.add_option('', '--show-all', action='store_true', dest='showall', default=False, help='also include empty and "notrun" lines') + parser.add_option('', '--match', dest='match', default=None) + parser.add_option('', '--match-replace', dest='match_replace', default='') + parser.add_option('', '--regressions-only', dest='regressionsOnly', default=None, metavar='X-FACTOR', help='show only tests with performance regressions not') + parser.add_option('', '--intersect-logs', dest='intersect_logs', default=False, help='show only tests present in all log files') + parser.add_option('', '--show_units', action='store_true', dest='show_units', help='append units into table cells') + (options, args) = parser.parse_args() + options.generateHtml = detectHtmlOutputType(options.format) + if options.metric not in metrix_table: + options.metric = 'gmean' + if options.metric.endswith('%') or options.metric.endswith('$'): + options.calc_relatives = False + options.calc_cr = False + if options.columns: + options.columns = [s.strip().replace('\\n', '\n') for s in options.columns.split(',')] + if options.regressions: + assert not options.progress_mode, 'unsupported mode' + + def parseRegressionColumn(s): + reverse = s.startswith('r') + if reverse: + s = s[1:] + addColor = s.startswith('c') + if addColor: + s = s[1:] + parts = s.split('-', 1) + link = (int(parts[0]), int(parts[1]), reverse, addColor) + assert link[0] != link[1] + return link + options.regressions = [parseRegressionColumn(s) for s in options.regressions.split(',')] + show_units = options.units if options.show_units else None + files = [] + seen = set() + for arg in args: + if '*' in arg or '?' 
in arg: + flist = [os.path.abspath(f) for f in glob.glob(arg)] + flist = sorted(flist, key=lambda text: str(text).replace('M', '_')) + files.extend([x for x in flist if x not in seen and (not seen.add(x))]) + else: + fname = os.path.abspath(arg) + if fname not in seen and (not seen.add(fname)): + files.append(fname) + test_sets = [] + for arg in files: + try: + tests = testlog_parser.parseLogFile(arg) + if options.filter: + expr = re.compile(options.filter) + tests = [t for t in tests if expr.search(str(t))] + if options.match: + tests = [t for t in tests if t.get('status') != 'notrun'] + if tests: + test_sets.append((os.path.basename(arg), tests)) + except IOError as err: + sys.stderr.write('IOError reading "' + arg + '" - ' + str(err) + os.linesep) + except xml.parsers.expat.ExpatError as err: + sys.stderr.write('ExpatError reading "' + arg + '" - ' + str(err) + os.linesep) + if not test_sets: + sys.stderr.write('Error: no test data found' + os.linesep) + quit() + setsCount = len(test_sets) + if options.regressions is None: + reference = -1 if options.progress_mode else 0 + options.regressions = [(i, reference, False, True) for i in range(1, len(test_sets))] + for link in options.regressions: + (i, ref, reverse, addColor) = link + assert i >= 0 and i < setsCount + assert ref < setsCount + test_cases = {} + name_extractor = lambda name: str(name) + if options.match: + reg = re.compile(options.match) + name_extractor = lambda name: reg.sub(options.match_replace, str(name)) + for i in range(setsCount): + for case in test_sets[i][1]: + name = name_extractor(case) + if options.module: + name = options.module + '::' + name + if name not in test_cases: + test_cases[name] = [None] * setsCount + test_cases[name][i] = case + getter = metrix_table[options.metric][1] + getter_score = metrix_table['score'][1] if options.calc_score else None + getter_p = metrix_table[options.metric + '%'][1] if options.calc_relatives else None + getter_cr = metrix_table[options.metric + '$'][1] if options.calc_cr else None + tbl = table('%s (%s)' % (metrix_table[options.metric][0], options.units), options.format) + tbl.newColumn('name', 'Name of Test', align='left', cssclass='col_name') + for i in range(setsCount): + tbl.newColumn(str(i), getSetName(test_sets[i], i, options.columns, False), align='center') + + def addHeaderColumns(suffix, description, cssclass): + for link in options.regressions: + (i, ref, reverse, addColor) = link + if reverse: + (i, ref) = (ref, i) + current_set = test_sets[i] + current = getSetName(current_set, i, options.columns) + if ref >= 0: + reference_set = test_sets[ref] + reference = getSetName(reference_set, ref, options.columns) + else: + reference = 'previous' + tbl.newColumn(str(i) + '-' + str(ref) + suffix, '%s\nvs\n%s\n(%s)' % (current, reference, description), align='center', cssclass=cssclass) + if options.calc_cr: + addHeaderColumns(suffix='$', description='cycles reduction', cssclass='col_cr') + if options.calc_relatives: + addHeaderColumns(suffix='%', description='x-factor', cssclass='col_rel') + if options.calc_score: + addHeaderColumns(suffix='S', description='score', cssclass='col_name') + prevGroupName = None + needNewRow = True + lastRow = None + for name in sorted(test_cases.keys(), key=alphanum_keyselector): + cases = test_cases[name] + if needNewRow: + lastRow = tbl.newRow() + if not options.showall: + needNewRow = False + tbl.newCell('name', name) + groupName = next((c for c in cases if c)).shortName() + if groupName != prevGroupName: + prop = 
lastRow.props.get('cssclass', '') + if 'firstingroup' not in prop: + lastRow.props['cssclass'] = prop + ' firstingroup' + prevGroupName = groupName + for i in range(setsCount): + case = cases[i] + if case is None: + if options.intersect_logs: + needNewRow = False + break + tbl.newCell(str(i), '-') + else: + status = case.get('status') + if status != 'run': + tbl.newCell(str(i), status, color='red') + else: + val = getter(case, cases[0], options.units) + if val: + needNewRow = True + tbl.newCell(str(i), formatValue(val, options.metric, show_units), val) + if needNewRow: + for link in options.regressions: + (i, reference, reverse, addColor) = link + if reverse: + (i, reference) = (reference, i) + tblCellID = str(i) + '-' + str(reference) + case = cases[i] + if case is None: + if options.calc_relatives: + tbl.newCell(tblCellID + '%', '-') + if options.calc_cr: + tbl.newCell(tblCellID + '$', '-') + if options.calc_score: + tbl.newCell(tblCellID + '$', '-') + else: + status = case.get('status') + if status != 'run': + tbl.newCell(str(i), status, color='red') + if status != 'notrun': + needNewRow = True + if options.calc_relatives: + tbl.newCell(tblCellID + '%', '-', color='red') + if options.calc_cr: + tbl.newCell(tblCellID + '$', '-', color='red') + if options.calc_score: + tbl.newCell(tblCellID + 'S', '-', color='red') + else: + val = getter(case, cases[0], options.units) + + def getRegression(fn): + if fn and val: + for j in reversed(range(i)) if reference < 0 else [reference]: + r = cases[j] + if r is not None and r.get('status') == 'run': + return fn(case, r, options.units) + valp = getRegression(getter_p) if options.calc_relatives or options.progress_mode else None + valcr = getRegression(getter_cr) if options.calc_cr else None + val_score = getRegression(getter_score) if options.calc_score else None + if not valp: + color = None + elif valp > 1.05: + color = 'green' + elif valp < 0.95: + color = 'red' + else: + color = None + if addColor: + if not reverse: + tbl.newCell(str(i), formatValue(val, options.metric, show_units), val, color=color) + else: + r = cases[reference] + if r is not None and r.get('status') == 'run': + val = getter(r, cases[0], options.units) + tbl.newCell(str(reference), formatValue(val, options.metric, show_units), val, color=color) + if options.calc_relatives: + tbl.newCell(tblCellID + '%', formatValue(valp, '%'), valp, color=color, bold=color) + if options.calc_cr: + tbl.newCell(tblCellID + '$', formatValue(valcr, '$'), valcr, color=color, bold=color) + if options.calc_score: + tbl.newCell(tblCellID + 'S', formatValue(val_score, 'S'), val_score, color=color, bold=color) + if not needNewRow: + tbl.trimLastRow() + if options.regressionsOnly: + for r in reversed(range(len(tbl.rows))): + for i in range(1, len(options.regressions) + 1): + val = tbl.rows[r].cells[len(tbl.rows[r].cells) - i].value + if val is not None and val < float(options.regressionsOnly): + break + else: + tbl.rows.pop(r) + if options.generateHtml: + if options.format == 'moinwiki': + tbl.htmlPrintTable(sys.stdout, True) + else: + htmlPrintHeader(sys.stdout, 'Summary report for %s tests from %s test logs' % (len(test_cases), setsCount)) + tbl.htmlPrintTable(sys.stdout) + htmlPrintFooter(sys.stdout) + else: + tbl.consolePrintTable(sys.stdout) + if options.regressionsOnly: + sys.exit(len(tbl.rows)) + +# File: opencv-master/modules/ts/misc/table_formatter.py +"""""" +from __future__ import print_function +import sys, re, os.path, stat, math +try: + from html import escape +except ImportError: + from cgi 
import escape +from optparse import OptionParser +from color import getColorizer, dummyColorizer + +class tblCell(object): + + def __init__(self, text, value=None, props=None): + self.text = text + self.value = value + self.props = props + +class tblColumn(object): + + def __init__(self, caption, title=None, props=None): + self.text = caption + self.title = title + self.props = props + +class tblRow(object): + + def __init__(self, colsNum, props=None): + self.cells = [None] * colsNum + self.props = props + +def htmlEncode(str): + return '<br>'.join([escape(s) for s in str]) +
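+# NOTE (added example, not from the original sources): htmlEncode takes the
+# list of lines produced by table.reformatTextValue and returns escaped HTML
+# joined with <br>, e.g. htmlEncode(['a<b', 'c&d']) -> 'a&lt;b<br>c&amp;d'.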
+class table(object): + def_align = 'left' + def_valign = 'middle' + def_color = None + def_colspan = 1 + def_rowspan = 1 + def_bold = False + def_italic = False + def_text = '-' + + def __init__(self, caption=None, format=None): + self.format = format + self.is_markdown = self.format == 'markdown' + self.is_tabs = self.format == 'tabs' + self.columns = {} + self.rows = [] + self.ridx = -1 + self.caption = caption + pass + + def newRow(self, **properties): + if len(self.rows) - 1 == self.ridx: + self.rows.append(tblRow(len(self.columns), properties)) + else: + self.rows[self.ridx + 1].props = properties + self.ridx += 1 + return self.rows[self.ridx] + + def trimLastRow(self): + if self.rows: + self.rows.pop() + if self.ridx >= len(self.rows): + self.ridx = len(self.rows) - 1 + + def newColumn(self, name, caption, title=None, **properties): + if name in self.columns: + index = self.columns[name].index + else: + index = len(self.columns) + if isinstance(caption, tblColumn): + caption.index = index + self.columns[name] = caption + return caption + else: + col = tblColumn(caption, title, properties) + col.index = index + self.columns[name] = col + return col + + def getColumn(self, name): + if isinstance(name, str): + return self.columns.get(name, None) + else: + vals = [v for v in self.columns.values() if v.index == name] + if vals: + return vals[0] + return None + + def newCell(self, col_name, text, value=None, **properties): + if self.ridx < 0: + self.newRow() + col = self.getColumn(col_name) + row = self.rows[self.ridx] + if not col: + return None + if isinstance(text, tblCell): + cl = text + else: + cl = tblCell(text, value, properties) + row.cells[col.index] = cl + return cl +
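+# NOTE (added explanation, not from the original sources): layoutTable below
+# measures every cell, then iteratively widens the columns spanned by colspan
+# cells and the rows spanned by rowspan cells until each spanning cell fits;
+# the shortfall is spread proportionally over the spanned columns/rows via the
+# running sums in 'addition = s * budget / total - spent'.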
+ def layoutTable(self): + columns = self.columns.values() + columns = sorted(columns, key=lambda c: c.index) + colspanned = [] + rowspanned = [] + self.headerHeight = 1 + rowsToAppend = 0 + for col in columns: + self.measureCell(col) + if col.height > self.headerHeight: + self.headerHeight = col.height + col.minwidth = col.width + col.line = None + for r in range(len(self.rows)): + row = self.rows[r] + row.minheight = 1 + for i in range(len(row.cells)): + cell = row.cells[i] + if row.cells[i] is None: + continue + cell.line = None + self.measureCell(cell) + colspan = int(self.getValue('colspan', cell)) + rowspan = int(self.getValue('rowspan', cell)) + if colspan > 1: + colspanned.append((r, i)) + if i + colspan > len(columns): + colspan = len(columns) - i + cell.colspan = colspan + for j in range(i + 1, min(len(row.cells), i + colspan)): + row.cells[j] = None + elif columns[i].minwidth < cell.width: + columns[i].minwidth = cell.width + if rowspan > 1: + rowspanned.append((r, i)) + rowsToAppend2 = r + colspan - len(self.rows) + if rowsToAppend2 > rowsToAppend: + rowsToAppend = rowsToAppend2 + cell.rowspan = rowspan + for j in range(r + 1, min(len(self.rows), r + rowspan)): + if len(self.rows[j].cells) > i: + self.rows[j].cells[i] = None + elif row.minheight < cell.height: + row.minheight = cell.height + self.ridx = len(self.rows) - 1 + for r in range(rowsToAppend): + self.newRow() + self.rows[len(self.rows) - 1].minheight = 1 + while colspanned: + colspanned_new = [] + for (r, c) in colspanned: + cell = self.rows[r].cells[c] + sum([col.minwidth for col in columns[c:c + cell.colspan]]) + cell.awailable = sum([col.minwidth for col in columns[c:c + cell.colspan]]) + cell.colspan - 1 + if cell.awailable < cell.width: + colspanned_new.append((r, c)) + colspanned = colspanned_new + if colspanned: + (r, c) = colspanned[0] + cell = self.rows[r].cells[c] + cols = columns[c:c + cell.colspan] + total = cell.awailable - cell.colspan + 1 + budget = cell.width - cell.awailable + spent = 0 + s = 0 + for col in cols: + s += col.minwidth + addition = s * budget / total - spent + spent += addition + col.minwidth += addition + while rowspanned: + rowspanned_new = [] + for (r, c) in rowspanned: + cell = self.rows[r].cells[c] + cell.awailable = sum([row.minheight for row in self.rows[r:r + cell.rowspan]]) + if cell.awailable < cell.height: + rowspanned_new.append((r, c)) + rowspanned = rowspanned_new + if rowspanned: + (r, c) = rowspanned[0] + cell = self.rows[r].cells[c] + rows = self.rows[r:r + cell.rowspan] + total = cell.awailable + budget = cell.height - cell.awailable + spent = 0 + s = 0 + for row in rows: + s += row.minheight + addition = s * budget / total - spent + spent += addition + row.minheight += addition + return columns + + def measureCell(self, cell): + text = self.getValue('text', cell) + cell.text = self.reformatTextValue(text) + cell.height = len(cell.text) + cell.width = len(max(cell.text, key=lambda line: len(line))) + + def reformatTextValue(self, value): + if sys.version_info >= (2, 7): + unicode = str + if isinstance(value, str): + vstr = value + elif isinstance(value, unicode): + vstr = str(value) + else: + try: + vstr = '\n'.join([str(v) for v in value]) + except TypeError: + vstr = str(value) + return vstr.splitlines() + + def adjustColWidth(self, cols, width): + total = sum([c.minwidth for c in cols]) + if total + len(cols) - 1 >= width: + return + budget = width - len(cols) + 1 - total + spent = 0 + s = 0 + for col in cols: + s += col.minwidth + addition = s * budget / total - spent + spent += addition + col.minwidth += addition + + def getValue(self, name, *elements): + for el in elements: + try: + return getattr(el, name) + except AttributeError: + pass + try: + val = el.props[name] + if val: + return val + except AttributeError: + pass + except KeyError: + pass + try: + return getattr(self.__class__, 'def_' + name) + except AttributeError: + return None +
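+# NOTE (added explanation, not from the original sources): getValue above
+# resolves a display property by checking, in order, an attribute on each
+# given element, that element's props dict, and finally the class-level
+# def_* default (e.g. def_align), returning None if nothing matches.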
+ def consolePrintTable(self, out):
+ columns = self.layoutTable()
+ colorizer = getColorizer(out) if not (self.is_markdown or self.is_tabs) else dummyColorizer(out)
+ if self.caption:
+ out.write('%s%s%s' % (os.linesep, os.linesep.join(self.reformatTextValue(self.caption)), os.linesep * 2))
+ headerRow = tblRow(len(columns), {'align': 'center', 'valign': 'top', 'bold': True, 'header': True})
+ headerRow.cells = columns
+ headerRow.minheight = self.headerHeight
+ self.consolePrintRow2(colorizer, headerRow, columns)
+ for i in range(0, len(self.rows)):
+ self.consolePrintRow2(colorizer, i, columns)
+
+ def consolePrintRow2(self, out, r, columns):
+ if isinstance(r, tblRow):
+ row = r
+ r = -1
+ else:
+ row = self.rows[r]
+ i = 0
+ while i < len(row.cells):
+ cell = row.cells[i]
+ colspan = self.getValue('colspan', cell)
+ if cell is not None:
+ cell.wspace = sum([col.minwidth for col in columns[i:i + colspan]]) + colspan - 1
+ if cell.line is None:
+ if r < 0:
+ rows = [row]
+ else:
+ rows = self.rows[r:r + self.getValue('rowspan', cell)]
+ cell.line = self.evalLine(cell, rows, columns[i])
+ if len(rows) > 1:
+ for rw in rows:
+ rw.cells[i] = cell
+ i += colspan
+ if self.is_markdown:
+ out.write('|')
+ for c in row.cells:
+ text = ' '.join(self.getValue('text', c) or [])
+ out.write(text + '|')
+ out.write(os.linesep)
+ elif self.is_tabs:
+ cols_to_join = [' '.join(self.getValue('text', c) or []) for c in row.cells]
+ out.write('\t'.join(cols_to_join))
+ out.write(os.linesep)
+ else:
+ for ln in range(row.minheight):
+ i = 0
+ while i < len(row.cells):
+ if i > 0:
+ out.write(' ')
+ cell = row.cells[i]
+ column = columns[i]
+ if cell is None:
+ out.write(' ' * column.minwidth)
+ i += 1
+ else:
+ self.consolePrintLine(cell, row, column, out)
+ i += self.getValue('colspan', cell)
+ if self.is_markdown:
+ out.write('|')
+ out.write(os.linesep)
+ if self.is_markdown and row.props.get('header', False):
+ out.write('|')
+ for th in row.cells:
+ align = self.getValue('align', th)
+ if align == 'center':
+ out.write(':-:|')
+ elif align == 'right':
+ out.write('--:|')
+ else:
+ out.write('---|')
+ out.write(os.linesep)
+
+ def consolePrintLine(self, cell, row, column, out):
+ if cell.line < 0 or cell.line >= cell.height:
+ line = ''
+ else:
+ line = cell.text[cell.line]
+ width = cell.wspace
+ align = self.getValue('align', (None, cell)[isinstance(cell, tblCell)], row, column)
+ if align == 'right':
+ pattern = '%' + str(width) + 's'
+ elif align == 'center':
+ pattern = '%' + str((width - len(line)) // 2 + len(line)) + 's' + ' ' * (width - len(line) - (width - len(line)) // 2)
+ else:
+ pattern = '%-' + str(width) + 's'
+ out.write(pattern % line, color=self.getValue('color', cell, row, column))
+ cell.line += 1
+
+ def evalLine(self, cell, rows, column):
+ height = cell.height
+ valign = self.getValue('valign', cell, rows[0], column)
+ space = sum([row.minheight for row in rows])
+ if valign == 'bottom':
+ return height - space
+ if valign == 'middle':
+ return (height - space + 1) // 2
+ return 0
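With format='markdown' the same table object emits a pipe table with an alignment separator after the header row; a rough sketch of the expected shape (actual spacing may differ):

import sys
tbl = table(format='markdown')
tbl.newColumn('name', 'Name', align='left')
tbl.newColumn('t', 'Time', align='right')
tbl.newRow()
tbl.newCell('name', 'blur')
tbl.newCell('t', '0.5')
tbl.consolePrintTable(sys.stdout)
# Expected output, roughly:
# |Name|Time|
# |---|--:|
# |blur|0.5|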
+ def htmlPrintTable(self, out, embeedcss=False):  # HTML string literals below were stripped during extraction and have been reconstructed; exact styling may differ from the original
+ columns = self.layoutTable()
+ if embeedcss:
+ out.write('<div style="font-family:Lucida Console,Courier New,Courier;font-size:16px;color:#3e4758;"><table style="border-collapse:collapse;font-size:14px;margin:20px;text-align:left;white-space:nowrap;">\n')
+ else:
+ out.write('<div class="tableFormatter">\n<table class="tbl">\n')
+ if self.caption:
+ if embeedcss:
+ out.write(' <caption style="font:italic 16px \'Trebuchet MS\',Verdana,Arial,Helvetica,sans-serif;padding:0 0 5px;text-align:right;white-space:normal;">%s</caption>\n' % htmlEncode(self.reformatTextValue(self.caption)))
+ else:
+ out.write(' <caption>%s</caption>\n' % htmlEncode(self.reformatTextValue(self.caption)))
+ out.write(' <thead>\n')
+ headerRow = tblRow(len(columns), {'align': 'center', 'valign': 'top', 'bold': True, 'header': True})
+ headerRow.cells = columns
+ header_rows = [headerRow]
+ header_rows.extend([row for row in self.rows if self.getValue('header', row)])
+ last_row = header_rows[len(header_rows) - 1]
+ for row in header_rows:
+ out.write('  <tr>\n')
+ for th in row.cells:
+ align = self.getValue('align', (None, th)[isinstance(th, tblCell)], row, row)
+ valign = self.getValue('valign', th, row)
+ cssclass = self.getValue('cssclass', th)
+ attr = ''
+ if align:
+ attr += ' align="%s"' % align
+ if valign:
+ attr += ' valign="%s"' % valign
+ if cssclass:
+ attr += ' class="%s"' % cssclass
+ css = ''
+ if embeedcss:
+ css = ' style="border:none;color:#003399;font-size:16px;font-weight:normal;white-space:nowrap;padding:3px 10px;"'
+ if row == last_row:
+ css = css[:-1] + 'padding-bottom:5px;"'
+ out.write('   <th%s%s>\n' % (attr, css))
+ if th is not None:
+ out.write('    %s\n' % htmlEncode(th.text))
+ out.write('   </th>\n')
+ out.write('  </tr>\n')
+ out.write(' </thead>\n <tbody>\n')
+ rows = [row for row in self.rows if not self.getValue('header', row)]
+ for r in range(len(rows)):
+ row = rows[r]
+ rowattr = ''
+ cssclass = self.getValue('cssclass', row)
+ if cssclass:
+ rowattr += ' class="%s"' % cssclass
+ out.write('  <tr%s>\n' % rowattr)
+ i = 0
+ while i < len(row.cells):
+ column = columns[i]
+ td = row.cells[i]
+ if isinstance(td, int):
+ i += td
+ continue
+ colspan = self.getValue('colspan', td)
+ rowspan = self.getValue('rowspan', td)
+ align = self.getValue('align', td, row, column)
+ valign = self.getValue('valign', td, row, column)
+ color = self.getValue('color', td, row, column)
+ bold = self.getValue('bold', td, row, column)
+ italic = self.getValue('italic', td, row, column)
+ style = ''
+ attr = ''
+ if color:
+ style += 'color:%s;' % color
+ if bold:
+ style += 'font-weight: bold;'
+ if italic:
+ style += 'font-style: italic;'
+ if align and align != 'left':
+ attr += ' align="%s"' % align
+ if valign and valign != 'middle':
+ attr += ' valign="%s"' % valign
+ if colspan > 1:
+ attr += ' colspan="%s"' % colspan
+ if rowspan > 1:
+ attr += ' rowspan="%s"' % rowspan
+ for q in range(r + 1, min(r + rowspan, len(rows))):
+ rows[q].cells[i] = colspan
+ if style:
+ attr += ' style="%s"' % style
+ css = ''
+ if embeedcss:
+ css = ' style="border:none;border-bottom:1px solid #CCCCCC;color:#666699;padding:6px 8px;white-space:nowrap;"'
+ if r == 0:
+ css = css[:-1] + 'border-top:2px solid #6678B1;"'
+ out.write('   <td%s%s>\n' % (attr, css))
+ if td is not None:
+ out.write('    %s\n' % htmlEncode(td.text))
+ out.write('   </td>\n')
+ i += colspan
+ out.write('  </tr>\n')
+ out.write(' </tbody>\n</table>\n</div>\n')
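A sketch of producing a complete standalone HTML report with htmlPrintTable and the page helpers defined just below (tbl is a table built as in the earlier examples):

import sys
htmlPrintHeader(sys.stdout, 'Performance report')  # defined below
tbl.htmlPrintTable(sys.stdout, embeedcss=True)     # embeedcss=True inlines styles per element, useful when no stylesheet is served
htmlPrintFooter(sys.stdout)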
+def htmlPrintHeader(out, title=None):
+ if title:
+ titletag = '<title>%s</title>\n' % htmlEncode([str(title)])
+ else:
+ titletag = ''
+ out.write('<!DOCTYPE html>\n<html>\n<head>\n<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\n%s</head>\n<body>\n' % titletag)  # markup reconstructed; the original also embeds a <style> block with the .tbl table CSS here
+
+def htmlPrintFooter(out):
+ out.write('</body>\n</html>\n')  # closing tags reconstructed
+
+def getStdoutFilename():
+ try:
+ if os.name == 'nt':
+ import msvcrt, ctypes
+ handle = msvcrt.get_osfhandle(sys.stdout.fileno())
+ size = ctypes.c_ulong(1024)
+ nameBuffer = ctypes.create_string_buffer(size.value)
+ ctypes.windll.kernel32.GetFinalPathNameByHandleA(handle, nameBuffer, size, 4)
+ return nameBuffer.value
+ else:
+ return os.readlink('/proc/self/fd/1')
+ except:
+ return ''
+
+def detectHtmlOutputType(requestedType):
+ if requestedType in ['txt', 'markdown']:
+ return False
+ elif requestedType in ['html', 'moinwiki']:
+ return True
+ elif sys.stdout.isatty():
+ return False
+ else:
+ outname = getStdoutFilename()
+ if outname:
+ if outname.endswith('.htm') or outname.endswith('.html'):
+ return True
+ else:
+ return False
+ else:
+ return False
+
+def getRelativeVal(test, test0, metric):
+ if not test or not test0:
+ return None
+ val0 = test0.get(metric, 's')
+ if not val0:
+ return None
+ val = test.get(metric, 's')
+ if not val or val == 0:
+ return None
+ return float(val0) / val
+
+def getCycleReduction(test, test0, metric):
+ if not test or not test0:
+ return None
+ val0 = test0.get(metric, 's')
+ if not val0 or val0 == 0:
+ return None
+ val = test.get(metric, 's')
+ if not val:
+ return None
+ return (1.0 - float(val) / val0) * 100
+
+def getScore(test, test0, metric):
+ if not test or not test0:
+ return None
+ m0 = float(test.get('gmean', None))
+ m1 = float(test0.get('gmean', None))
+ if m0 == 0 or m1 == 0:
+ return None
+ s0 = float(test.get('gstddev', None))
+ s1 = float(test0.get('gstddev', None))
+ s = math.sqrt(s0 * s0 + s1 * s1)
+ m0 = math.log(m0)
+ m1 = math.log(m1)
+ if s == 0:
+ return None
+ return (m0 - m1) / s
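getScore computes a z-like statistic on log-times: with geometric means m0, m1 and log-space standard deviations s0, s1, score = (ln m0 - ln m1) / sqrt(s0^2 + s1^2). A quick worked check with made-up numbers:

import math
m0, m1 = 12.0, 10.0   # gmean of test and reference, ms
s0, s1 = 0.05, 0.05   # gstddev, i.e. stddev of ln(time)
score = (math.log(m0) - math.log(m1)) / math.sqrt(s0 * s0 + s1 * s1)
print('%.2f' % score)  # ~2.58: slower, but below the 3.5 'SLOWER' cutoff used in formatValue below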
+metrix_table = {'name': ('Name of Test', lambda test, test0, units: str(test)), 'samples': ('Number of\ncollected samples', lambda test, test0, units: test.get('samples', units)), 'outliers': ('Number of\noutliers', lambda test, test0, units: test.get('outliers', units)), 'gmean': ('Geometric mean', lambda test, test0, units: test.get('gmean', units)), 'mean': ('Mean', lambda test, test0, units: test.get('mean', units)), 'min': ('Min', lambda test, test0, units: test.get('min', units)), 'median': ('Median', lambda test, test0, units: test.get('median', units)), 'stddev': ('Standard deviation', lambda test, test0, units: test.get('stddev', units)), 'gstddev': ('Standard deviation of Ln(time)', lambda test, test0, units: test.get('gstddev')), 'gmean%': ('Geometric mean (relative)', lambda test, test0, units: getRelativeVal(test, test0, 'gmean')), 'mean%': ('Mean (relative)', lambda test, test0, units: getRelativeVal(test, test0, 'mean')), 'min%': ('Min (relative)', lambda test, test0, units: getRelativeVal(test, test0, 'min')), 'median%': ('Median (relative)', lambda test, test0, units: getRelativeVal(test, test0, 'median')), 'stddev%': ('Standard deviation (relative)', lambda test, test0, units: getRelativeVal(test, test0, 'stddev')), 'gstddev%': ('Standard deviation of Ln(time) (relative)', lambda test, test0, units: getRelativeVal(test, test0, 'gstddev')), 'gmean$': ('Geometric mean (cycle reduction)', lambda test, test0, units: getCycleReduction(test, test0, 'gmean')), 'mean$': ('Mean (cycle reduction)', lambda test, test0, units: getCycleReduction(test, test0, 'mean')), 'min$': ('Min (cycle reduction)', lambda test, test0, units: getCycleReduction(test, test0, 'min')), 'median$': ('Median (cycle reduction)', lambda test, test0, units: getCycleReduction(test, test0, 'median')), 'stddev$': ('Standard deviation (cycle reduction)', lambda test, test0, units: getCycleReduction(test, test0, 'stddev')), 'gstddev$': ('Standard deviation of Ln(time) (cycle reduction)', lambda test, test0, units: getCycleReduction(test, test0, 'gstddev')), 'score': ('SCORE', lambda test, test0, units: getScore(test, test0, 'gstddev'))}
+
+def formatValue(val, metric, units=None):
+ if val is None:
+ return '-'
+ if metric.endswith('%'):
+ return '%.2f' % val
+ if metric.endswith('$'):
+ return '%.2f%%' % val
+ if metric.endswith('S'):
+ if val > 3.5:
+ return 'SLOWER'
+ if val < -3.5:
+ return 'FASTER'
+ if val > -1.5 and val < 1.5:
+ return ' '
+ if val < 0:
+ return 'faster'
+ if val > 0:
+ return 'slower'
+ if units:
+ return '%.3f %s' % (val, units)
+ else:
+ return '%.3f' % val
+if __name__ == '__main__':
+ if len(sys.argv) < 2:
+ print('Usage:\n', os.path.basename(sys.argv[0]), '<log_name>.xml')  # placeholder reconstructed; extraction stripped the angle-bracketed token
+ exit(0)
+ parser = OptionParser()
+ parser.add_option('-o', '--output', dest='format', help="output results in text format (can be 'txt', 'html', 'markdown' or 'auto' - default)", metavar='FMT', default='auto')
+ parser.add_option('-m', '--metric', dest='metric', help='output metric', metavar='NAME', default='gmean')
+ parser.add_option('-u', '--units', dest='units', help='units for output values (s, ms (default), us, ns or ticks)', metavar='UNITS', default='ms')
+ (options, args) = parser.parse_args()
+ options.generateHtml = detectHtmlOutputType(options.format)
+ if options.metric not in metrix_table:
+ options.metric = 'gmean'
+ import testlog_parser
+ if options.generateHtml:
+ htmlPrintHeader(sys.stdout, 'Tables demo')
+ getter = metrix_table[options.metric][1]
+ for arg in args:
+ tests = testlog_parser.parseLogFile(arg)
+ tbl = table(arg, format=options.format)
+ tbl.newColumn('name', 'Name of Test', align='left')
+ tbl.newColumn('value', metrix_table[options.metric][0], align='center', bold='true')
+ for t in sorted(tests):
+ tbl.newRow()
+ tbl.newCell('name', str(t))
+ status = t.get('status')
+ if status != 'run':
+ tbl.newCell('value', status)
+ else:
+ val = getter(t, None, options.units)
+ if val:
+ if options.metric.endswith('%'):
+ tbl.newCell('value', '%.2f' % val, val)
+ else:
+ tbl.newCell('value', '%.3f %s' % (val, options.units), val)
+ else:
+ tbl.newCell('value', '-')
+ if options.generateHtml:
+ tbl.htmlPrintTable(sys.stdout)
+ else:
+ tbl.consolePrintTable(sys.stdout)
+ if options.generateHtml:
+ htmlPrintFooter(sys.stdout)
+
+# File: opencv-master/modules/ts/misc/trace_profiler.py
+""""""
+from __future__ import print_function
+import os
+import sys
+import csv
+from pprint import pprint
+from collections import deque
+try:
+ long
+except NameError:
+ long = int
+REGION_FLAG_IMPL_MASK = 15 << 16
+REGION_FLAG_IMPL_IPP = 1 << 16
+REGION_FLAG_IMPL_OPENCL = 2 << 16
+DEBUG = False
+if DEBUG:
+ dprint = print
+ dpprint = pprint
+else:
+
+ def dprint(*args, **kwargs):
+ pass
+
+ def dpprint(*args, **kwargs):
+ pass
+
+def tryNum(s):
+ if s.startswith('0x'):
+ try:
+ return int(s, 16)
+ except ValueError:
+ pass
+ try:
+ return int(s)
+ except ValueError:
+ pass
+ if sys.version_info[0] < 3:
+ try:
+ return long(s)
+ except ValueError:
+ pass
+ return s
+
+def formatTimestamp(t):
+ return '%.3f' % (t * 1e-06)
+try:
+ from statistics import median
+except ImportError:
+
+ def median(lst):
+ sortedLst = sorted(lst)
+ lstLen = 
len(lst) + index = (lstLen - 1) // 2 + if lstLen % 2: + return sortedLst[index] + else: + return (sortedLst[index] + sortedLst[index + 1]) * 0.5 + +def getCXXFunctionName(spec): + + def dropParams(spec): + pos = len(spec) - 1 + depth = 0 + while pos >= 0: + if spec[pos] == ')': + depth = depth + 1 + elif spec[pos] == '(': + depth = depth - 1 + if depth == 0: + if pos == 0 or spec[pos - 1] in ['#', ':']: + res = dropParams(spec[pos + 1:-1]) + return (spec[:pos] + res[0], res[1]) + return (spec[:pos], spec[pos:]) + pos = pos - 1 + return (spec, '') + + def extractName(spec): + pos = len(spec) - 1 + inName = False + while pos >= 0: + if spec[pos] == ' ': + if inName: + return spec[pos + 1:] + elif spec[pos].isalnum(): + inName = True + pos = pos - 1 + return spec + if spec.startswith('IPP') or spec.startswith('OpenCL'): + prefix_size = len('IPP') if spec.startswith('IPP') else len('OpenCL') + prefix = spec[:prefix_size] + if prefix_size < len(spec) and spec[prefix_size] in ['#', ':']: + prefix = prefix + spec[prefix_size] + prefix_size = prefix_size + 1 + begin = prefix_size + while begin < len(spec): + if spec[begin].isalnum() or spec[begin] in ['_', ':']: + break + begin = begin + 1 + if begin == len(spec): + return spec + end = begin + while end < len(spec): + if not (spec[end].isalnum() or spec[end] in ['_', ':']): + break + end = end + 1 + return prefix + spec[begin:end] + spec = spec.replace(') const', ')') + (ret_type_name, params) = dropParams(spec) + name = extractName(ret_type_name) + if 'operator' in name: + return name + params + if name.startswith('&'): + return name[1:] + return name +stack_size = 10 + +class Trace: + + def __init__(self, filename=None): + self.tasks = {} + self.tasks_list = [] + self.locations = {} + self.threads_stack = {} + self.pending_files = deque() + if filename: + self.load(filename) + + class TraceTask: + + def __init__(self, threadID, taskID, locationID, beginTimestamp): + self.threadID = threadID + self.taskID = taskID + self.locationID = locationID + self.beginTimestamp = beginTimestamp + self.endTimestamp = None + self.parentTaskID = None + self.parentThreadID = None + self.childTask = [] + self.selfTimeIPP = 0 + self.selfTimeOpenCL = 0 + self.totalTimeIPP = 0 + self.totalTimeOpenCL = 0 + + def __repr__(self): + return 'TID={} ID={} loc={} parent={}:{} begin={} end={} IPP={}/{} OpenCL={}/{}'.format(self.threadID, self.taskID, self.locationID, self.parentThreadID, self.parentTaskID, self.beginTimestamp, self.endTimestamp, self.totalTimeIPP, self.selfTimeIPP, self.totalTimeOpenCL, self.selfTimeOpenCL) + + class TraceLocation: + + def __init__(self, locationID, filename, line, name, flags): + self.locationID = locationID + self.filename = os.path.split(filename)[1] + self.line = line + self.name = getCXXFunctionName(name) + self.flags = flags + + def __str__(self): + return '{}#{}:{}'.format(self.name, self.filename, self.line) + + def __repr__(self): + return 'ID={} {}:{}:{}'.format(self.locationID, self.filename, self.line, self.name) + + def parse_file(self, filename): + dprint("Process file: '{}'".format(filename)) + with open(filename) as infile: + for line in infile: + line = str(line).strip() + if line[0] == '#': + if line.startswith('#thread file:'): + name = str(line.split(':', 1)[1]).strip() + self.pending_files.append(os.path.join(os.path.split(filename)[0], name)) + continue + self.parse_line(line) + + def parse_line(self, line): + opts = line.split(',') + dpprint(opts) + if opts[0] == 'l': + opts = list(csv.reader([line]))[0] + locationID 
= int(opts[1]) + filename = str(opts[2]) + line = int(opts[3]) + name = opts[4] + flags = tryNum(opts[5]) + self.locations[locationID] = self.TraceLocation(locationID, filename, line, name, flags) + return + extra_opts = {} + for e in opts[5:]: + if not '=' in e: + continue + (k, v) = e.split('=') + extra_opts[k] = tryNum(v) + if extra_opts: + dpprint(extra_opts) + threadID = None + taskID = None + locationID = None + ts = None + if opts[0] in ['b', 'e']: + threadID = int(opts[1]) + taskID = int(opts[4]) + locationID = int(opts[3]) + ts = tryNum(opts[2]) + thread_stack = None + currentTask = (None, None) + if threadID is not None: + if not threadID in self.threads_stack: + thread_stack = deque() + self.threads_stack[threadID] = thread_stack + else: + thread_stack = self.threads_stack[threadID] + currentTask = None if not thread_stack else thread_stack[-1] + t = (threadID, taskID) + if opts[0] == 'b': + assert not t in self.tasks, 'Duplicate task: ' + str(t) + repr(self.tasks[t]) + task = self.TraceTask(threadID, taskID, locationID, ts) + self.tasks[t] = task + self.tasks_list.append(task) + thread_stack.append((threadID, taskID)) + if currentTask: + task.parentThreadID = currentTask[0] + task.parentTaskID = currentTask[1] + if 'parentThread' in extra_opts: + task.parentThreadID = extra_opts['parentThread'] + if 'parent' in extra_opts: + task.parentTaskID = extra_opts['parent'] + if opts[0] == 'e': + task = self.tasks[t] + task.endTimestamp = ts + if 'tIPP' in extra_opts: + task.selfTimeIPP = extra_opts['tIPP'] + if 'tOCL' in extra_opts: + task.selfTimeOpenCL = extra_opts['tOCL'] + thread_stack.pop() + + def load(self, filename): + self.pending_files.append(filename) + if DEBUG: + with open(filename, 'r') as f: + print(f.read(), end='') + while self.pending_files: + self.parse_file(self.pending_files.pop()) + + def getParentTask(self, task): + return self.tasks.get((task.parentThreadID, task.parentTaskID), None) + + def process(self): + self.tasks_list.sort(key=lambda x: x.beginTimestamp) + parallel_for_location = None + for (id, l) in self.locations.items(): + if l.name == 'parallel_for': + parallel_for_location = l.locationID + break + for task in self.tasks_list: + try: + task.duration = task.endTimestamp - task.beginTimestamp + task.selfDuration = task.duration + except: + task.duration = None + task.selfDuration = None + task.totalTimeIPP = task.selfTimeIPP + task.totalTimeOpenCL = task.selfTimeOpenCL + dpprint(self.tasks) + dprint('Calculate total times') + for task in self.tasks_list: + parentTask = self.getParentTask(task) + if parentTask: + parentTask.selfDuration = parentTask.selfDuration - task.duration + parentTask.childTask.append(task) + timeIPP = task.selfTimeIPP + timeOpenCL = task.selfTimeOpenCL + while parentTask: + if parentTask.locationID == parallel_for_location: + break + parentLocation = self.locations[parentTask.locationID] + if parentLocation.flags & REGION_FLAG_IMPL_MASK == REGION_FLAG_IMPL_IPP: + parentTask.selfTimeIPP = parentTask.selfTimeIPP - timeIPP + timeIPP = 0 + else: + parentTask.totalTimeIPP = parentTask.totalTimeIPP + timeIPP + if parentLocation.flags & REGION_FLAG_IMPL_MASK == REGION_FLAG_IMPL_OPENCL: + parentTask.selfTimeOpenCL = parentTask.selfTimeOpenCL - timeOpenCL + timeOpenCL = 0 + else: + parentTask.totalTimeOpenCL = parentTask.totalTimeOpenCL + timeOpenCL + parentTask = self.getParentTask(parentTask) + dpprint(self.tasks) + dprint('Calculate total times (parallel_for)') + for task in self.tasks_list: + if task.locationID == 
parallel_for_location: + task.selfDuration = 0 + childDuration = sum([t.duration for t in task.childTask]) + if task.duration == 0 or childDuration == 0: + continue + timeCoef = task.duration / float(childDuration) + childTimeIPP = sum([t.totalTimeIPP for t in task.childTask]) + childTimeOpenCL = sum([t.totalTimeOpenCL for t in task.childTask]) + if childTimeIPP == 0 and childTimeOpenCL == 0: + continue + timeIPP = childTimeIPP * timeCoef + timeOpenCL = childTimeOpenCL * timeCoef + parentTask = task + while parentTask: + parentLocation = self.locations[parentTask.locationID] + if parentLocation.flags & REGION_FLAG_IMPL_MASK == REGION_FLAG_IMPL_IPP: + parentTask.selfTimeIPP = parentTask.selfTimeIPP - timeIPP + timeIPP = 0 + else: + parentTask.totalTimeIPP = parentTask.totalTimeIPP + timeIPP + if parentLocation.flags & REGION_FLAG_IMPL_MASK == REGION_FLAG_IMPL_OPENCL: + parentTask.selfTimeOpenCL = parentTask.selfTimeOpenCL - timeOpenCL + timeOpenCL = 0 + else: + parentTask.totalTimeOpenCL = parentTask.totalTimeOpenCL + timeOpenCL + parentTask = self.getParentTask(parentTask) + dpprint(self.tasks) + dprint('Done') + + def dump(self, max_entries): + assert isinstance(max_entries, int) + + class CallInfo: + + def __init__(self, callID): + self.callID = callID + self.totalTimes = [] + self.selfTimes = [] + self.threads = set() + self.selfTimesIPP = [] + self.selfTimesOpenCL = [] + self.totalTimesIPP = [] + self.totalTimesOpenCL = [] + calls = {} + for currentTask in self.tasks_list: + task = currentTask + callID = [] + for i in range(stack_size): + callID.append(task.locationID) + task = self.getParentTask(task) + if not task: + break + callID = tuple(callID) + if not callID in calls: + call = CallInfo(callID) + calls[callID] = call + else: + call = calls[callID] + call.totalTimes.append(currentTask.duration) + call.selfTimes.append(currentTask.selfDuration) + call.threads.add(currentTask.threadID) + call.selfTimesIPP.append(currentTask.selfTimeIPP) + call.selfTimesOpenCL.append(currentTask.selfTimeOpenCL) + call.totalTimesIPP.append(currentTask.totalTimeIPP) + call.totalTimesOpenCL.append(currentTask.totalTimeOpenCL) + dpprint(self.tasks) + dpprint(self.locations) + dpprint(calls) + calls_self_sum = {k: sum(v.selfTimes) for (k, v) in calls.items()} + calls_total_sum = {k: sum(v.totalTimes) for (k, v) in calls.items()} + calls_median = {k: median(v.selfTimes) for (k, v) in calls.items()} + calls_sorted = sorted(calls.keys(), key=lambda x: calls_self_sum[x], reverse=True) + calls_self_sum_IPP = {k: sum(v.selfTimesIPP) for (k, v) in calls.items()} + calls_total_sum_IPP = {k: sum(v.totalTimesIPP) for (k, v) in calls.items()} + calls_self_sum_OpenCL = {k: sum(v.selfTimesOpenCL) for (k, v) in calls.items()} + calls_total_sum_OpenCL = {k: sum(v.totalTimesOpenCL) for (k, v) in calls.items()} + if max_entries > 0 and len(calls_sorted) > max_entries: + calls_sorted = calls_sorted[:max_entries] + + def formatPercents(p): + if p is not None: + return '{:>3d}'.format(int(p * 100)) + return '' + name_width = 70 + timestamp_width = 12 + + def fmtTS(): + return '{:>' + str(timestamp_width) + '}' + fmt = '{:>3} {:<' + str(name_width) + '} {:>8} {:>3}' + (' ' + fmtTS()) * 5 + (' ' + fmtTS() + ' {:>3}') * 2 + fmt2 = '{:>3} {:<' + str(name_width) + '} {:>8} {:>3}' + (' ' + fmtTS()) * 5 + (' ' + fmtTS() + ' {:>3}') * 2 + print(fmt.format('ID', 'name', 'count', 'thr', 'min', 'max', 'median', 'avg', '*self*', 'IPP', '%', 'OpenCL', '%')) + print(fmt2.format('', '', '', '', 't-min', 't-max', 't-median', 't-avg', 
'total', 't-IPP', '%', 't-OpenCL', '%')) + for (index, callID) in enumerate(calls_sorted): + call_self_times = calls[callID].selfTimes + loc0 = self.locations[callID[0]] + loc_array = [] + for (i, l) in enumerate(callID): + loc = self.locations[l] + loc_array.append(loc.name if i > 0 else str(loc)) + loc_str = '|'.join(loc_array) + if len(loc_str) > name_width: + loc_str = loc_str[:name_width - 3] + '...' + print(fmt.format(index + 1, loc_str, len(call_self_times), len(calls[callID].threads), formatTimestamp(min(call_self_times)), formatTimestamp(max(call_self_times)), formatTimestamp(calls_median[callID]), formatTimestamp(sum(call_self_times) / float(len(call_self_times))), formatTimestamp(sum(call_self_times)), formatTimestamp(calls_self_sum_IPP[callID]), formatPercents(calls_self_sum_IPP[callID] / float(calls_self_sum[callID])) if calls_self_sum[callID] > 0 else formatPercents(None), formatTimestamp(calls_self_sum_OpenCL[callID]), formatPercents(calls_self_sum_OpenCL[callID] / float(calls_self_sum[callID])) if calls_self_sum[callID] > 0 else formatPercents(None))) + call_total_times = calls[callID].totalTimes + print(fmt2.format('', '', '', '', formatTimestamp(min(call_total_times)), formatTimestamp(max(call_total_times)), formatTimestamp(median(call_total_times)), formatTimestamp(sum(call_total_times) / float(len(call_total_times))), formatTimestamp(sum(call_total_times)), formatTimestamp(calls_total_sum_IPP[callID]), formatPercents(calls_total_sum_IPP[callID] / float(calls_total_sum[callID])) if calls_total_sum[callID] > 0 else formatPercents(None), formatTimestamp(calls_total_sum_OpenCL[callID]), formatPercents(calls_total_sum_OpenCL[callID] / float(calls_total_sum[callID])) if calls_total_sum[callID] > 0 else formatPercents(None))) + print() +if __name__ == '__main__': + tracefile = sys.argv[1] if len(sys.argv) > 1 else 'OpenCVTrace.txt' + count = int(sys.argv[2]) if len(sys.argv) > 2 else 10 + trace = Trace(tracefile) + trace.process() + trace.dump(max_entries=count) + print('OK') + +# File: opencv-master/modules/ts/misc/xls-report.py +"""""" +from __future__ import division +import ast +import errno +import fnmatch +import logging +import numbers +import os, os.path +import re +from argparse import ArgumentParser +from glob import glob +from itertools import ifilter +import xlwt +from testlog_parser import parseLogFile +re_image_size = re.compile('^ \\d+ x \\d+$', re.VERBOSE) +re_data_type = re.compile('^ (?: 8 | 16 | 32 | 64 ) [USF] C [1234] $', re.VERBOSE) +time_style = xlwt.easyxf(num_format_str='#0.00') +no_time_style = xlwt.easyxf('pattern: pattern solid, fore_color gray25') +failed_style = xlwt.easyxf('pattern: pattern solid, fore_color red') +noimpl_style = xlwt.easyxf('pattern: pattern solid, fore_color orange') +style_dict = {'failed': failed_style, 'noimpl': noimpl_style} +speedup_style = time_style +good_speedup_style = xlwt.easyxf('font: color green', num_format_str='#0.00') +bad_speedup_style = xlwt.easyxf('font: color red', num_format_str='#0.00') +no_speedup_style = no_time_style +error_speedup_style = xlwt.easyxf('pattern: pattern solid, fore_color orange') +header_style = xlwt.easyxf('font: bold true; alignment: horizontal centre, vertical top, wrap True') +subheader_style = xlwt.easyxf('alignment: horizontal centre, vertical top') + +class Collector(object): + + def __init__(self, config_match_func, include_unmatched): + self.__config_cache = {} + self.config_match_func = config_match_func + self.include_unmatched = include_unmatched + self.tests = {} + 
self.extra_configurations = set() + + @staticmethod + def __format_config_cache_key(pairs, multiline=False): + return ('{\n' if multiline else '{') + (',\n' if multiline else ', ').join(((' ' if multiline else '') + repr(k) + ': ' + repr(v) for (k, v) in pairs)) + ('\n}\n' if multiline else '}') + + def collect_from(self, xml_path, default_configuration): + run = parseLogFile(xml_path) + module = run.properties['module_name'] + properties = run.properties.copy() + del properties['module_name'] + props_key = tuple(sorted(properties.iteritems())) + if props_key in self.__config_cache: + configuration = self.__config_cache[props_key] + else: + configuration = self.config_match_func(properties) + if configuration is None: + if self.include_unmatched: + if default_configuration is not None: + configuration = default_configuration + else: + configuration = Collector.__format_config_cache_key(props_key, multiline=True) + self.extra_configurations.add(configuration) + else: + logging.warning('failed to match properties to a configuration: %s', Collector.__format_config_cache_key(props_key)) + else: + same_config_props = [it[0] for it in self.__config_cache.iteritems() if it[1] == configuration] + if len(same_config_props) > 0: + logging.warning('property set %s matches the same configuration %r as property set %s', Collector.__format_config_cache_key(props_key), configuration, Collector.__format_config_cache_key(same_config_props[0])) + self.__config_cache[props_key] = configuration + if configuration is None: + return + module_tests = self.tests.setdefault(module, {}) + for test in run.tests: + test_results = module_tests.setdefault((test.shortName(), test.param()), {}) + new_result = test.get('gmean') if test.status == 'run' else test.status + test_results[configuration] = min(test_results.get(configuration), new_result, key=lambda r: (1, r) if isinstance(r, numbers.Number) else (2,) if r is not None else (3,)) + +def make_match_func(matchers): + + def match_func(properties): + for matcher in matchers: + if all((properties.get(name) == value for (name, value) in matcher['properties'].iteritems())): + return matcher['name'] + return None + return match_func + +def main(): + arg_parser = ArgumentParser(description='Build an XLS performance report.') + arg_parser.add_argument('sheet_dirs', nargs='+', metavar='DIR', help='directory containing perf test logs') + arg_parser.add_argument('-o', '--output', metavar='XLS', default='report.xls', help='name of output file') + arg_parser.add_argument('-c', '--config', metavar='CONF', help='global configuration file') + arg_parser.add_argument('--include-unmatched', action='store_true', help='include results from XML files that were not recognized by configuration matchers') + arg_parser.add_argument('--show-times-per-pixel', action='store_true', help='for tests that have an image size parameter, show per-pixel time, as well as total time') + args = arg_parser.parse_args() + logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG) + if args.config is not None: + with open(args.config) as global_conf_file: + global_conf = ast.literal_eval(global_conf_file.read()) + else: + global_conf = {} + wb = xlwt.Workbook() + for sheet_path in args.sheet_dirs: + try: + with open(os.path.join(sheet_path, 'sheet.conf')) as sheet_conf_file: + sheet_conf = ast.literal_eval(sheet_conf_file.read()) + except IOError as ioe: + if ioe.errno != errno.ENOENT: + raise + sheet_conf = {} + logging.debug('no sheet.conf for %s', sheet_path) + sheet_conf = 
dict(global_conf.items() + sheet_conf.items()) + config_names = sheet_conf.get('configurations', []) + config_matchers = sheet_conf.get('configuration_matchers', []) + collector = Collector(make_match_func(config_matchers), args.include_unmatched) + for (root, _, filenames) in os.walk(sheet_path): + logging.info('looking in %s', root) + for filename in fnmatch.filter(filenames, '*.xml'): + if os.path.normpath(sheet_path) == os.path.normpath(root): + default_conf = None + else: + default_conf = os.path.relpath(root, sheet_path) + collector.collect_from(os.path.join(root, filename), default_conf) + config_names.extend(sorted(collector.extra_configurations - set(config_names))) + sheet = wb.add_sheet(sheet_conf.get('sheet_name', os.path.basename(os.path.abspath(sheet_path)))) + sheet_properties = sheet_conf.get('sheet_properties', []) + sheet.write(0, 0, 'Properties:') + sheet.write(0, 1, 'N/A' if len(sheet_properties) == 0 else ' '.join((str(k) + '=' + repr(v) for (k, v) in sheet_properties))) + sheet.row(2).height = 800 + sheet.panes_frozen = True + sheet.remove_splits = True + sheet_comparisons = sheet_conf.get('comparisons', []) + row = 2 + col = 0 + for (w, caption) in [(2500, 'Module'), (10000, 'Test'), (2000, 'Image\nwidth'), (2000, 'Image\nheight'), (2000, 'Data\ntype'), (7500, 'Other parameters')]: + sheet.col(col).width = w + if args.show_times_per_pixel: + sheet.write_merge(row, row + 1, col, col, caption, header_style) + else: + sheet.write(row, col, caption, header_style) + col += 1 + for config_name in config_names: + if args.show_times_per_pixel: + sheet.col(col).width = 3000 + sheet.col(col + 1).width = 3000 + sheet.write_merge(row, row, col, col + 1, config_name, header_style) + sheet.write(row + 1, col, 'total, ms', subheader_style) + sheet.write(row + 1, col + 1, 'per pixel, ns', subheader_style) + col += 2 + else: + sheet.col(col).width = 4000 + sheet.write(row, col, config_name, header_style) + col += 1 + col += 1 + for comp in sheet_comparisons: + sheet.col(col).width = 4000 + caption = comp['to'] + '\nvs\n' + comp['from'] + if args.show_times_per_pixel: + sheet.write_merge(row, row + 1, col, col, caption, header_style) + else: + sheet.write(row, col, caption, header_style) + col += 1 + row += 2 if args.show_times_per_pixel else 1 + sheet.horz_split_pos = row + sheet.horz_split_first_visible = row + module_colors = sheet_conf.get('module_colors', {}) + module_styles = {module: xlwt.easyxf('pattern: pattern solid, fore_color {}'.format(color)) for (module, color) in module_colors.iteritems()} + for (module, tests) in sorted(collector.tests.iteritems()): + for ((test, param), configs) in sorted(tests.iteritems()): + sheet.write(row, 0, module, module_styles.get(module, xlwt.Style.default_style)) + sheet.write(row, 1, test) + param_list = param[1:-1].split(', ') if param.startswith('(') and param.endswith(')') else [param] + image_size = next(ifilter(re_image_size.match, param_list), None) + if image_size is not None: + (image_width, image_height) = map(int, image_size.split('x', 1)) + sheet.write(row, 2, image_width) + sheet.write(row, 3, image_height) + del param_list[param_list.index(image_size)] + data_type = next(ifilter(re_data_type.match, param_list), None) + if data_type is not None: + sheet.write(row, 4, data_type) + del param_list[param_list.index(data_type)] + sheet.row(row).write(5, ' | '.join(param_list)) + col = 6 + for c in config_names: + if c in configs: + sheet.write(row, col, configs[c], style_dict.get(configs[c], time_style)) + else: + sheet.write(row, 
col, None, no_time_style)
+ col += 1
+ if args.show_times_per_pixel:
+ sheet.write(row, col, xlwt.Formula('{0} * 1000000 / ({1} * {2})'.format(xlwt.Utils.rowcol_to_cell(row, col - 1), xlwt.Utils.rowcol_to_cell(row, 2), xlwt.Utils.rowcol_to_cell(row, 3))), time_style)
+ col += 1
+ col += 1
+ for comp in sheet_comparisons:
+ cmp_from = configs.get(comp['from'])
+ cmp_to = configs.get(comp['to'])
+ if isinstance(cmp_from, numbers.Number) and isinstance(cmp_to, numbers.Number):
+ try:
+ speedup = cmp_from / cmp_to
+ sheet.write(row, col, speedup, good_speedup_style if speedup > 1.1 else bad_speedup_style if speedup < 0.9 else speedup_style)
+ except ArithmeticError:
+ sheet.write(row, col, None, error_speedup_style)
+ else:
+ sheet.write(row, col, None, no_speedup_style)
+ col += 1
+ row += 1
+ if row % 1000 == 0:
+ sheet.flush_row_data()
+ wb.save(args.output)
+if __name__ == '__main__':
+ main()
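Each log directory may carry a sheet.conf, read above with ast.literal_eval, so it is just a Python dict literal. A hypothetical example (the matcher property names depend on what testlog_parser exposes in run.properties):

{
    'sheet_name': 'core-perf',
    'configurations': ['baseline', 'patched'],
    'configuration_matchers': [
        {'name': 'baseline', 'properties': {'build_type': 'release', 'branch': '4.x'}},
        {'name': 'patched', 'properties': {'build_type': 'release', 'branch': 'feature'}},
    ],
    'comparisons': [{'from': 'baseline', 'to': 'patched'}],
}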
+# File: opencv-master/platforms/android/build_java_shared_aar.py
+import argparse
+from os import path
+import os
+import re
+import shutil
+import string
+import subprocess
+COPY_FROM_SDK_TO_ANDROID_PROJECT = [['sdk/native/jni/include', 'OpenCV/src/main/cpp/include'], ['sdk/java/src/org', 'OpenCV/src/main/java/org'], ['sdk/java/res', 'OpenCV/src/main/res']]
+COPY_FROM_SDK_TO_APK = [['sdk/native/libs/<ABI>/lib<LIB_NAME>.so', 'jni/<ABI>/lib<LIB_NAME>.so'], ['sdk/native/libs/<ABI>/lib<LIB_NAME>.so', 'prefab/modules/<LIB_NAME>/libs/android.<ABI>/lib<LIB_NAME>.so']]  # '<ABI>'/'<LIB_NAME>' placeholders restored; they were stripped during extraction
+ANDROID_PROJECT_TEMPLATE_DIR = path.join(path.dirname(__file__), 'aar-template')
+TEMP_DIR = 'build_java_shared'
+ANDROID_PROJECT_DIR = path.join(TEMP_DIR, 'AndroidProject')
+COMPILED_AAR_PATH_1 = path.join(ANDROID_PROJECT_DIR, 'OpenCV/build/outputs/aar/OpenCV-release.aar')
+COMPILED_AAR_PATH_2 = path.join(ANDROID_PROJECT_DIR, 'OpenCV/build/outputs/aar/opencv-release.aar')
+AAR_UNZIPPED_DIR = path.join(TEMP_DIR, 'aar_unzipped')
+FINAL_AAR_PATH_TEMPLATE = 'outputs/opencv_java_shared_<OPENCV_VERSION>.aar'
+FINAL_REPO_PATH = 'outputs/maven_repo'
+MAVEN_PACKAGE_NAME = 'opencv'
+
+def fill_template(src_path, dst_path, args_dict):
+ with open(src_path, 'r') as f:
+ template_text = f.read()
+ template = string.Template(template_text)
+ text = template.safe_substitute(args_dict)
+ with open(dst_path, 'w') as f:
+ f.write(text)
+
+def get_opencv_version(opencv_sdk_path):
+ version_hpp_path = path.join(opencv_sdk_path, 'sdk/native/jni/include/opencv2/core/version.hpp')
+ with open(version_hpp_path, 'rt') as f:
+ data = f.read()
+ major = re.search('^#define\\W+CV_VERSION_MAJOR\\W+(\\d+)$', data, re.MULTILINE).group(1)
+ minor = re.search('^#define\\W+CV_VERSION_MINOR\\W+(\\d+)$', data, re.MULTILINE).group(1)
+ revision = re.search('^#define\\W+CV_VERSION_REVISION\\W+(\\d+)$', data, re.MULTILINE).group(1)
+ return '%(major)s.%(minor)s.%(revision)s' % locals()
+
+def get_compiled_aar_path(path1, path2):
+ if path.exists(path1):
+ return path1
+ elif path.exists(path2):
+ return path2
+ else:
+ raise Exception("Can't find compiled AAR path in [" + path1 + ', ' + path2 + ']')
+
+def cleanup(paths_to_remove):
+ exists = False
+ for p in paths_to_remove:
+ if path.exists(p):
+ exists = True
+ if path.isdir(p):
+ shutil.rmtree(p)
+ else:
+ os.remove(p)
+ print('Removed', p)
+ if not exists:
+ print('Nothing to remove')
+
+def main(args):
+ opencv_version = get_opencv_version(args.opencv_sdk_path)
+ abis = os.listdir(path.join(args.opencv_sdk_path, 'sdk/native/libs'))
+ lib_name = 'opencv_java' + opencv_version.split('.')[0]
+ final_aar_path = FINAL_AAR_PATH_TEMPLATE.replace('<OPENCV_VERSION>', opencv_version)
+ print('Removing data from previous runs...')
+ cleanup([TEMP_DIR, final_aar_path, path.join(FINAL_REPO_PATH, 'org/opencv', MAVEN_PACKAGE_NAME)])
+ print('Preparing Android project...')
+ shutil.copytree(ANDROID_PROJECT_TEMPLATE_DIR, ANDROID_PROJECT_DIR)
+ shutil.rmtree(path.join(ANDROID_PROJECT_DIR, 'OpenCV/src/main/cpp/include'))
+ fill_template(path.join(ANDROID_PROJECT_DIR, 'OpenCV/build.gradle.template'), path.join(ANDROID_PROJECT_DIR, 'OpenCV/build.gradle'), {'LIB_NAME': lib_name, 'LIB_TYPE': 'c++_shared', 'PACKAGE_NAME': MAVEN_PACKAGE_NAME, 'OPENCV_VERSION': opencv_version, 'COMPILE_SDK': args.android_compile_sdk, 'MIN_SDK': args.android_min_sdk, 'TARGET_SDK': args.android_target_sdk, 'ABI_FILTERS': ', '.join(['"' + x + '"' for x in abis]), 'JAVA_VERSION': args.java_version})
+ fill_template(path.join(ANDROID_PROJECT_DIR, 'OpenCV/src/main/cpp/CMakeLists.txt.template'), path.join(ANDROID_PROJECT_DIR, 'OpenCV/src/main/cpp/CMakeLists.txt'), {'LIB_NAME': lib_name, 'LIB_TYPE': 'SHARED'})
+ local_props = ''
+ if args.ndk_location:
+ local_props += 'ndk.dir=' + args.ndk_location + '\n'
+ if args.cmake_location:
+ local_props += 'cmake.dir=' + args.cmake_location + '\n'
+ if local_props:
+ with open(path.join(ANDROID_PROJECT_DIR, 'local.properties'), 'wt') as f:
+ f.write(local_props)
+ for (src, dst) in COPY_FROM_SDK_TO_ANDROID_PROJECT:
+ shutil.copytree(path.join(args.opencv_sdk_path, src), path.join(ANDROID_PROJECT_DIR, dst))
+ print('Running gradle assembleRelease...')
+ cmd = ['./gradlew', 'assembleRelease']
+ if args.offline:
+ cmd = cmd + ['--offline']
+ subprocess.run(cmd, shell=False, cwd=ANDROID_PROJECT_DIR, check=True)
+ print('Adding libs to AAR...')
+ compiled_aar_path = get_compiled_aar_path(COMPILED_AAR_PATH_1, COMPILED_AAR_PATH_2)
+ shutil.unpack_archive(compiled_aar_path, AAR_UNZIPPED_DIR, 'zip')
+ for abi in abis:
+ for (src, dst) in COPY_FROM_SDK_TO_APK:
+ src = src.replace('<ABI>', abi).replace('<LIB_NAME>', lib_name)
+ dst = dst.replace('<ABI>', abi).replace('<LIB_NAME>', lib_name)
+ shutil.copy(path.join(args.opencv_sdk_path, src), path.join(AAR_UNZIPPED_DIR, dst))
+ os.makedirs('outputs', exist_ok=True)
+ shutil.make_archive(final_aar_path, 'zip', AAR_UNZIPPED_DIR, '.')
+ os.rename(final_aar_path + '.zip', final_aar_path)
+ print('Creating local maven repo...')
+ shutil.copy(final_aar_path, path.join(ANDROID_PROJECT_DIR, 'OpenCV/opencv-release.aar'))
+ print('Creating a maven repo from project sources (with sources jar and javadoc jar)...')
+ cmd = ['./gradlew', 'publishReleasePublicationToMyrepoRepository']
+ if args.offline:
+ cmd = cmd + ['--offline']
+ subprocess.run(cmd, shell=False, cwd=ANDROID_PROJECT_DIR, check=True)
+ os.makedirs(path.join(FINAL_REPO_PATH, 'org/opencv'), exist_ok=True)
+ shutil.move(path.join(ANDROID_PROJECT_DIR, 'OpenCV/build/repo/org/opencv', MAVEN_PACKAGE_NAME), path.join(FINAL_REPO_PATH, 'org/opencv', MAVEN_PACKAGE_NAME))
+ print('Creating a maven repo from modified AAR (with cpp libraries)...')
+ cmd = ['./gradlew', 'publishModifiedPublicationToMyrepoRepository']
+ if args.offline:
+ cmd = cmd + ['--offline']
+ subprocess.run(cmd, shell=False, cwd=ANDROID_PROJECT_DIR, check=True)
+ shutil.copytree(path.join(ANDROID_PROJECT_DIR, 'OpenCV/build/repo/org/opencv', MAVEN_PACKAGE_NAME), path.join(FINAL_REPO_PATH, 'org/opencv', MAVEN_PACKAGE_NAME), dirs_exist_ok=True)
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Builds AAR with Java and shared C++ libs from OpenCV SDK')
+ parser.add_argument('opencv_sdk_path')
+ parser.add_argument('--android_compile_sdk', default='31')
+ parser.add_argument('--android_min_sdk', default='21')
+ parser.add_argument('--android_target_sdk', default='31')
+ parser.add_argument('--java_version', default='1_8')
+ parser.add_argument('--ndk_location', default='')
+ parser.add_argument('--cmake_location', default='')
+ parser.add_argument('--offline', action='store_true', help='Force Gradle to use offline mode')
+ args = parser.parse_args()
+ main(args)
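fill_template above relies on stdlib string.Template, so the *.template files in the aar-template project use ${KEY} placeholders and unknown keys are left untouched by safe_substitute. An illustrative one-liner (the template text is made up):

import string
tpl = string.Template('minSdkVersion ${MIN_SDK} // lib: ${LIB_NAME}, untouched: ${UNKNOWN}')
print(tpl.safe_substitute({'MIN_SDK': '21', 'LIB_NAME': 'opencv_java4'}))
# -> minSdkVersion 21 // lib: opencv_java4, untouched: ${UNKNOWN}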
+# File: opencv-master/platforms/android/build_sdk.py
+import os, sys
+import argparse
+import glob
+import re
+import shutil
+import subprocess
+import time
+import logging as log
+import xml.etree.ElementTree as ET
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+
+class Fail(Exception):
+
+ def __init__(self, text=None):
+ self.t = text
+
+ def __str__(self):
+ return 'ERROR' if self.t is None else self.t
+
+def execute(cmd, shell=False):
+ try:
+ log.debug('Executing: %s' % cmd)
+ log.info('Executing: ' + ' '.join(cmd))
+ retcode = subprocess.call(cmd, shell=shell)
+ if retcode < 0:
+ raise Fail('Child was terminated by signal: %s' % -retcode)
+ elif retcode > 0:
+ raise Fail('Child returned: %s' % retcode)
+ except OSError as e:
+ raise Fail('Execution failed: %d / %s' % (e.errno, e.strerror))
+
+def rm_one(d):
+ d = os.path.abspath(d)
+ if os.path.exists(d):
+ if os.path.isdir(d):
+ log.info('Removing dir: %s', d)
+ shutil.rmtree(d)
+ elif os.path.isfile(d):
+ log.info('Removing file: %s', d)
+ os.remove(d)
+
+def check_dir(d, create=False, clean=False):
+ d = os.path.abspath(d)
+ log.info('Check dir %s (create: %s, clean: %s)', d, create, clean)
+ if os.path.exists(d):
+ if not os.path.isdir(d):
+ raise Fail('Not a directory: %s' % d)
+ if clean:
+ for x in glob.glob(os.path.join(d, '*')):
+ rm_one(x)
+ elif create:
+ os.makedirs(d)
+ return d
+
+def check_executable(cmd):
+ try:
+ log.debug('Executing: %s' % cmd)
+ result = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ if not isinstance(result, str):
+ result = result.decode('utf-8')
+ log.debug('Result: %s' % (result + '\n').split('\n')[0])
+ return True
+ except Exception as e:
+ log.debug('Failed: %s' % e)
+ return False
+
+def determine_opencv_version(version_hpp_path):
+ with open(version_hpp_path, 'rt') as f:
+ data = f.read()
+ major = re.search('^#define\\W+CV_VERSION_MAJOR\\W+(\\d+)$', data, re.MULTILINE).group(1)
+ minor = re.search('^#define\\W+CV_VERSION_MINOR\\W+(\\d+)$', data, re.MULTILINE).group(1)
+ revision = re.search('^#define\\W+CV_VERSION_REVISION\\W+(\\d+)$', data, re.MULTILINE).group(1)
+ version_status = re.search('^#define\\W+CV_VERSION_STATUS\\W+"([^"]*)"$', data, re.MULTILINE).group(1)
+ return '%(major)s.%(minor)s.%(revision)s%(version_status)s' % locals()
+
+def move_smart(src, dst):
+
+ def move_recurse(subdir):
+ s = os.path.join(src, subdir)
+ d = os.path.join(dst, subdir)
+ if os.path.exists(d):
+ if os.path.isdir(d):
+ for item in os.listdir(s):
+ move_recurse(os.path.join(subdir, item))
+ elif os.path.isfile(s):
+ shutil.move(s, d)
+ else:
+ shutil.move(s, d)
+ move_recurse('')
+
+def copytree_smart(src, dst):
+
+ def copy_recurse(subdir):
+ s = os.path.join(src, subdir)
+ d = os.path.join(dst, subdir)
+ if os.path.exists(d):
+ if os.path.isdir(d):
+ for item in os.listdir(s):
+ copy_recurse(os.path.join(subdir, item))
+ elif os.path.isfile(s):
+ shutil.copy2(s, d)
+ elif os.path.isdir(s):
+ shutil.copytree(s, d)
+ elif os.path.isfile(s):
+ shutil.copy2(s, d)
+ copy_recurse('')
+
+def get_highest_version(subdirs):
+ return max(subdirs, key=lambda dir: [int(comp) for comp 
in os.path.split(dir)[-1].split('.')]) + +class ABI: + + def __init__(self, platform_id, name, toolchain, ndk_api_level=None, cmake_vars=dict()): + self.platform_id = platform_id + self.name = name + self.toolchain = toolchain + self.cmake_vars = dict(ANDROID_STL='gnustl_static', ANDROID_ABI=self.name, ANDROID_PLATFORM_ID=platform_id) + if toolchain is not None: + self.cmake_vars['ANDROID_TOOLCHAIN_NAME'] = toolchain + else: + self.cmake_vars['ANDROID_TOOLCHAIN'] = 'clang' + self.cmake_vars['ANDROID_STL'] = 'c++_shared' + if ndk_api_level: + self.cmake_vars['ANDROID_NATIVE_API_LEVEL'] = ndk_api_level + self.cmake_vars.update(cmake_vars) + + def __str__(self): + return '%s (%s)' % (self.name, self.toolchain) + + def haveIPP(self): + return self.name == 'x86' or self.name == 'x86_64' + +class Builder: + + def __init__(self, workdir, opencvdir, config): + self.workdir = check_dir(workdir, create=True) + self.opencvdir = check_dir(opencvdir) + self.config = config + self.libdest = check_dir(os.path.join(self.workdir, 'o4a'), create=True, clean=True) + self.resultdest = check_dir(os.path.join(self.workdir, 'OpenCV-android-sdk'), create=True, clean=True) + self.docdest = check_dir(os.path.join(self.workdir, 'OpenCV-android-sdk', 'sdk', 'java', 'javadoc'), create=True, clean=True) + self.extra_packs = [] + self.opencv_version = determine_opencv_version(os.path.join(self.opencvdir, 'modules', 'core', 'include', 'opencv2', 'core', 'version.hpp')) + self.use_ccache = False if config.no_ccache else True + self.cmake_path = self.get_cmake() + self.ninja_path = self.get_ninja() + self.debug = True if config.debug else False + self.debug_info = True if config.debug_info else False + self.no_samples_build = True if config.no_samples_build else False + self.hwasan = True if config.hwasan else False + self.opencl = True if config.opencl else False + self.no_kotlin = True if config.no_kotlin else False + self.shared = True if config.shared else False + self.disable = args.disable + + def get_cmake(self): + if not self.config.use_android_buildtools and check_executable(['cmake', '--version']): + log.info('Using cmake from PATH') + return 'cmake' + android_cmake = os.path.join(os.environ['ANDROID_SDK'], 'cmake') + if os.path.exists(android_cmake): + cmake_subdirs = [f for f in os.listdir(android_cmake) if check_executable([os.path.join(android_cmake, f, 'bin', 'cmake'), '--version'])] + if len(cmake_subdirs) > 0: + cmake_from_sdk = os.path.join(android_cmake, get_highest_version(cmake_subdirs), 'bin', 'cmake') + log.info('Using cmake from Android SDK: %s', cmake_from_sdk) + return cmake_from_sdk + raise Fail("Can't find cmake") + + def get_ninja(self): + if not self.config.use_android_buildtools and check_executable(['ninja', '--version']): + log.info('Using ninja from PATH') + return 'ninja' + android_cmake = os.path.join(os.environ['ANDROID_SDK'], 'cmake') + if os.path.exists(android_cmake): + cmake_subdirs = [f for f in os.listdir(android_cmake) if check_executable([os.path.join(android_cmake, f, 'bin', 'ninja'), '--version'])] + if len(cmake_subdirs) > 0: + ninja_from_sdk = os.path.join(android_cmake, cmake_subdirs[0], 'bin', 'ninja') + log.info('Using ninja from Android SDK: %s', ninja_from_sdk) + return ninja_from_sdk + raise Fail("Can't find ninja") + + def get_toolchain_file(self): + if not self.config.force_opencv_toolchain: + toolchain = os.path.join(os.environ['ANDROID_NDK'], 'build', 'cmake', 'android.toolchain.cmake') + if os.path.exists(toolchain): + return toolchain + toolchain = 
os.path.join(SCRIPT_DIR, 'android.toolchain.cmake') + if os.path.exists(toolchain): + return toolchain + else: + raise Fail("Can't find toolchain") + + def get_engine_apk_dest(self, engdest): + return os.path.join(engdest, 'platforms', 'android', 'service', 'engine', '.build') + + def add_extra_pack(self, ver, path): + if path is None: + return + self.extra_packs.append((ver, check_dir(path))) + + def clean_library_build_dir(self): + for d in ['CMakeCache.txt', 'CMakeFiles/', 'bin/', 'libs/', 'lib/', 'package/', 'install/samples/']: + rm_one(d) + + def build_library(self, abi, do_install, no_media_ndk): + cmd = [self.cmake_path, '-GNinja'] + cmake_vars = dict(CMAKE_TOOLCHAIN_FILE=self.get_toolchain_file(), INSTALL_CREATE_DISTRIB='ON', WITH_OPENCL='OFF', BUILD_KOTLIN_EXTENSIONS='ON', WITH_IPP='ON' if abi.haveIPP() else 'OFF', WITH_TBB='ON', BUILD_EXAMPLES='OFF', BUILD_TESTS='OFF', BUILD_PERF_TESTS='OFF', BUILD_DOCS='OFF', BUILD_ANDROID_EXAMPLES='OFF' if self.no_samples_build else 'ON', INSTALL_ANDROID_EXAMPLES='OFF' if self.no_samples_build else 'ON') + if self.ninja_path != 'ninja': + cmake_vars['CMAKE_MAKE_PROGRAM'] = self.ninja_path + if self.debug: + cmake_vars['CMAKE_BUILD_TYPE'] = 'Debug' + if self.debug_info: + cmake_vars['BUILD_WITH_DEBUG_INFO'] = 'ON' + if self.opencl: + cmake_vars['WITH_OPENCL'] = 'ON' + if self.no_kotlin: + cmake_vars['BUILD_KOTLIN_EXTENSIONS'] = 'OFF' + if self.shared: + cmake_vars['BUILD_SHARED_LIBS'] = 'ON' + if self.config.modules_list is not None: + cmake_vars['BUILD_LIST'] = '%s' % self.config.modules_list + if self.config.extra_modules_path is not None: + cmake_vars['OPENCV_EXTRA_MODULES_PATH'] = '%s' % self.config.extra_modules_path + if self.use_ccache == True: + cmake_vars['NDK_CCACHE'] = 'ccache' + if do_install: + cmake_vars['BUILD_TESTS'] = 'ON' + cmake_vars['INSTALL_TESTS'] = 'ON' + if no_media_ndk: + cmake_vars['WITH_ANDROID_MEDIANDK'] = 'OFF' + if self.hwasan and 'arm64' in abi.name: + cmake_vars['OPENCV_ENABLE_MEMORY_SANITIZER'] = 'ON' + hwasan_flags = '-fno-omit-frame-pointer -fsanitize=hwaddress' + for s in ['OPENCV_EXTRA_C_FLAGS', 'OPENCV_EXTRA_CXX_FLAGS', 'OPENCV_EXTRA_EXE_LINKER_FLAGS', 'OPENCV_EXTRA_SHARED_LINKER_FLAGS', 'OPENCV_EXTRA_MODULE_LINKER_FLAGS']: + if s in cmake_vars.keys(): + cmake_vars[s] = cmake_vars[s] + ' ' + hwasan_flags + else: + cmake_vars[s] = hwasan_flags + cmake_vars.update(abi.cmake_vars) + if len(self.disable) > 0: + cmake_vars.update({'WITH_%s' % f: 'OFF' for f in self.disable}) + cmd += ["-D%s='%s'" % (k, v) for (k, v) in cmake_vars.items() if v is not None] + cmd.append(self.opencvdir) + execute(cmd) + execute([self.ninja_path, 'opencv_modules']) + if self.no_samples_build: + execute([self.ninja_path, 'install' if self.debug_info or self.debug else 'install/strip']) + else: + execute([self.ninja_path, '-j1', 'install' if self.debug_info or self.debug else 'install/strip']) + + def build_javadoc(self): + classpaths = [] + for (dir, _, files) in os.walk(os.environ['ANDROID_SDK']): + for f in files: + if f == 'android.jar' or f == 'annotations.jar': + classpaths.append(os.path.join(dir, f)) + srcdir = os.path.join(self.resultdest, 'sdk', 'java', 'src') + dstdir = self.docdest + shutil.copy2(os.path.join(SCRIPT_DIR, '../../doc/mymath.js'), dstdir) + cmd = ['javadoc', '-windowtitle', 'OpenCV %s Java documentation' % self.opencv_version, '-doctitle', 'OpenCV Java documentation (%s)' % self.opencv_version, '-nodeprecated', '-public', '-sourcepath', srcdir, '-encoding', 'UTF-8', '-charset', 'UTF-8', '-docencoding', 
'UTF-8', '--allow-script-in-comments', '-header', "<script src='%s/MathJax.js?config=TeX-AMS-MML_HTMLorMML'></script><script src='mymath.js' type='text/javascript'></script>" % 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0', '-bottom', 'Generated on %s / OpenCV %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), self.opencv_version), '-d', dstdir, '-classpath', ':'.join(classpaths), '-subpackages', 'org.opencv']  # the <script> tags were stripped during extraction and have been reconstructed: they load MathJax and the mymath.js copied above
+ execute(cmd)
+
+ def gather_results(self):
+ root = os.path.join(self.libdest, 'install')
+ for item in os.listdir(root):
+ src = os.path.join(root, item)
+ dst = os.path.join(self.resultdest, item)
+ if os.path.isdir(src):
+ log.info('Copy dir: %s', item)
+ if self.config.force_copy:
+ copytree_smart(src, dst)
+ else:
+ move_smart(src, dst)
+ elif os.path.isfile(src):
+ log.info('Copy file: %s', item)
+ if self.config.force_copy:
+ shutil.copy2(src, dst)
+ else:
+ shutil.move(src, dst)
+
+def get_ndk_dir():
+ android_sdk_ndk = os.path.join(os.environ['ANDROID_SDK'], 'ndk')
+ android_sdk_ndk_bundle = os.path.join(os.environ['ANDROID_SDK'], 'ndk-bundle')
+ if os.path.exists(android_sdk_ndk):
+ ndk_subdirs = [f for f in os.listdir(android_sdk_ndk) if os.path.exists(os.path.join(android_sdk_ndk, f, 'package.xml'))]
+ if len(ndk_subdirs) > 0:
+ ndk_from_sdk = os.path.join(android_sdk_ndk, get_highest_version(ndk_subdirs))
+ log.info('Using NDK (side-by-side) from Android SDK: %s', ndk_from_sdk)
+ return ndk_from_sdk
+ if os.path.exists(os.path.join(android_sdk_ndk_bundle, 'package.xml')):
+ log.info('Using NDK bundle from Android SDK: %s', android_sdk_ndk_bundle)
+ return android_sdk_ndk_bundle
+ return None
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Build OpenCV for Android SDK')
+ parser.add_argument('work_dir', nargs='?', default='.', help='Working directory (and output)')
+ parser.add_argument('opencv_dir', nargs='?', default=os.path.join(SCRIPT_DIR, '../..'), help='Path to OpenCV source dir')
+ parser.add_argument('--config', default='ndk-18-api-level-21.config.py', type=str, help='Package build configuration')
+ parser.add_argument('--ndk_path', help='Path to Android NDK to use for build')
+ parser.add_argument('--sdk_path', help='Path to Android SDK to use for build')
+ parser.add_argument('--use_android_buildtools', action='store_true', help='Use cmake/ninja build tools from Android SDK')
+ parser.add_argument('--modules_list', help='List of modules to include for build')
+ parser.add_argument('--extra_modules_path', help='Path to extra modules to use for build')
+ parser.add_argument('--sign_with', help='Certificate to sign the Manager apk')
+ parser.add_argument('--build_doc', action='store_true', help='Build javadoc')
+ parser.add_argument('--no_ccache', action='store_true', help='Do not use ccache during library build')
+ parser.add_argument('--force_copy', action='store_true', help='Do not use file move during library build (useful for debug)')
+ parser.add_argument('--force_opencv_toolchain', action='store_true', help='Do not use toolchain from Android NDK')
+ parser.add_argument('--debug', action='store_true', help="Build 'Debug' binaries (CMAKE_BUILD_TYPE=Debug)")
+ parser.add_argument('--debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)')
+ parser.add_argument('--no_samples_build', action='store_true', help='Do not build samples (speeds up build)')
+ parser.add_argument('--opencl', action='store_true', help='Enable OpenCL support')
+ parser.add_argument('--no_kotlin', action='store_true', help='Disable Kotlin extensions')
+ parser.add_argument('--shared', action='store_true', help='Build shared libraries')
+ parser.add_argument('--no_media_ndk', action='store_true', help='Do not link Media NDK (which is required for video I/O support)')
+ parser.add_argument('--hwasan', action='store_true', help='Enable Hardware Address Sanitizer on ARM64')
+ parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF). To disable multiple, specify this flag again, e.g. "--disable TBB --disable OPENMP"')
+ args = parser.parse_args()
+ log.basicConfig(format='%(message)s', level=log.DEBUG)
+ log.debug('Args: %s', args)
+ if args.ndk_path is not None:
+ os.environ['ANDROID_NDK'] = args.ndk_path
+ if args.sdk_path is not None:
+ os.environ['ANDROID_SDK'] = args.sdk_path
+ if not 'ANDROID_HOME' in os.environ and 'ANDROID_SDK' in os.environ:
+ os.environ['ANDROID_HOME'] = os.environ['ANDROID_SDK']
+ if not 'ANDROID_SDK' in os.environ:
+ raise Fail('SDK location not set. Either pass --sdk_path or set ANDROID_SDK environment variable')
+ if not 'ANDROID_NDK' in os.environ and 'ANDROID_SDK' in os.environ:
+ sdk_ndk_dir = get_ndk_dir()
+ if sdk_ndk_dir:
+ os.environ['ANDROID_NDK'] = sdk_ndk_dir
+ if not 'ANDROID_NDK' in os.environ:
+ raise Fail('NDK location not set. Either pass --ndk_path or set ANDROID_NDK environment variable')
+ show_samples_build_warning = False
+ if not 'ANDROID_NDK_HOME' in os.environ and 'ANDROID_NDK' in os.environ:
+ os.environ['ANDROID_NDK_HOME'] = os.environ['ANDROID_NDK']
+ show_samples_build_warning = True
+ if not check_executable(['ccache', '--version']):
+ log.info('ccache not found - disabling ccache support')
+ args.no_ccache = True
+ if os.path.realpath(args.work_dir) == os.path.realpath(SCRIPT_DIR):
+ raise Fail('Specify workdir (building from script directory is not supported)')
+ if os.path.realpath(args.work_dir) == os.path.realpath(args.opencv_dir):
+ raise Fail('Specify workdir (building from OpenCV source directory is not supported)')
+ if args.opencv_dir is not None and (not os.path.isabs(args.opencv_dir)):
+ args.opencv_dir = os.path.abspath(args.opencv_dir)
+ if args.extra_modules_path is not None and (not os.path.isabs(args.extra_modules_path)):
+ args.extra_modules_path = os.path.abspath(args.extra_modules_path)
+ cpath = args.config
+ if not os.path.exists(cpath):
+ cpath = os.path.join(SCRIPT_DIR, cpath)
+ if not os.path.exists(cpath):
+ raise Fail('Config "%s" is missing' % args.config)
+ with open(cpath, 'r') as f:
+ cfg = f.read()
+ print('Package configuration:')
+ print('=' * 80)
+ print(cfg.strip())
+ print('=' * 80)
+ ABIs = None
+ exec(compile(cfg, cpath, 'exec'))  # the config script must define the ABIs list; a sample config is sketched below
+ log.info('Android NDK path: %s', os.environ['ANDROID_NDK'])
+ log.info('Android SDK path: %s', os.environ['ANDROID_SDK'])
+ builder = Builder(args.work_dir, args.opencv_dir, args)
+ log.info('Detected OpenCV version: %s', builder.opencv_version)
+ for (i, abi) in enumerate(ABIs):
+ do_install = i == 0
+ log.info('=====')
+ log.info('===== Building library for %s', abi)
+ log.info('=====')
+ os.chdir(builder.libdest)
+ builder.clean_library_build_dir()
+ builder.build_library(abi, do_install, args.no_media_ndk)
+ builder.gather_results()
+ if args.build_doc:
+ builder.build_javadoc()
+ log.info('=====')
+ log.info('===== Build finished')
+ log.info('=====')
+ if show_samples_build_warning:
+ log.info('ANDROID_NDK_HOME environment variable required by the samples project is not set')
+ log.info('SDK location: %s', builder.resultdest)
+ log.info('Documentation location: %s', builder.docdest)
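A hypothetical --config file for the build above; it is exec()'d and must define an ABIs list using the ABI class defined earlier (platform ids and API level shown here are illustrative):

ABIs = [
    ABI('2', 'armeabi-v7a', None, ndk_api_level=21),
    ABI('3', 'arm64-v8a',   None, ndk_api_level=21),
    ABI('4', 'x86',         None, ndk_api_level=21),
    ABI('5', 'x86_64',      None, ndk_api_level=21),
]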
opencv-master/platforms/android/build_static_aar.py +import argparse +import json +from os import path +import os +import shutil +import subprocess +from build_java_shared_aar import cleanup, fill_template, get_compiled_aar_path, get_opencv_version +ANDROID_PROJECT_TEMPLATE_DIR = path.join(path.dirname(__file__), 'aar-template') +TEMP_DIR = 'build_static' +ANDROID_PROJECT_DIR = path.join(TEMP_DIR, 'AndroidProject') +COMPILED_AAR_PATH_1 = path.join(ANDROID_PROJECT_DIR, 'OpenCV/build/outputs/aar/OpenCV-release.aar') +COMPILED_AAR_PATH_2 = path.join(ANDROID_PROJECT_DIR, 'OpenCV/build/outputs/aar/opencv-release.aar') +AAR_UNZIPPED_DIR = path.join(TEMP_DIR, 'aar_unzipped') +FINAL_AAR_PATH_TEMPLATE = 'outputs/opencv_static_<OPENCV_VERSION>.aar' +FINAL_REPO_PATH = 'outputs/maven_repo' +MAVEN_PACKAGE_NAME = 'opencv-static' + +def get_list_of_opencv_libs(sdk_dir): + files = os.listdir(path.join(sdk_dir, 'sdk/native/staticlibs/arm64-v8a')) + libs = [f[3:-2] for f in files if f[:3] == 'lib' and f[-2:] == '.a'] + return libs + +def get_list_of_3rdparty_libs(sdk_dir, abis): + libs = [] + for abi in abis: + files = os.listdir(path.join(sdk_dir, 'sdk/native/3rdparty/libs/' + abi)) + cur_libs = [f[3:-2] for f in files if f[:3] == 'lib' and f[-2:] == '.a'] + for lib in cur_libs: + if lib not in libs: + libs.append(lib) + return libs + +def add_printing_linked_libs(sdk_dir, opencv_libs): + sdk_jni_dir = sdk_dir + '/sdk/native/jni' + with open(path.join(ANDROID_PROJECT_DIR, 'OpenCV/src/main/cpp/CMakeLists.txt'), 'a') as f: + f.write('\nset(OpenCV_DIR "' + sdk_jni_dir + '")\n') + f.write('find_package(OpenCV REQUIRED)\n') + for lib_name in opencv_libs: + output_filename_prefix = 'linkedlibs.' + lib_name + '.' + f.write('get_target_property(OUT "' + lib_name + '" INTERFACE_LINK_LIBRARIES)\n') + f.write('file(WRITE "' + output_filename_prefix + '${ANDROID_ABI}.txt" "${OUT}")\n') + +def read_linked_libs(lib_name, abis): + deps_lists = [] + for abi in abis: + with open(path.join(ANDROID_PROJECT_DIR, 'OpenCV/src/main/cpp', f'linkedlibs.{lib_name}.{abi}.txt')) as f: + text = f.read() + linked_libs = text.split(';') + linked_libs = [x.replace('$<LINK_ONLY:', '').replace('>', '') for x in linked_libs] + deps_lists.append(linked_libs) + return merge_dependencies_lists(deps_lists) + +def merge_dependencies_lists(deps_lists): + result = [] + for d_list in deps_lists: + for i in range(len(d_list)): + if d_list[i] not in result: + if i == 0: + result.append(d_list[i]) + else: + index = result.index(d_list[i - 1]) + result = result[:index + 1] + [d_list[i]] + result[index + 1:] + return result + +def convert_deps_list_to_prefab(linked_libs, opencv_libs, external_libs): + prefab_linked_libs = [] + for lib in linked_libs: + if lib in opencv_libs or lib in external_libs: + prefab_linked_libs.append(':' + lib) + elif lib[:3] == 'lib' and lib[3:] in external_libs: + prefab_linked_libs.append(':' + lib[3:]) + elif lib == 'ocv.3rdparty.android_mediandk': + prefab_linked_libs += ['-landroid', '-llog', '-lmediandk'] + print('Warning: manually handled ocv.3rdparty.android_mediandk dependency') + elif lib == 'ocv.3rdparty.flatbuffers': + print('Warning: manually handled ocv.3rdparty.flatbuffers dependency') + elif lib.startswith('ocv.3rdparty'): + raise Exception('Unknown lib ' + lib) + else: + prefab_linked_libs.append('-l' + lib) + return prefab_linked_libs + +def main(args): + opencv_version = get_opencv_version(args.opencv_sdk_path) + abis = os.listdir(path.join(args.opencv_sdk_path, 'sdk/native/libs')) + final_aar_path = FINAL_AAR_PATH_TEMPLATE.replace('<OPENCV_VERSION>', opencv_version) +
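+ # Illustration only, not part of the upstream script (harmless to execute at this point): merge_dependencies_lists() above splices each not-yet-seen library in directly after its predecessor from the current per-ABI list, so the link order recorded for every ABI survives as a subsequence of the merged result. The library names below are hypothetical.
+ assert merge_dependencies_lists([['a', 'b', 'd'], ['a', 'c', 'd']]) == ['a', 'c', 'b', 'd']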
sdk_dir = args.opencv_sdk_path + print('Removing data from previous runs...') + cleanup([TEMP_DIR, final_aar_path, path.join(FINAL_REPO_PATH, 'org/opencv', MAVEN_PACKAGE_NAME)]) + print('Preparing Android project...') + shutil.copytree(ANDROID_PROJECT_TEMPLATE_DIR, ANDROID_PROJECT_DIR) + fill_template(path.join(ANDROID_PROJECT_DIR, 'OpenCV/build.gradle.template'), path.join(ANDROID_PROJECT_DIR, 'OpenCV/build.gradle'), {'LIB_NAME': 'templib', 'LIB_TYPE': 'c++_static', 'PACKAGE_NAME': MAVEN_PACKAGE_NAME, 'OPENCV_VERSION': opencv_version, 'COMPILE_SDK': args.android_compile_sdk, 'MIN_SDK': args.android_min_sdk, 'TARGET_SDK': args.android_target_sdk, 'ABI_FILTERS': ', '.join(['"' + x + '"' for x in abis]), 'JAVA_VERSION': args.java_version}) + fill_template(path.join(ANDROID_PROJECT_DIR, 'OpenCV/src/main/cpp/CMakeLists.txt.template'), path.join(ANDROID_PROJECT_DIR, 'OpenCV/src/main/cpp/CMakeLists.txt'), {'LIB_NAME': 'templib', 'LIB_TYPE': 'STATIC'}) + local_props = '' + if args.ndk_location: + local_props += 'ndk.dir=' + args.ndk_location + '\n' + if args.cmake_location: + local_props += 'cmake.dir=' + args.cmake_location + '\n' + if local_props: + with open(path.join(ANDROID_PROJECT_DIR, 'local.properties'), 'wt') as f: + f.write(local_props) + opencv_libs = get_list_of_opencv_libs(sdk_dir) + external_libs = get_list_of_3rdparty_libs(sdk_dir, abis) + add_printing_linked_libs(sdk_dir, opencv_libs) + print('Running gradle assembleRelease...') + cmd = ['./gradlew', 'assembleRelease'] + if args.offline: + cmd = cmd + ['--offline'] + subprocess.run(cmd, shell=False, cwd=ANDROID_PROJECT_DIR, check=True) + compiled_aar_path = get_compiled_aar_path(COMPILED_AAR_PATH_1, COMPILED_AAR_PATH_2) + shutil.unpack_archive(compiled_aar_path, AAR_UNZIPPED_DIR, 'zip') + print('Adding libs to AAR...') + for lib in external_libs: + for abi in abis: + os.makedirs(path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/libs/android.' + abi)) + if path.exists(path.join(sdk_dir, 'sdk/native/3rdparty/libs/' + abi, 'lib' + lib + '.a')): + shutil.copy(path.join(sdk_dir, 'sdk/native/3rdparty/libs/' + abi, 'lib' + lib + '.a'), path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/libs/android.' + abi, 'lib' + lib + '.a')) + else: + shutil.copy(path.join(AAR_UNZIPPED_DIR, 'prefab/modules/templib/libs/android.' + abi, 'libtemplib.a'), path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/libs/android.' + abi, 'lib' + lib + '.a')) + shutil.copy(path.join(AAR_UNZIPPED_DIR, 'prefab/modules/templib/libs/android.' + abi + '/abi.json'), path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/libs/android.' + abi + '/abi.json')) + shutil.copy(path.join(AAR_UNZIPPED_DIR, 'prefab/modules/templib/module.json'), path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/module.json')) + for lib in opencv_libs: + for abi in abis: + os.makedirs(path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/libs/android.' + abi)) + shutil.copy(path.join(sdk_dir, 'sdk/native/staticlibs/' + abi, 'lib' + lib + '.a'), path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/libs/android.' + abi, 'lib' + lib + '.a')) + shutil.copy(path.join(AAR_UNZIPPED_DIR, 'prefab/modules/templib/libs/android.' + abi + '/abi.json'), path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/libs/android.'
+ abi + '/abi.json')) + os.makedirs(path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/include/opencv2')) + shutil.copy(path.join(sdk_dir, 'sdk/native/jni/include/opencv2/' + lib.replace('opencv_', '') + '.hpp'), path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/include/opencv2/' + lib.replace('opencv_', '') + '.hpp')) + shutil.copytree(path.join(sdk_dir, 'sdk/native/jni/include/opencv2/' + lib.replace('opencv_', '')), path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/include/opencv2/' + lib.replace('opencv_', ''))) + module_json_text = {'export_libraries': convert_deps_list_to_prefab(read_linked_libs(lib, abis), opencv_libs, external_libs), 'android': {}} + with open(path.join(AAR_UNZIPPED_DIR, 'prefab/modules/' + lib + '/module.json'), 'w') as f: + json.dump(module_json_text, f) + for h_file in ('cvconfig.h', 'opencv.hpp', 'opencv_modules.hpp'): + shutil.copy(path.join(sdk_dir, 'sdk/native/jni/include/opencv2/' + h_file), path.join(AAR_UNZIPPED_DIR, 'prefab/modules/opencv_core/include/opencv2/' + h_file)) + shutil.rmtree(path.join(AAR_UNZIPPED_DIR, 'prefab/modules/templib')) + os.makedirs('outputs', exist_ok=True) + shutil.make_archive(final_aar_path, 'zip', AAR_UNZIPPED_DIR, '.') + os.rename(final_aar_path + '.zip', final_aar_path) + print('Creating local maven repo...') + shutil.copy(final_aar_path, path.join(ANDROID_PROJECT_DIR, 'OpenCV/opencv-release.aar')) + print('Creating a maven repo from project sources (with sources jar and javadoc jar)...') + cmd = ['./gradlew', 'publishReleasePublicationToMyrepoRepository'] + if args.offline: + cmd = cmd + ['--offline'] + subprocess.run(cmd, shell=False, cwd=ANDROID_PROJECT_DIR, check=True) + os.makedirs(path.join(FINAL_REPO_PATH, 'org/opencv'), exist_ok=True) + shutil.move(path.join(ANDROID_PROJECT_DIR, 'OpenCV/build/repo/org/opencv', MAVEN_PACKAGE_NAME), path.join(FINAL_REPO_PATH, 'org/opencv', MAVEN_PACKAGE_NAME)) + print('Creating a maven repo from modified AAR (with cpp libraries)...') + cmd = ['./gradlew', 'publishModifiedPublicationToMyrepoRepository'] + if args.offline: + cmd = cmd + ['--offline'] + subprocess.run(cmd, shell=False, cwd=ANDROID_PROJECT_DIR, check=True) + shutil.copytree(path.join(ANDROID_PROJECT_DIR, 'OpenCV/build/repo/org/opencv', MAVEN_PACKAGE_NAME), path.join(FINAL_REPO_PATH, 'org/opencv', MAVEN_PACKAGE_NAME), dirs_exist_ok=True) + print('Done') +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Builds AAR with static C++ libs from OpenCV SDK') + parser.add_argument('opencv_sdk_path') + parser.add_argument('--android_compile_sdk', default='31') + parser.add_argument('--android_min_sdk', default='21') + parser.add_argument('--android_target_sdk', default='31') + parser.add_argument('--java_version', default='1_8') + parser.add_argument('--ndk_location', default='') + parser.add_argument('--cmake_location', default='') + parser.add_argument('--offline', action='store_true', help='Force Gradle use offline mode') + args = parser.parse_args() + main(args) + +# File: opencv-master/platforms/android/ndk-25.config.py +ANDROID_NATIVE_API_LEVEL = int(os.environ.get('ANDROID_NATIVE_API_LEVEL', 32)) +cmake_common_vars = {'ANDROID_COMPILE_SDK_VERSION': os.environ.get('ANDROID_COMPILE_SDK_VERSION', 32), 'ANDROID_TARGET_SDK_VERSION': os.environ.get('ANDROID_TARGET_SDK_VERSION', 32), 'ANDROID_MIN_SDK_VERSION': os.environ.get('ANDROID_MIN_SDK_VERSION', ANDROID_NATIVE_API_LEVEL), 'ANDROID_GRADLE_PLUGIN_VERSION': '7.3.1', 'GRADLE_VERSION': '7.5.1', 'KOTLIN_PLUGIN_VERSION': '1.8.20'} +ABIs = 
[ABI('2', 'armeabi-v7a', None, ndk_api_level=ANDROID_NATIVE_API_LEVEL, cmake_vars=cmake_common_vars), ABI('3', 'arm64-v8a', None, ndk_api_level=ANDROID_NATIVE_API_LEVEL, cmake_vars=cmake_common_vars), ABI('5', 'x86_64', None, ndk_api_level=ANDROID_NATIVE_API_LEVEL, cmake_vars=cmake_common_vars), ABI('4', 'x86', None, ndk_api_level=ANDROID_NATIVE_API_LEVEL, cmake_vars=cmake_common_vars)] + +# File: opencv-master/platforms/apple/build_xcframework.py +"""""" +import sys, os, argparse, pathlib, traceback, contextlib, shutil +from cv_build_utils import execute, print_error, print_header, get_xcode_version, get_cmake_version +if __name__ == '__main__': + assert sys.version_info >= (3, 6), 'Python 3.6 or later is required! Current version is {}'.format(sys.version_info) + assert get_cmake_version() >= (3, 18, 5), 'CMake 3.18.5 or later is required. Current version is {}'.format(get_cmake_version()) + assert get_xcode_version() >= (12, 2), 'Xcode 12.2 command line tools or later are required! Current version is {}. '.format(get_xcode_version()) + 'Run xcode-select to switch if you have multiple Xcode installs.' + description = '\n This script builds OpenCV into an xcframework supporting the Apple platforms of your choice.\n ' + epilog = '\n Any arguments that are not recognized by this script are passed through to the ios/osx build_framework.py scripts.\n ' + parser = argparse.ArgumentParser(description=description, epilog=epilog) + parser.add_argument('-o', '--out', metavar='OUTDIR', help=' The directory where the xcframework will be created', required=True) + parser.add_argument('--framework_name', default='opencv2', help='Name of OpenCV xcframework (default: opencv2, will change to OpenCV in future version)') + parser.add_argument('--iphoneos_archs', default=None, help='select iPhoneOS target ARCHS. Default is "armv7,arm64"') + parser.add_argument('--iphonesimulator_archs', default=None, help='select iPhoneSimulator target ARCHS. Default is "x86_64,arm64"') + parser.add_argument('--visionos_archs', default=None, help='select visionOS target ARCHS. Default is "arm64"') + parser.add_argument('--visionsimulator_archs', default=None, help='select visionSimulator target ARCHS. Default is "arm64"') + parser.add_argument('--macos_archs', default=None, help='Select MacOS ARCHS. Default is "x86_64,arm64"') + parser.add_argument('--catalyst_archs', default=None, help='Select Catalyst ARCHS. 
Default is "x86_64,arm64"') + parser.add_argument('--build_only_specified_archs', default=False, action='store_true', help='if enabled, only directly specified archs are built and defaults are ignored') + (args, unknown_args) = parser.parse_known_args() + if unknown_args: + print('The following args are not recognized by this script and will be passed through to the ios/osx build_framework.py scripts: {}'.format(unknown_args)) + iphoneos_archs = args.iphoneos_archs + if not iphoneos_archs and (not args.build_only_specified_archs): + iphoneos_archs = 'armv7,arm64' + print('Using iPhoneOS ARCHS={}'.format(iphoneos_archs)) + iphonesimulator_archs = args.iphonesimulator_archs + if not iphonesimulator_archs and (not args.build_only_specified_archs): + iphonesimulator_archs = 'x86_64,arm64' + print('Using iPhoneSimulator ARCHS={}'.format(iphonesimulator_archs)) + visionos_archs = args.visionos_archs + print('Using visionOS ARCHS={}'.format(visionos_archs)) + visionsimulator_archs = args.visionsimulator_archs + print('Using visionSimulator ARCHS={}'.format(visionsimulator_archs)) + macos_archs = args.macos_archs + if not macos_archs and (not args.build_only_specified_archs): + macos_archs = 'x86_64,arm64' + print('Using MacOS ARCHS={}'.format(macos_archs)) + catalyst_archs = args.catalyst_archs + if not catalyst_archs and (not args.build_only_specified_archs): + catalyst_archs = 'x86_64,arm64' + print('Using Catalyst ARCHS={}'.format(catalyst_archs)) + try: + osx_script_path = os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../osx/build_framework.py') + ios_script_path = os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../ios/build_framework.py') + visionos_script_path = os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../ios/build_visionos_framework.py') + build_folders = [] + + def get_or_create_build_folder(base_dir, platform): + build_folder = '{}/{}'.format(base_dir, platform).replace(' ', '\\ ') + pathlib.Path(build_folder).mkdir(parents=True, exist_ok=True) + return build_folder + if iphoneos_archs: + build_folder = get_or_create_build_folder(args.out, 'iphoneos') + build_folders.append(build_folder) + command = ['python3', ios_script_path, build_folder, '--iphoneos_archs', iphoneos_archs, '--framework_name', args.framework_name, '--build_only_specified_archs'] + unknown_args + print_header('Building iPhoneOS frameworks') + print(command) + execute(command, cwd=os.getcwd()) + if iphonesimulator_archs: + build_folder = get_or_create_build_folder(args.out, 'iphonesimulator') + build_folders.append(build_folder) + command = ['python3', ios_script_path, build_folder, '--iphonesimulator_archs', iphonesimulator_archs, '--framework_name', args.framework_name, '--build_only_specified_archs'] + unknown_args + print_header('Building iPhoneSimulator frameworks') + execute(command, cwd=os.getcwd()) + if visionos_archs: + build_folder = get_or_create_build_folder(args.out, 'visionos') + build_folders.append(build_folder) + command = ['python3', visionos_script_path, build_folder, '--visionos_archs', visionos_archs, '--framework_name', args.framework_name, '--build_only_specified_archs'] + unknown_args + print_header('Building visionOS frameworks') + print(command) + execute(command, cwd=os.getcwd()) + if visionsimulator_archs: + build_folder = get_or_create_build_folder(args.out, 'visionsimulator') + build_folders.append(build_folder) + command = ['python3', visionos_script_path, build_folder, '--visionsimulator_archs', visionsimulator_archs, 
'--framework_name', args.framework_name, '--build_only_specified_archs'] + unknown_args + print_header('Building visionSimulator frameworks') + execute(command, cwd=os.getcwd()) + if macos_archs: + build_folder = get_or_create_build_folder(args.out, 'macos') + build_folders.append(build_folder) + command = ['python3', osx_script_path, build_folder, '--macos_archs', macos_archs, '--framework_name', args.framework_name, '--build_only_specified_archs'] + unknown_args + print_header('Building MacOS frameworks') + execute(command, cwd=os.getcwd()) + if catalyst_archs: + build_folder = get_or_create_build_folder(args.out, 'catalyst') + build_folders.append(build_folder) + command = ['python3', osx_script_path, build_folder, '--catalyst_archs', catalyst_archs, '--framework_name', args.framework_name, '--build_only_specified_archs'] + unknown_args + print_header('Building Catalyst frameworks') + execute(command, cwd=os.getcwd()) + xcframework_path = '{}/{}.xcframework'.format(args.out, args.framework_name) + print_header('Building {}'.format(xcframework_path)) + with contextlib.suppress(FileNotFoundError): + shutil.rmtree(xcframework_path) + print('Removed existing xcframework at {}'.format(xcframework_path)) + xcframework_build_command = ['xcodebuild', '-create-xcframework', '-output', xcframework_path] + for folder in build_folders: + xcframework_build_command += ['-framework', '{}/{}.framework'.format(folder, args.framework_name)] + execute(xcframework_build_command, cwd=os.getcwd()) + print('') + print_header('Finished building {}'.format(xcframework_path)) + except Exception as e: + print_error(e) + traceback.print_exc(file=sys.stderr) + sys.exit(1) + +# File: opencv-master/platforms/apple/cv_build_utils.py +"""""" +from __future__ import print_function +import sys, re +from subprocess import check_call, check_output, CalledProcessError + +def execute(cmd, cwd=None): + print('Executing: %s in %s' % (cmd, cwd), file=sys.stderr) + print('Executing: ' + ' '.join(cmd)) + retcode = check_call(cmd, cwd=cwd) + if retcode != 0: + raise Exception('Child returned:', retcode) + +def print_header(text): + print('=' * 60) + print(text) + print('=' * 60) + +def print_error(text): + print('=' * 60, file=sys.stderr) + print('ERROR: %s' % text, file=sys.stderr) + print('=' * 60, file=sys.stderr) + +def get_xcode_major(): + ret = check_output(['xcodebuild', '-version']).decode('utf-8') + m = re.match('Xcode\\s+(\\d+)\\..*', ret, flags=re.IGNORECASE) + if m: + return int(m.group(1)) + else: + raise Exception('Failed to parse Xcode version') + +def get_xcode_version(): + ret = check_output(['xcodebuild', '-version']).decode('utf-8') + m = re.match('Xcode\\s+(\\d+)\\.(\\d+)', ret, flags=re.IGNORECASE) + if m: + return (int(m.group(1)), int(m.group(2))) + else: + raise Exception('Failed to parse Xcode version') + +def get_xcode_setting(var, projectdir): + ret = check_output(['xcodebuild', '-showBuildSettings'], cwd=projectdir).decode('utf-8') + m = re.search('\\s' + var + ' = (.*)', ret) + if m: + return m.group(1) + else: + raise Exception('Failed to parse Xcode settings') + +def get_cmake_version(): + ret = check_output(['cmake', '--version']).decode('utf-8') + m = re.match('cmake\\sversion\\s+(\\d+)\\.(\\d+).(\\d+)', ret, flags=re.IGNORECASE) + if m: + return (int(m.group(1)), int(m.group(2)), int(m.group(3))) + else: + raise Exception('Failed to parse CMake version') + +# File: opencv-master/platforms/ios/build_docs.py +"""""" +from __future__ import print_function +import os, sys, multiprocessing, argparse, 
traceback +from subprocess import check_call, check_output, CalledProcessError, Popen + +def execute(cmd, cwd=None, output=None): + if not output: + print('Executing: %s in %s' % (cmd, cwd), file=sys.stderr) + print('Executing: ' + ' '.join(cmd)) + retcode = check_call(cmd, cwd=cwd) + if retcode != 0: + raise Exception('Child returned:', retcode) + else: + with open(output, 'a') as f: + f.flush() + p = Popen(cmd, cwd=cwd, stdout=f) + os.waitpid(p.pid, 0) + +class DocBuilder: + + def __init__(self, script_dir, framework_dir, output_dir, framework_header, framework_name, arch, target): + self.script_dir = script_dir + self.framework_dir = framework_dir + self.output_dir = output_dir + self.framework_header = framework_header + self.framework_name = framework_name + self.arch = arch + self.target = target + + def _build(self): + if not os.path.isdir(self.output_dir): + os.makedirs(self.output_dir) + self.buildDocs() + + def build(self): + try: + self._build() + except Exception as e: + print('=' * 60, file=sys.stderr) + print('ERROR: %s' % e, file=sys.stderr) + print('=' * 60, file=sys.stderr) + traceback.print_exc(file=sys.stderr) + sys.exit(1) + + def getToolchain(self): + return None + + def getSourceKitten(self): + ret = check_output(['gem', 'which', 'jazzy']) + if ret.find('ERROR:') == 0: + raise Exception('Failed to find jazzy') + else: + return os.path.join(ret[0:ret.rfind('/')], '../bin/sourcekitten') + + def buildDocs(self): + sourceKitten = self.getSourceKitten() + sourceKittenSwiftDoc = [sourceKitten, 'doc', '--module-name', self.framework_name, '--', '-project', self.framework_name + '.xcodeproj', 'ARCHS=' + self.arch, '-sdk', self.target, '-configuration', 'Release', '-parallelizeTargets', '-jobs', str(multiprocessing.cpu_count()), '-target', 'opencv_objc_framework'] + execute(sourceKittenSwiftDoc, cwd=self.framework_dir, output=os.path.join(self.output_dir, 'swiftDoc.json')) + sdk_dir = check_output(['xcrun', '--show-sdk-path', '--sdk', self.target]).rstrip() + sourceKittenObjcDoc = [sourceKitten, 'doc', '--objc', self.framework_header, '--', '-x', 'objective-c', '-isysroot', sdk_dir, '-fmodules'] + print(sourceKittenObjcDoc) + execute(sourceKittenObjcDoc, cwd=self.framework_dir, output=os.path.join(self.output_dir, 'objcDoc.json')) + execute(['jazzy', '--author', 'OpenCV', '--author_url', 'http://opencv.org', '--github_url', 'https://github.com/opencv/opencv', '--module', self.framework_name, '--undocumented-text', '""', '--sourcekitten-sourcefile', 'swiftDoc.json,objcDoc.json'], cwd=self.output_dir) + +class iOSDocBuilder(DocBuilder): + + def getToolchain(self): + return None +if __name__ == '__main__': + script_dir = os.path.abspath(os.path.dirname(sys.argv[0])) + parser = argparse.ArgumentParser(description='The script builds OpenCV docs for iOS.') + parser.add_argument('framework_dir', metavar='FRAMEWORK_DIR', help='folder where framework build files are located') + parser.add_argument('--output_dir', default=None, help='folder where docs will be built (default is "../doc_build" relative to framework_dir)') + parser.add_argument('--framework_header', default=None, help='umbrella header for OpenCV framework (default is "../../../lib/Release/{framework_name}.framework/Headers/{framework_name}.h")') + parser.add_argument('--framework_name', default='opencv2', help='Name of OpenCV framework (default: opencv2, will change to OpenCV in future version)') + args = parser.parse_args() + arch = 'x86_64' + target = 'iphonesimulator' + b = iOSDocBuilder(script_dir, args.framework_dir, 
args.output_dir if args.output_dir else os.path.join(args.framework_dir, '../doc_build'), args.framework_header if args.framework_header else os.path.join(args.framework_dir, '../../../lib/Release/' + args.framework_name + '.framework/Headers/' + args.framework_name + '.h'), args.framework_name, arch, target) + b.build() + +# File: opencv-master/platforms/ios/build_framework.py +"""""" +from __future__ import print_function, unicode_literals +import glob, os, os.path, shutil, string, sys, argparse, traceback, multiprocessing, codecs, io +from subprocess import check_call, check_output, CalledProcessError +if sys.version_info >= (3, 8): + + def copy_tree(src, dst): + shutil.copytree(src, dst, dirs_exist_ok=True) +else: + from distutils.dir_util import copy_tree +sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../apple')) +from cv_build_utils import execute, print_error, get_xcode_major, get_xcode_setting, get_xcode_version, get_cmake_version +IPHONEOS_DEPLOYMENT_TARGET = '9.0' +CURRENT_FILE_DIR = os.path.dirname(__file__) + +class Builder: + + def __init__(self, opencv, contrib, dynamic, bitcodedisabled, exclude, disable, enablenonfree, targets, debug, debug_info, framework_name, run_tests, build_docs, swiftdisabled): + self.opencv = os.path.abspath(opencv) + self.contrib = None + if contrib: + modpath = os.path.join(contrib, 'modules') + if os.path.isdir(modpath): + self.contrib = os.path.abspath(modpath) + else: + print('Note: contrib repository is bad - modules subfolder not found', file=sys.stderr) + self.dynamic = dynamic + self.bitcodedisabled = bitcodedisabled + self.exclude = exclude + self.build_objc_wrapper = not 'objc' in self.exclude + self.disable = disable + self.enablenonfree = enablenonfree + self.targets = targets + self.debug = debug + self.debug_info = debug_info + self.framework_name = framework_name + self.run_tests = run_tests + self.build_docs = build_docs + self.swiftdisabled = swiftdisabled + + def checkCMakeVersion(self): + if get_xcode_version() >= (12, 2): + assert get_cmake_version() >= (3, 19), 'CMake 3.19 or later is required when building with Xcode 12.2 or greater. Current version is {}'.format(get_cmake_version()) + else: + assert get_cmake_version() >= (3, 17), 'CMake 3.17 or later is required. 
Current version is {}'.format(get_cmake_version()) + + def getBuildDir(self, parent, target): + res = os.path.join(parent, 'build-%s-%s' % (target[0].lower(), target[1].lower())) + if not os.path.isdir(res): + os.makedirs(res) + return os.path.abspath(res) + + def _build(self, outdir): + self.checkCMakeVersion() + outdir = os.path.abspath(outdir) + if not os.path.isdir(outdir): + os.makedirs(outdir) + main_working_dir = os.path.join(outdir, 'build') + dirs = [] + xcode_ver = get_xcode_major() + alltargets = [] + for target_group in self.targets: + for arch in target_group[0]: + current = (arch, target_group[1]) + alltargets.append(current) + for target in alltargets: + main_build_dir = self.getBuildDir(main_working_dir, target) + dirs.append(main_build_dir) + cmake_flags = [] + if self.contrib: + cmake_flags.append('-DOPENCV_EXTRA_MODULES_PATH=%s' % self.contrib) + if xcode_ver >= 7 and target[1] == 'iPhoneOS' and (self.bitcodedisabled == False): + cmake_flags.append('-DCMAKE_C_FLAGS=-fembed-bitcode') + cmake_flags.append('-DCMAKE_CXX_FLAGS=-fembed-bitcode') + if xcode_ver >= 7 and target[1] == 'Catalyst': + sdk_path = check_output(['xcodebuild', '-version', '-sdk', 'macosx', 'Path']).decode('utf-8').rstrip() + c_flags = ['-target %s-apple-ios14.0-macabi' % target[0], '-isysroot %s' % sdk_path, '-iframework %s/System/iOSSupport/System/Library/Frameworks' % sdk_path, '-isystem %s/System/iOSSupport/usr/include' % sdk_path] + if self.bitcodedisabled == False: + c_flags.append('-fembed-bitcode') + cmake_flags.append('-DCMAKE_C_FLAGS=' + ' '.join(c_flags)) + cmake_flags.append('-DCMAKE_CXX_FLAGS=' + ' '.join(c_flags)) + cmake_flags.append('-DCMAKE_EXE_LINKER_FLAGS=' + ' '.join(c_flags)) + cmake_flags.append('-DSWIFT_DISABLED=1') + cmake_flags.append('-DIOS=1') + cmake_flags.append('-DMAC_CATALYST=1') + cmake_flags.append('-DWITH_OPENCL=OFF') + cmake_flags.append('-DCMAKE_OSX_SYSROOT=%s' % sdk_path) + cmake_flags.append('-DCMAKE_CXX_COMPILER_WORKS=TRUE') + cmake_flags.append('-DCMAKE_C_COMPILER_WORKS=TRUE') + self.buildOne(target[0], target[1], main_build_dir, cmake_flags) + if not self.dynamic: + self.mergeLibs(main_build_dir) + else: + self.makeDynamicLib(main_build_dir) + self.makeFramework(outdir, dirs) + if self.build_objc_wrapper: + if self.run_tests: + check_call([sys.argv[0].replace('build_framework', 'run_tests'), '--framework_dir=' + outdir, '--framework_name=' + self.framework_name, dirs[0] + '/modules/objc_bindings_generator/{}/test'.format(self.getObjcTarget(target[1]))]) + else: + print('To run tests call:') + print(sys.argv[0].replace('build_framework', 'run_tests') + ' --framework_dir=' + outdir + ' --framework_name=' + self.framework_name + ' ' + dirs[0] + '/modules/objc_bindings_generator/{}/test'.format(self.getObjcTarget(target[1]))) + if self.build_docs: + check_call([sys.argv[0].replace('build_framework', 'build_docs'), dirs[0] + '/modules/objc/framework_build']) + doc_path = os.path.join(dirs[0], 'modules', 'objc', 'doc_build', 'docs') + if os.path.exists(doc_path): + shutil.copytree(doc_path, os.path.join(outdir, 'docs')) + shutil.copyfile(os.path.join(self.opencv, 'doc', 'opencv.ico'), os.path.join(outdir, 'docs', 'favicon.ico')) + else: + print('To build docs call:') + print(sys.argv[0].replace('build_framework', 'build_docs') + ' ' + dirs[0] + '/modules/objc/framework_build') + self.copy_samples(outdir) + if self.swiftdisabled: + swift_sources_dir = os.path.join(outdir, 'SwiftSources') + if not os.path.exists(swift_sources_dir): + os.makedirs(swift_sources_dir) + for 
(root, dirs, files) in os.walk(dirs[0]): + for file in files: + if file.endswith('.swift') and file.find('Test') == -1: + with io.open(os.path.join(root, file), encoding='utf-8', errors='ignore') as file_in: + body = file_in.read() + if body.find('import Foundation') != -1: + insert_pos = body.find('import Foundation') + len('import Foundation') + 1 + body = body[:insert_pos] + 'import ' + self.framework_name + '\n' + body[insert_pos:] + else: + body = 'import ' + self.framework_name + '\n\n' + body + with codecs.open(os.path.join(swift_sources_dir, file), 'w', 'utf-8') as file_out: + file_out.write(body) + + def build(self, outdir): + try: + self._build(outdir) + except Exception as e: + print_error(e) + traceback.print_exc(file=sys.stderr) + sys.exit(1) + + def getToolchain(self, arch, target): + return None + + def getConfiguration(self): + return 'Debug' if self.debug else 'Release' + + def getCMakeArgs(self, arch, target): + args = ['cmake', '-GXcode', '-DAPPLE_FRAMEWORK=ON', '-DCMAKE_INSTALL_PREFIX=install', '-DCMAKE_BUILD_TYPE=%s' % self.getConfiguration(), '-DOPENCV_INCLUDE_INSTALL_PATH=include', '-DOPENCV_3P_LIB_INSTALL_PATH=lib/3rdparty', '-DFRAMEWORK_NAME=%s' % self.framework_name] + if self.dynamic: + args += ['-DDYNAMIC_PLIST=ON'] + if self.enablenonfree: + args += ['-DOPENCV_ENABLE_NONFREE=ON'] + if self.debug_info: + args += ['-DBUILD_WITH_DEBUG_INFO=ON'] + if len(self.exclude) > 0: + args += ['-DBUILD_opencv_%s=OFF' % m for m in self.exclude] + if len(self.disable) > 0: + args += ['-DWITH_%s=OFF' % f for f in self.disable] + return args + + def getBuildCommand(self, arch, target): + buildcmd = ['xcodebuild'] + if (self.dynamic or self.build_objc_wrapper) and (not self.bitcodedisabled) and (target == 'iPhoneOS'): + buildcmd.append('BITCODE_GENERATION_MODE=bitcode') + buildcmd += ['IPHONEOS_DEPLOYMENT_TARGET=' + os.environ['IPHONEOS_DEPLOYMENT_TARGET'], 'ARCHS=%s' % arch, '-sdk', target.lower(), '-configuration', self.getConfiguration(), '-parallelizeTargets', '-jobs', str(multiprocessing.cpu_count())] + return buildcmd + + def getInfoPlist(self, builddirs): + return os.path.join(builddirs[0], 'ios', 'Info.plist') + + def getObjcTarget(self, target): + return 'ios' + + def makeCMakeCmd(self, arch, target, dir, cmakeargs=[]): + toolchain = self.getToolchain(arch, target) + cmakecmd = self.getCMakeArgs(arch, target) + (['-DCMAKE_TOOLCHAIN_FILE=%s' % toolchain] if toolchain is not None else []) + if target.lower().startswith('iphoneos') or target.lower().startswith('xros'): + cmakecmd.append('-DCPU_BASELINE=DETECT') + if target.lower().startswith('iphonesimulator') or target.lower().startswith('xrsimulator'): + build_arch = check_output(['uname', '-m']).decode('utf-8').rstrip() + if build_arch != arch: + print('build_arch (%s) != arch (%s)' % (build_arch, arch)) + cmakecmd.append('-DCMAKE_SYSTEM_PROCESSOR=' + arch) + cmakecmd.append('-DCMAKE_OSX_ARCHITECTURES=' + arch) + cmakecmd.append('-DCPU_BASELINE=DETECT') + cmakecmd.append('-DCMAKE_CROSSCOMPILING=ON') + cmakecmd.append('-DOPENCV_WORKAROUND_CMAKE_20989=ON') + if target.lower() == 'catalyst': + build_arch = check_output(['uname', '-m']).decode('utf-8').rstrip() + if build_arch != arch: + print('build_arch (%s) != arch (%s)' % (build_arch, arch)) + cmakecmd.append('-DCMAKE_SYSTEM_PROCESSOR=' + arch) + cmakecmd.append('-DCMAKE_OSX_ARCHITECTURES=' + arch) + cmakecmd.append('-DCPU_BASELINE=DETECT') + cmakecmd.append('-DCMAKE_CROSSCOMPILING=ON') + cmakecmd.append('-DOPENCV_WORKAROUND_CMAKE_20989=ON') + if target.lower() == 
'macosx': + build_arch = check_output(['uname', '-m']).decode('utf-8').rstrip() + if build_arch != arch: + print('build_arch (%s) != arch (%s)' % (build_arch, arch)) + cmakecmd.append('-DCMAKE_SYSTEM_PROCESSOR=' + arch) + cmakecmd.append('-DCMAKE_OSX_ARCHITECTURES=' + arch) + cmakecmd.append('-DCPU_BASELINE=DETECT') + cmakecmd.append('-DCMAKE_CROSSCOMPILING=ON') + cmakecmd.append('-DOPENCV_WORKAROUND_CMAKE_20989=ON') + cmakecmd.append(dir) + cmakecmd.extend(cmakeargs) + return cmakecmd + + def buildOne(self, arch, target, builddir, cmakeargs=[]): + cmakecmd = self.makeCMakeCmd(arch, target, self.opencv, cmakeargs) + print('') + print('=================================') + print('CMake') + print('=================================') + print('') + execute(cmakecmd, cwd=builddir) + print('') + print('=================================') + print('Xcodebuild') + print('=================================') + print('') + clean_dir = os.path.join(builddir, 'install') + if os.path.isdir(clean_dir): + shutil.rmtree(clean_dir) + buildcmd = self.getBuildCommand(arch, target) + execute(buildcmd + ['-target', 'ALL_BUILD', 'build'], cwd=builddir) + execute(['cmake', '-DBUILD_TYPE=%s' % self.getConfiguration(), '-P', 'cmake_install.cmake'], cwd=builddir) + if self.build_objc_wrapper: + cmakecmd = self.makeCMakeCmd(arch, target, builddir + '/modules/objc_bindings_generator/{}/gen'.format(self.getObjcTarget(target)), cmakeargs) + if self.swiftdisabled: + cmakecmd.append('-DSWIFT_DISABLED=1') + cmakecmd.append('-DBUILD_ROOT=%s' % builddir) + cmakecmd.append('-DCMAKE_INSTALL_NAME_TOOL=install_name_tool') + cmakecmd.append('--no-warn-unused-cli') + execute(cmakecmd, cwd=builddir + '/modules/objc/framework_build') + execute(buildcmd + ['-target', 'ALL_BUILD', 'build'], cwd=builddir + '/modules/objc/framework_build') + execute(['cmake', '-DBUILD_TYPE=%s' % self.getConfiguration(), '-DCMAKE_INSTALL_PREFIX=%s' % (builddir + '/install'), '-P', 'cmake_install.cmake'], cwd=builddir + '/modules/objc/framework_build') + + def mergeLibs(self, builddir): + res = os.path.join(builddir, 'lib', self.getConfiguration(), 'libopencv_merged.a') + libs = glob.glob(os.path.join(builddir, 'install', 'lib', '*.a')) + module = [os.path.join(builddir, 'install', 'lib', self.framework_name + '.framework', self.framework_name)] if self.build_objc_wrapper else [] + libs3 = glob.glob(os.path.join(builddir, 'install', 'lib', '3rdparty', '*.a')) + print('Merging libraries:\n\t%s' % '\n\t'.join(libs + libs3 + module), file=sys.stderr) + execute(['libtool', '-static', '-o', res] + libs + libs3 + module) + + def makeDynamicLib(self, builddir): + target = builddir[builddir.rfind('build-') + 6:] + target_platform = target[target.rfind('-') + 1:] + is_device = target_platform == 'iphoneos' or target_platform == 'visionos' or target_platform == 'catalyst' + framework_dir = os.path.join(builddir, 'install', 'lib', self.framework_name + '.framework') + if not os.path.exists(framework_dir): + os.makedirs(framework_dir) + res = os.path.join(framework_dir, self.framework_name) + libs = glob.glob(os.path.join(builddir, 'install', 'lib', '*.a')) + if self.build_objc_wrapper: + module = [os.path.join(builddir, 'lib', self.getConfiguration(), self.framework_name + '.framework', self.framework_name)] + else: + module = [] + libs3 = glob.glob(os.path.join(builddir, 'install', 'lib', '3rdparty', '*.a')) + if os.environ.get('IPHONEOS_DEPLOYMENT_TARGET'): + link_target = target[:target.find('-')] + '-apple-ios' + os.environ['IPHONEOS_DEPLOYMENT_TARGET'] + 
('-simulator' if target.endswith('simulator') else '') + elif target_platform == 'catalyst': + link_target = '%s-apple-ios14.0-macabi' % target[:target.find('-')] + else: + link_target = '%s-apple-darwin' % target[:target.find('-')] + bitcode_flags = ['-fembed-bitcode', '-Xlinker', '-bitcode_verify'] if is_device and (not self.bitcodedisabled) else [] + toolchain_dir = get_xcode_setting('TOOLCHAIN_DIR', builddir) + sdk_dir = get_xcode_setting('SDK_DIR', builddir) + framework_options = [] + swift_link_dirs = ['-L' + toolchain_dir + '/usr/lib/swift/' + target_platform, '-L/usr/lib/swift'] + if target_platform == 'catalyst': + swift_link_dirs = ['-L' + toolchain_dir + '/usr/lib/swift/' + 'maccatalyst', '-L/usr/lib/swift'] + framework_options = ['-iframework', '%s/System/iOSSupport/System/Library/Frameworks' % sdk_dir, '-framework', 'AVFoundation', '-framework', 'UIKit', '-framework', 'CoreGraphics', '-framework', 'CoreImage', '-framework', 'CoreMedia', '-framework', 'QuartzCore'] + elif target_platform == 'macosx': + framework_options = ['-framework', 'AVFoundation', '-framework', 'AppKit', '-framework', 'CoreGraphics', '-framework', 'CoreImage', '-framework', 'CoreMedia', '-framework', 'QuartzCore', '-framework', 'Accelerate', '-framework', 'OpenCL'] + elif target_platform == 'iphoneos' or target_platform == 'iphonesimulator' or target_platform == 'xros' or (target_platform == 'xrsimulator'): + framework_options = ['-iframework', '%s/System/iOSSupport/System/Library/Frameworks' % sdk_dir, '-framework', 'AVFoundation', '-framework', 'CoreGraphics', '-framework', 'CoreImage', '-framework', 'CoreMedia', '-framework', 'QuartzCore', '-framework', 'Accelerate', '-framework', 'UIKit', '-framework', 'CoreVideo'] + execute(['clang++', '-Xlinker', '-rpath', '-Xlinker', '/usr/lib/swift', '-target', link_target, '-isysroot', sdk_dir] + framework_options + ['-install_name', '@rpath/' + self.framework_name + '.framework/' + self.framework_name, '-dynamiclib', '-dead_strip', '-fobjc-link-runtime', '-all_load', '-o', res] + swift_link_dirs + bitcode_flags + module + libs + libs3) + + def makeFramework(self, outdir, builddirs): + name = self.framework_name + framework_dir = os.path.join(outdir, '%s.framework' % name) + if os.path.isdir(framework_dir): + shutil.rmtree(framework_dir) + os.makedirs(framework_dir) + if self.dynamic: + dstdir = framework_dir + else: + dstdir = os.path.join(framework_dir, 'Versions', 'A') + shutil.copytree(os.path.join(builddirs[0], 'install', 'include', 'opencv2'), os.path.join(dstdir, 'Headers')) + if name != 'opencv2': + for (dirname, dirs, files) in os.walk(os.path.join(dstdir, 'Headers')): + for filename in files: + filepath = os.path.join(dirname, filename) + with codecs.open(filepath, 'r', 'utf-8') as file: + body = file.read() + body = body.replace('include "opencv2/', 'include "' + name + '/') + body = body.replace('include <opencv2/', 'include <' + name + '/') + body = body.replace('#import <opencv2/opencv.hpp>', '#import <' + self.framework_name + '/' + self.framework_name + '.h>') + body = body.replace('OpenCV.framework', self.framework_name + '.framework') + body = body.replace('../../OpenCV/**', '../../' + self.framework_name + '/**') + with open(filepath, 'w') as file: + file.write(body) +if __name__ == '__main__': + folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) + parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for iOS.') + parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework') + parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with
opencv repository (default is "../.." relative to script location)') + parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)') + parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework. To exclude multiple, specify this flag again, e.g. "--without video --without objc"') + parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF). To disable multiple, specify this flag again, e.g. "--disable tbb --disable openmp"') + parser.add_argument('--dynamic', default=False, action='store_true', help='build dynamic framework (default is "False" - builds static framework)') + parser.add_argument('--disable-bitcode', default=False, dest='bitcodedisabled', action='store_true', help='disable bitcode (enabled by default)') + parser.add_argument('--iphoneos_deployment_target', default=os.environ.get('IPHONEOS_DEPLOYMENT_TARGET', IPHONEOS_DEPLOYMENT_TARGET), help='specify IPHONEOS_DEPLOYMENT_TARGET') + parser.add_argument('--build_only_specified_archs', default=False, action='store_true', help='if enabled, only directly specified archs are built and defaults are ignored') + parser.add_argument('--iphoneos_archs', default=None, help='select iPhoneOS target ARCHS. Default is "armv7,armv7s,arm64"') + parser.add_argument('--iphonesimulator_archs', default=None, help='select iPhoneSimulator target ARCHS. Default is "i386,x86_64"') + parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)') + parser.add_argument('--debug', default=False, dest='debug', action='store_true', help='Build "Debug" binaries (disabled by default)') + parser.add_argument('--debug_info', default=False, dest='debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)') + parser.add_argument('--framework_name', default='opencv2', dest='framework_name', help='Name of OpenCV framework (default: opencv2, will change to OpenCV in future version)') + parser.add_argument('--legacy_build', default=False, dest='legacy_build', action='store_true', help='Build legacy opencv2 framework (default: False, equivalent to "--framework_name=opencv2 --without=objc")') + parser.add_argument('--run_tests', default=False, dest='run_tests', action='store_true', help='Run tests') + parser.add_argument('--build_docs', default=False, dest='build_docs', action='store_true', help='Build docs') + parser.add_argument('--disable-swift', default=False, dest='swiftdisabled', action='store_true', help='Disable building of Swift extensions') + (args, unknown_args) = parser.parse_known_args() + if unknown_args: + print('The following args are not recognized and will not be used: %s' % unknown_args) + os.environ['IPHONEOS_DEPLOYMENT_TARGET'] = args.iphoneos_deployment_target + print('Using IPHONEOS_DEPLOYMENT_TARGET=' + os.environ['IPHONEOS_DEPLOYMENT_TARGET']) + iphoneos_archs = None + if args.iphoneos_archs: + iphoneos_archs = args.iphoneos_archs.split(',') + elif not args.build_only_specified_archs: + iphoneos_archs = ['armv7', 'armv7s', 'arm64'] + print('Using iPhoneOS ARCHS=' + str(iphoneos_archs)) + iphonesimulator_archs = None + if args.iphonesimulator_archs: + iphonesimulator_archs = args.iphonesimulator_archs.split(',') + elif not args.build_only_specified_archs: + 
iphonesimulator_archs = ['i386', 'x86_64'] + print('Using iPhoneSimulator ARCHS=' + str(iphonesimulator_archs)) + if iphoneos_archs and iphonesimulator_archs: + duplicate_archs = set(iphoneos_archs).intersection(iphonesimulator_archs) + if duplicate_archs: + print_error('Cannot have the same architecture for multiple platforms in a fat framework! Consider using build_xcframework.py in the apple platform folder instead. Duplicate archs are %s' % duplicate_archs) + exit(1) + if args.legacy_build: + args.framework_name = 'opencv2' + if not 'objc' in args.without: + args.without.append('objc') + targets = [] + if os.environ.get('BUILD_PRECOMMIT', None): + if not iphoneos_archs: + print_error('--iphoneos_archs must have at least one value') + sys.exit(1) + targets.append((iphoneos_archs, 'iPhoneOS')) + else: + if not iphoneos_archs and (not iphonesimulator_archs): + print_error('--iphoneos_archs and --iphonesimulator_archs are undefined; nothing will be built.') + sys.exit(1) + if iphoneos_archs: + targets.append((iphoneos_archs, 'iPhoneOS')) + if iphonesimulator_archs: + targets.append((iphonesimulator_archs, 'iPhoneSimulator')) + b = iOSBuilder(args.opencv, args.contrib, args.dynamic, args.bitcodedisabled, args.without, args.disable, args.enablenonfree, targets, args.debug, args.debug_info, args.framework_name, args.run_tests, args.build_docs, args.swiftdisabled) + b.build(args.out) + +# File: opencv-master/platforms/ios/build_visionos_framework.py +"""""" +from __future__ import print_function +import os, os.path, sys, argparse, traceback, multiprocessing +from build_framework import Builder +sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../apple')) +from cv_build_utils import print_error, get_cmake_version +XROS_DEPLOYMENT_TARGET = '1.0' + +class visionOSBuilder(Builder): + + def checkCMakeVersion(self): + assert get_cmake_version() >= (3, 17), 'CMake 3.17 or later is required. Current version is {}'.format(get_cmake_version()) + + def getObjcTarget(self, target): + return 'visionos' + + def getToolchain(self, arch, target): + toolchain = os.path.join(self.opencv, 'platforms', 'ios', 'cmake', 'Toolchains', 'Toolchain-%s_Xcode.cmake' % target) + return toolchain + + def getCMakeArgs(self, arch, target): + args = Builder.getCMakeArgs(self, arch, target) + args = args + ['-DVISIONOS_ARCH=%s' % arch] + return args + + def getBuildCommand(self, arch, target): + buildcmd = ['xcodebuild', 'XROS_DEPLOYMENT_TARGET=' + os.environ['XROS_DEPLOYMENT_TARGET'], 'ARCHS=%s' % arch, '-sdk', target.lower(), '-configuration', 'Debug' if self.debug else 'Release', '-parallelizeTargets', '-jobs', str(multiprocessing.cpu_count())] + return buildcmd + + def getInfoPlist(self, builddirs): + return os.path.join(builddirs[0], 'visionos', 'Info.plist') +if __name__ == '__main__': + folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) + parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for visionOS.') + parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework') + parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)') + parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)') + parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework. 
To exclude multiple, specify this flag again, e.g. "--without video --without objc"') + parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF). To disable multiple, specify this flag again, e.g. "--disable tbb --disable openmp"') + parser.add_argument('--dynamic', default=False, action='store_true', help='build dynamic framework (default is "False" - builds static framework)') + parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)') + parser.add_argument('--visionos_deployment_target', default=os.environ.get('XROS_DEPLOYMENT_TARGET', XROS_DEPLOYMENT_TARGET), help='specify XROS_DEPLOYMENT_TARGET') + parser.add_argument('--visionos_archs', default=None, help='select visionOS target ARCHS. Default is none') + parser.add_argument('--visionsimulator_archs', default=None, help='select visionSimulator target ARCHS. Default is none') + parser.add_argument('--debug', action='store_true', help='Build "Debug" binaries (CMAKE_BUILD_TYPE=Debug)') + parser.add_argument('--debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)') + parser.add_argument('--framework_name', default='opencv2', dest='framework_name', help='Name of OpenCV framework (default: opencv2, will change to OpenCV in future version)') + parser.add_argument('--legacy_build', default=False, dest='legacy_build', action='store_true', help='Build legacy framework (default: False, equivalent to "--framework_name=opencv2 --without=objc")') + parser.add_argument('--run_tests', default=False, dest='run_tests', action='store_true', help='Run tests') + parser.add_argument('--build_docs', default=False, dest='build_docs', action='store_true', help='Build docs') + parser.add_argument('--disable-swift', default=False, dest='swiftdisabled', action='store_true', help='Disable building of Swift extensions') + (args, unknown_args) = parser.parse_known_args() + if unknown_args: + print('The following args are not recognized and will not be used: %s' % unknown_args) + os.environ['XROS_DEPLOYMENT_TARGET'] = args.visionos_deployment_target + print('Using XROS_DEPLOYMENT_TARGET=' + os.environ['XROS_DEPLOYMENT_TARGET']) + visionos_archs = None + if args.visionos_archs: + visionos_archs = args.visionos_archs.split(',') + print('Using visionOS ARCHS=' + str(visionos_archs)) + visionsimulator_archs = None + if args.visionsimulator_archs: + visionsimulator_archs = args.visionsimulator_archs.split(',') + print('Using visionSimulator ARCHS=' + str(visionsimulator_archs)) + if visionos_archs and visionsimulator_archs: + duplicate_archs = set(visionos_archs).intersection(visionsimulator_archs) + if duplicate_archs: + print_error('Cannot have the same architecture for multiple platforms in a fat framework! Consider using build_xcframework.py in the apple platform folder instead.
Duplicate archs are %s' % duplicate_archs) + exit(1) + if args.legacy_build: + args.framework_name = 'opencv2' + if not 'objc' in args.without: + args.without.append('objc') + targets = [] + if not visionos_archs and (not visionsimulator_archs): + print_error('--visionos_archs and --visionsimulator_archs are undefined; nothing will be built.') + sys.exit(1) + if visionos_archs: + targets.append((visionos_archs, 'XROS')) + if visionsimulator_archs: + (targets.append((visionsimulator_archs, 'XRSimulator')),) + b = visionOSBuilder(args.opencv, args.contrib, args.dynamic, True, args.without, args.disable, args.enablenonfree, targets, args.debug, args.debug_info, args.framework_name, args.run_tests, args.build_docs, args.swiftdisabled) + b.build(args.out) + +# File: opencv-master/platforms/js/build_js.py +import os, sys, subprocess, argparse, shutil, glob, re, multiprocessing +import logging as log +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + +class Fail(Exception): + + def __init__(self, text=None): + self.t = text + + def __str__(self): + return 'ERROR' if self.t is None else self.t + +def execute(cmd, shell=False): + try: + log.info('Executing: %s' % cmd) + env = os.environ.copy() + env['VERBOSE'] = '1' + retcode = subprocess.call(cmd, shell=shell, env=env) + if retcode < 0: + raise Fail('Child was terminated by signal: %s' % -retcode) + elif retcode > 0: + raise Fail('Child returned: %s' % retcode) + except OSError as e: + raise Fail('Execution failed: %d / %s' % (e.errno, e.strerror)) + +def rm_one(d): + d = os.path.abspath(d) + if os.path.exists(d): + if os.path.isdir(d): + log.info('Removing dir: %s', d) + shutil.rmtree(d) + elif os.path.isfile(d): + log.info('Removing file: %s', d) + os.remove(d) + +def check_dir(d, create=False, clean=False): + d = os.path.abspath(d) + log.info('Check dir %s (create: %s, clean: %s)', d, create, clean) + if os.path.exists(d): + if not os.path.isdir(d): + raise Fail('Not a directory: %s' % d) + if clean: + for x in glob.glob(os.path.join(d, '*')): + rm_one(x) + elif create: + os.makedirs(d) + return d + +def check_file(d): + d = os.path.abspath(d) + if os.path.exists(d): + if os.path.isfile(d): + return True + else: + return False + return False + +def find_file(name, path): + for (root, dirs, files) in os.walk(path): + if name in files: + return os.path.join(root, name) + +class Builder: + + def __init__(self, options): + self.options = options + self.build_dir = check_dir(options.build_dir, create=True) + self.opencv_dir = check_dir(options.opencv_dir) + print('-----------------------------------------------------------') + print('options.opencv_dir:', options.opencv_dir) + self.emscripten_dir = check_dir(options.emscripten_dir) + + def get_toolchain_file(self): + return os.path.join(self.emscripten_dir, 'cmake', 'Modules', 'Platform', 'Emscripten.cmake') + + def clean_build_dir(self): + for d in ['CMakeCache.txt', 'CMakeFiles/', 'bin/', 'libs/', 'lib/', 'modules']: + rm_one(d) + + def get_cmake_cmd(self): + cmd = ['cmake', '-DPYTHON_DEFAULT_EXECUTABLE=%s' % sys.executable, '-DENABLE_PIC=FALSE', '-DCMAKE_BUILD_TYPE=Release', "-DCPU_BASELINE=''", '-DCMAKE_INSTALL_PREFIX=/usr/local', "-DCPU_DISPATCH=''", '-DCV_TRACE=OFF', '-DBUILD_SHARED_LIBS=OFF', '-DWITH_1394=OFF', '-DWITH_ADE=OFF', '-DWITH_VTK=OFF', '-DWITH_EIGEN=OFF', '-DWITH_FFMPEG=OFF', '-DWITH_GSTREAMER=OFF', '-DWITH_GTK=OFF', '-DWITH_GTK_2_X=OFF', '-DWITH_IPP=OFF', '-DWITH_JASPER=OFF', '-DWITH_JPEG=OFF', '-DWITH_WEBP=OFF', '-DWITH_OPENEXR=OFF', '-DWITH_OPENGL=OFF', 
'-DWITH_OPENVX=OFF', '-DWITH_OPENNI=OFF', '-DWITH_OPENNI2=OFF', '-DWITH_PNG=OFF', '-DWITH_TBB=OFF', '-DWITH_TIFF=OFF', '-DWITH_V4L=OFF', '-DWITH_OPENCL=OFF', '-DWITH_OPENCL_SVM=OFF', '-DWITH_OPENCLAMDFFT=OFF', '-DWITH_OPENCLAMDBLAS=OFF', '-DWITH_GPHOTO2=OFF', '-DWITH_LAPACK=OFF', '-DWITH_ITT=OFF', '-DWITH_QUIRC=OFF', '-DBUILD_ZLIB=ON', '-DBUILD_opencv_apps=OFF', '-DBUILD_opencv_calib3d=ON', '-DBUILD_opencv_dnn=ON', '-DBUILD_opencv_features2d=ON', '-DBUILD_opencv_flann=ON', '-DBUILD_opencv_gapi=OFF', '-DBUILD_opencv_ml=OFF', '-DBUILD_opencv_photo=ON', '-DBUILD_opencv_imgcodecs=OFF', '-DBUILD_opencv_shape=OFF', '-DBUILD_opencv_videoio=OFF', '-DBUILD_opencv_videostab=OFF', '-DBUILD_opencv_highgui=OFF', '-DBUILD_opencv_superres=OFF', '-DBUILD_opencv_stitching=OFF', '-DBUILD_opencv_java=OFF', '-DBUILD_opencv_js=ON', '-DBUILD_opencv_python2=OFF', '-DBUILD_opencv_python3=OFF', '-DBUILD_EXAMPLES=ON', '-DBUILD_PACKAGE=OFF', '-DBUILD_TESTS=ON', '-DBUILD_PERF_TESTS=ON'] + if self.options.cmake_option: + cmd += self.options.cmake_option + if not self.options.cmake_option or all(['-DCMAKE_TOOLCHAIN_FILE' not in opt for opt in self.options.cmake_option]): + cmd.append("-DCMAKE_TOOLCHAIN_FILE='%s'" % self.get_toolchain_file()) + if self.options.build_doc: + cmd.append('-DBUILD_DOCS=ON') + else: + cmd.append('-DBUILD_DOCS=OFF') + if self.options.threads: + cmd.append('-DWITH_PTHREADS_PF=ON') + else: + cmd.append('-DWITH_PTHREADS_PF=OFF') + if self.options.simd: + cmd.append('-DCV_ENABLE_INTRINSICS=ON') + else: + cmd.append('-DCV_ENABLE_INTRINSICS=OFF') + if self.options.build_wasm_intrin_test: + cmd.append('-DBUILD_WASM_INTRIN_TESTS=ON') + else: + cmd.append('-DBUILD_WASM_INTRIN_TESTS=OFF') + if self.options.webnn: + cmd.append('-DWITH_WEBNN=ON') + flags = self.get_build_flags() + if flags: + cmd += ["-DCMAKE_C_FLAGS='%s'" % flags, "-DCMAKE_CXX_FLAGS='%s'" % flags] + return cmd + + def get_build_flags(self): + flags = '' + if self.options.build_wasm: + flags += '-s WASM=1 ' + elif self.options.disable_wasm: + flags += '-s WASM=0 ' + if not self.options.disable_single_file: + flags += '-s SINGLE_FILE=1 ' + if self.options.threads: + flags += '-s USE_PTHREADS=1 -s PTHREAD_POOL_SIZE=4 ' + else: + flags += '-s USE_PTHREADS=0 ' + if self.options.enable_exception: + flags += '-s DISABLE_EXCEPTION_CATCHING=0 ' + if self.options.simd: + flags += '-msimd128 ' + if self.options.build_flags: + flags += self.options.build_flags + ' ' + if self.options.webnn: + flags += '-s USE_WEBNN=1 ' + flags += '-s EXPORTED_FUNCTIONS="[\'_malloc\', \'_free\']"' + return flags + + def config(self): + cmd = self.get_cmake_cmd() + cmd.append(self.opencv_dir) + execute(cmd) + + def build_opencvjs(self): + execute(['make', '-j', str(multiprocessing.cpu_count()), 'opencv.js']) + + def build_test(self): + execute(['make', '-j', str(multiprocessing.cpu_count()), 'opencv_js_test']) + + def build_perf(self): + execute(['make', '-j', str(multiprocessing.cpu_count()), 'opencv_js_perf']) + + def build_doc(self): + execute(['make', '-j', str(multiprocessing.cpu_count()), 'doxygen']) + + def build_loader(self): + execute(['make', '-j', str(multiprocessing.cpu_count()), 'opencv_js_loader']) +if __name__ == '__main__': + log.basicConfig(format='%(message)s', level=log.DEBUG) + opencv_dir = os.path.abspath(os.path.join(SCRIPT_DIR, '../..')) + emscripten_dir = None + if 'EMSDK' in os.environ: + emscripten_dir = os.path.join(os.environ['EMSDK'], 'upstream', 'emscripten') + elif 'EMSCRIPTEN' in os.environ: + emscripten_dir = os.environ['EMSCRIPTEN'] + 
else: + log.warning("EMSCRIPTEN/EMSDK environment variable is not available. Please properly activate the Emscripten SDK and consider using the 'emcmake' launcher") + parser = argparse.ArgumentParser(description='Build OpenCV.js with Emscripten') + parser.add_argument('build_dir', help='Building directory (and output)') + parser.add_argument('--opencv_dir', default=opencv_dir, help='OpenCV source directory (default is "../.." relative to script location)') + parser.add_argument('--emscripten_dir', default=emscripten_dir, help="Path to Emscripten to use for build (deprecated in favor of 'emcmake' launcher)") + parser.add_argument('--build_wasm', action='store_true', help='Build OpenCV.js in WebAssembly format') + parser.add_argument('--disable_wasm', action='store_true', help='Build OpenCV.js in Asm.js format') + parser.add_argument('--disable_single_file', action='store_true', help='Do not merge JavaScript and WebAssembly into one single file') + parser.add_argument('--threads', action='store_true', help='Build OpenCV.js with threads optimization') + parser.add_argument('--simd', action='store_true', help='Build OpenCV.js with SIMD optimization') + parser.add_argument('--build_test', action='store_true', help='Build tests') + parser.add_argument('--build_perf', action='store_true', help='Build performance tests') + parser.add_argument('--build_doc', action='store_true', help='Build tutorials') + parser.add_argument('--build_loader', action='store_true', help='Build OpenCV.js loader') + parser.add_argument('--clean_build_dir', action='store_true', help='Clean build dir') + parser.add_argument('--skip_config', action='store_true', help='Skip cmake config') + parser.add_argument('--config_only', action='store_true', help='Only do cmake config') + parser.add_argument('--enable_exception', action='store_true', help='Enable exception handling') + parser.add_argument('--cmake_option', action='append', help='Append CMake options') + parser.add_argument('--build_flags', help='Append Emscripten build options') + parser.add_argument('--build_wasm_intrin_test', default=False, action='store_true', help='Build WASM intrin tests') + parser.add_argument('--config', help='Specify a configuration file with its own list of functions exported into JS') + parser.add_argument('--webnn', action='store_true', help='Enable WebNN Backend') + # Rewrite bare '-D...' CMake arguments into '--cmake_option=-D...' so argparse accepts them. + transformed_args = ['--cmake_option=%s' % arg if arg[:2] == '-D' else arg for arg in sys.argv[1:]] + args = parser.parse_args(transformed_args) + log.debug('Args: %s', args) + if args.config is not None: + os.environ['OPENCV_JS_WHITELIST'] = os.path.abspath(args.config) + if 'EMMAKEN_JUST_CONFIGURE' in os.environ: + del os.environ['EMMAKEN_JUST_CONFIGURE'] + if args.emscripten_dir is None: + log.error("Cannot get Emscripten path, please use the 'emcmake' launcher or specify it via the EMSCRIPTEN/EMSDK environment variables or the --emscripten_dir option.") + sys.exit(-1) + builder = Builder(args) + os.chdir(builder.build_dir) + if args.clean_build_dir: + log.info('=====') + log.info('===== Clean build dir %s', builder.build_dir) + log.info('=====') + builder.clean_build_dir() + if not args.skip_config: + target = 'default target' + if args.build_wasm: + target = 'wasm' + elif args.disable_wasm: + target = 'asm.js' + log.info('=====') + log.info('===== Config OpenCV.js build for %s' % target) + log.info('=====') + builder.config() + if args.config_only: + sys.exit(0) + log.info('=====') + log.info('===== Building OpenCV.js') + log.info('=====') + builder.build_opencvjs() + if args.build_test: +
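+# Editorial note: each optional step below simply invokes a dedicated make target
+# (opencv_js_test, opencv_js_perf, doxygen, opencv_js_loader) through the Builder
+# methods defined above.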
log.info('=====') + log.info('===== Building OpenCV.js tests') + log.info('=====') + builder.build_test() + if args.build_perf: + log.info('=====') + log.info('===== Building OpenCV.js performance tests') + log.info('=====') + builder.build_perf() + if args.build_doc: + log.info('=====') + log.info('===== Building OpenCV.js tutorials') + log.info('=====') + builder.build_doc() + if args.build_loader: + log.info('=====') + log.info('===== Building OpenCV.js loader') + log.info('=====') + builder.build_loader() + log.info('=====') + log.info('===== Build finished') + log.info('=====') + opencvjs_path = os.path.join(builder.build_dir, 'bin', 'opencv.js') + if check_file(opencvjs_path): + log.info('OpenCV.js location: %s', opencvjs_path) + if args.build_test: + opencvjs_test_path = os.path.join(builder.build_dir, 'bin', 'tests.html') + if check_file(opencvjs_test_path): + log.info('OpenCV.js tests location: %s', opencvjs_test_path) + if args.build_perf: + opencvjs_perf_path = os.path.join(builder.build_dir, 'bin', 'perf') + opencvjs_perf_base_path = os.path.join(builder.build_dir, 'bin', 'perf', 'base.js') + if check_file(opencvjs_perf_base_path): + log.info('OpenCV.js performance tests location: %s', opencvjs_perf_path) + if args.build_doc: + opencvjs_tutorial_path = find_file('tutorial_js_root.html', os.path.join(builder.build_dir, 'doc', 'doxygen', 'html')) + if check_file(opencvjs_tutorial_path): + log.info('OpenCV.js tutorials location: %s', opencvjs_tutorial_path) + if args.build_loader: + opencvjs_loader_path = os.path.join(builder.build_dir, 'bin', 'loader.js') + if check_file(opencvjs_loader_path): + log.info('OpenCV.js loader location: %s', opencvjs_loader_path) + +# File: opencv-master/platforms/js/opencv_js.config.py +core = {'': ['absdiff', 'add', 'addWeighted', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cartToPolar', 'compare', 'convertScaleAbs', 'copyMakeBorder', 'countNonZero', 'determinant', 'dft', 'divide', 'eigen', 'exp', 'flip', 'getOptimalDFTSize', 'gemm', 'hconcat', 'inRange', 'invert', 'kmeans', 'log', 'magnitude', 'max', 'mean', 'meanStdDev', 'merge', 'min', 'minMaxLoc', 'mixChannels', 'multiply', 'norm', 'normalize', 'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'rotate', 'setIdentity', 'setRNGSeed', 'solve', 'solvePoly', 'split', 'sqrt', 'subtract', 'trace', 'transform', 'transpose', 'vconcat', 'setLogLevel', 'getLogLevel', 'LUT'], 'Algorithm': []} +imgproc = {'': ['Canny', 'GaussianBlur', 'Laplacian', 'HoughLines', 'HoughLinesP', 'HoughCircles', 'Scharr', 'Sobel', 'adaptiveThreshold', 'approxPolyDP', 'arcLength', 'bilateralFilter', 'blur', 'boundingRect', 'boxFilter', 'calcBackProject', 'calcHist', 'circle', 'compareHist', 'connectedComponents', 'connectedComponentsWithStats', 'contourArea', 'convexHull', 'convexityDefects', 'cornerHarris', 'cornerMinEigenVal', 'createCLAHE', 'createLineSegmentDetector', 'cvtColor', 'demosaicing', 'dilate', 'distanceTransform', 'distanceTransformWithLabels', 'drawContours', 'ellipse', 'ellipse2Poly', 'equalizeHist', 'erode', 'filter2D', 'findContours', 'fitEllipse', 'fitLine', 'floodFill', 'getAffineTransform', 'getPerspectiveTransform', 'getRotationMatrix2D', 'getStructuringElement', 'goodFeaturesToTrack', 'grabCut', 'integral', 'integral2', 'isContourConvex', 'line', 'matchShapes', 'matchTemplate', 'medianBlur', 'minAreaRect', 'minEnclosingCircle', 'moments', 'morphologyEx', 'pointPolygonTest', 'putText', 'pyrDown', 'pyrUp', 'rectangle', 'remap', 'resize', 'sepFilter2D', 
'threshold', 'warpAffine', 'warpPerspective', 'warpPolar', 'watershed', 'fillPoly', 'fillConvexPoly', 'polylines'], 'CLAHE': ['apply', 'collectGarbage', 'getClipLimit', 'getTilesGridSize', 'setClipLimit', 'setTilesGridSize'], 'segmentation_IntelligentScissorsMB': ['IntelligentScissorsMB', 'setWeights', 'setGradientMagnitudeMaxLimit', 'setEdgeFeatureZeroCrossingParameters', 'setEdgeFeatureCannyParameters', 'applyImage', 'applyImageFeatures', 'buildMap', 'getContour']} +objdetect = {'': ['groupRectangles', 'getPredefinedDictionary', 'extendDictionary', 'drawDetectedMarkers', 'generateImageMarker', 'drawDetectedCornersCharuco', 'drawDetectedDiamonds'], 'HOGDescriptor': ['load', 'HOGDescriptor', 'getDefaultPeopleDetector', 'getDaimlerPeopleDetector', 'setSVMDetector', 'detectMultiScale'], 'CascadeClassifier': ['load', 'detectMultiScale2', 'CascadeClassifier', 'detectMultiScale3', 'empty', 'detectMultiScale'], 'GraphicalCodeDetector': ['decode', 'detect', 'detectAndDecode', 'detectMulti', 'decodeMulti', 'detectAndDecodeMulti'], 'QRCodeDetector': ['QRCodeDetector', 'decode', 'detect', 'detectAndDecode', 'detectMulti', 'decodeMulti', 'detectAndDecodeMulti', 'decodeCurved', 'detectAndDecodeCurved', 'setEpsX', 'setEpsY'], 'aruco_PredefinedDictionaryType': [], 'aruco_Dictionary': ['Dictionary', 'getDistanceToId', 'generateImageMarker', 'getByteListFromBits', 'getBitsFromByteList'], 'aruco_Board': ['Board', 'matchImagePoints', 'generateImage'], 'aruco_GridBoard': ['GridBoard', 'generateImage', 'getGridSize', 'getMarkerLength', 'getMarkerSeparation', 'matchImagePoints'], 'aruco_CharucoParameters': ['CharucoParameters'], 'aruco_CharucoBoard': ['CharucoBoard', 'generateImage', 'getChessboardCorners', 'getNearestMarkerCorners', 'checkCharucoCornersCollinear', 'matchImagePoints', 'getLegacyPattern', 'setLegacyPattern'], 'aruco_DetectorParameters': ['DetectorParameters'], 'aruco_RefineParameters': ['RefineParameters'], 'aruco_ArucoDetector': ['ArucoDetector', 'detectMarkers', 'refineDetectedMarkers', 'setDictionary', 'setDetectorParameters', 'setRefineParameters'], 'aruco_CharucoDetector': ['CharucoDetector', 'setBoard', 'setCharucoParameters', 'setDetectorParameters', 'setRefineParameters', 'detectBoard', 'detectDiamonds'], 'QRCodeDetectorAruco_Params': ['Params'], 'QRCodeDetectorAruco': ['QRCodeDetectorAruco', 'decode', 'detect', 'detectAndDecode', 'detectMulti', 'decodeMulti', 'detectAndDecodeMulti', 'setDetectorParameters', 'setArucoParameters'], 'barcode_BarcodeDetector': ['BarcodeDetector', 'decode', 'detect', 'detectAndDecode', 'detectMulti', 'decodeMulti', 'detectAndDecodeMulti', 'decodeWithType', 'detectAndDecodeWithType'], 'FaceDetectorYN': ['setInputSize', 'getInputSize', 'setScoreThreshold', 'getScoreThreshold', 'setNMSThreshold', 'getNMSThreshold', 'setTopK', 'getTopK', 'detect', 'create']} +video = {'': ['CamShift', 'calcOpticalFlowFarneback', 'calcOpticalFlowPyrLK', 'createBackgroundSubtractorMOG2', 'findTransformECC', 'meanShift'], 'BackgroundSubtractorMOG2': ['BackgroundSubtractorMOG2', 'apply'], 'BackgroundSubtractor': ['apply', 'getBackgroundImage'], 'TrackerMIL': ['create'], 'TrackerMIL_Params': []} +dnn = {'dnn_Net': ['setInput', 'forward', 'setPreferableBackend', 'getUnconnectedOutLayersNames'], '': ['readNetFromCaffe', 'readNetFromTensorflow', 'readNetFromTorch', 'readNetFromDarknet', 'readNetFromONNX', 'readNetFromTFLite', 'readNet', 'blobFromImage']} +features2d = {'Feature2D': ['detect', 'compute', 'detectAndCompute', 'descriptorSize', 'descriptorType', 'defaultNorm', 'empty', 
'getDefaultName'], 'BRISK': ['create', 'getDefaultName'], 'ORB': ['create', 'setMaxFeatures', 'setScaleFactor', 'setNLevels', 'setEdgeThreshold', 'setFastThreshold', 'setFirstLevel', 'setWTA_K', 'setScoreType', 'setPatchSize', 'getFastThreshold', 'getDefaultName'], 'MSER': ['create', 'detectRegions', 'setDelta', 'getDelta', 'setMinArea', 'getMinArea', 'setMaxArea', 'getMaxArea', 'setPass2Only', 'getPass2Only', 'getDefaultName'], 'FastFeatureDetector': ['create', 'setThreshold', 'getThreshold', 'setNonmaxSuppression', 'getNonmaxSuppression', 'setType', 'getType', 'getDefaultName'], 'AgastFeatureDetector': ['create', 'setThreshold', 'getThreshold', 'setNonmaxSuppression', 'getNonmaxSuppression', 'setType', 'getType', 'getDefaultName'], 'GFTTDetector': ['create', 'setMaxFeatures', 'getMaxFeatures', 'setQualityLevel', 'getQualityLevel', 'setMinDistance', 'getMinDistance', 'setBlockSize', 'getBlockSize', 'setHarrisDetector', 'getHarrisDetector', 'setK', 'getK', 'getDefaultName'], 'SimpleBlobDetector': ['create', 'setParams', 'getParams', 'getDefaultName'], 'SimpleBlobDetector_Params': [], 'KAZE': ['create', 'setExtended', 'getExtended', 'setUpright', 'getUpright', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'], 'AKAZE': ['create', 'setDescriptorType', 'getDescriptorType', 'setDescriptorSize', 'getDescriptorSize', 'setDescriptorChannels', 'getDescriptorChannels', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'], 'DescriptorMatcher': ['add', 'clear', 'empty', 'isMaskSupported', 'train', 'match', 'knnMatch', 'radiusMatch', 'clone', 'create'], 'BFMatcher': ['isMaskSupported', 'create'], '': ['drawKeypoints', 'drawMatches', 'drawMatchesKnn']} +photo = {'': ['createAlignMTB', 'createCalibrateDebevec', 'createCalibrateRobertson', 'createMergeDebevec', 'createMergeMertens', 'createMergeRobertson', 'createTonemapDrago', 'createTonemapMantiuk', 'createTonemapReinhard', 'inpaint'], 'CalibrateCRF': ['process'], 'AlignMTB': ['calculateShift', 'shiftMat', 'computeBitmaps', 'getMaxBits', 'setMaxBits', 'getExcludeRange', 'setExcludeRange', 'getCut', 'setCut'], 'CalibrateDebevec': ['getLambda', 'setLambda', 'getSamples', 'setSamples', 'getRandom', 'setRandom'], 'CalibrateRobertson': ['getMaxIter', 'setMaxIter', 'getThreshold', 'setThreshold', 'getRadiance'], 'MergeExposures': ['process'], 'MergeDebevec': ['process'], 'MergeMertens': ['process', 'getContrastWeight', 'setContrastWeight', 'getSaturationWeight', 'setSaturationWeight', 'getExposureWeight', 'setExposureWeight'], 'MergeRobertson': ['process'], 'Tonemap': ['process', 'getGamma', 'setGamma'], 'TonemapDrago': ['getSaturation', 'setSaturation', 'getBias', 'setBias', 'getSigmaColor', 'setSigmaColor', 'getSigmaSpace', 'setSigmaSpace'], 'TonemapMantiuk': ['getScale', 'setScale', 'getSaturation', 'setSaturation'], 'TonemapReinhard': ['getIntensity', 'setIntensity', 'getLightAdaptation', 'setLightAdaptation', 'getColorAdaptation', 'setColorAdaptation']} +calib3d = {'': ['findHomography', 'calibrateCameraExtended', 'drawFrameAxes', 'estimateAffine2D', 'getDefaultNewCameraMatrix', 'initUndistortRectifyMap', 'Rodrigues', 'solvePnP', 'solvePnPRansac', 'solvePnPRefineLM', 'projectPoints', 'undistort', 'fisheye_initUndistortRectifyMap', 'fisheye_projectPoints']} +white_list = makeWhiteList([core, imgproc, objdetect, video, dnn, features2d, photo, 
calib3d]) + +# File: opencv-master/platforms/osx/build_docs.py +"""""" +from __future__ import print_function +import os, sys, multiprocessing, argparse, traceback +from subprocess import check_call, check_output, CalledProcessError, Popen +sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../ios')) +from build_docs import DocBuilder + +class OSXDocBuilder(DocBuilder): + + def getToolchain(self): + return None +if __name__ == '__main__': + script_dir = os.path.abspath(os.path.dirname(sys.argv[0])) + parser = argparse.ArgumentParser(description='The script builds OpenCV docs for macOS.') + parser.add_argument('framework_dir', metavar='FRAMEWORK_DIR', help='folder where framework build files are located') + parser.add_argument('--output_dir', default=None, help='folder where docs will be built (default is "../doc_build" relative to framework_dir)') + parser.add_argument('--framework_header', default=None, help='umbrella header for OpenCV framework (default is "../../../lib/Release/{framework_name}.framework/Headers/{framework_name}.h")') + parser.add_argument('--framework_name', default='opencv2', help='Name of OpenCV framework (default: opencv2, will change to OpenCV in future version)') + args = parser.parse_args() + arch = 'x86_64' + target = 'macosx' + b = OSXDocBuilder(script_dir, args.framework_dir, args.output_dir if args.output_dir else os.path.join(args.framework_dir, '../doc_build'), args.framework_header if args.framework_header else os.path.join(args.framework_dir, '../../../lib/Release/' + args.framework_name + '.framework/Headers/' + args.framework_name + '.h'), args.framework_name, arch, target) + b.build() + +# File: opencv-master/platforms/osx/build_framework.py +"""""" +from __future__ import print_function +import os, os.path, sys, argparse, traceback, multiprocessing +sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../ios')) +from build_framework import Builder +sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../apple')) +from cv_build_utils import print_error, get_cmake_version +MACOSX_DEPLOYMENT_TARGET = '10.12' + +class OSXBuilder(Builder): + + def checkCMakeVersion(self): + assert get_cmake_version() >= (3, 17), 'CMake 3.17 or later is required. 
Current version is {}'.format(get_cmake_version()) + + def getObjcTarget(self, target): + if target == 'Catalyst': + return 'ios' + else: + return 'osx' + + def getToolchain(self, arch, target): + return None + + def getBuildCommand(self, arch, target): + buildcmd = ['xcodebuild', 'MACOSX_DEPLOYMENT_TARGET=' + os.environ['MACOSX_DEPLOYMENT_TARGET'], 'ARCHS=%s' % arch, '-sdk', 'macosx' if target == 'Catalyst' else target.lower(), '-configuration', 'Debug' if self.debug else 'Release', '-parallelizeTargets', '-jobs', str(multiprocessing.cpu_count())] + if target == 'Catalyst': + buildcmd.append("-destination 'platform=macOS,arch=%s,variant=Mac Catalyst'" % arch) + buildcmd.append('-UseModernBuildSystem=YES') + buildcmd.append('SKIP_INSTALL=NO') + buildcmd.append('BUILD_LIBRARY_FOR_DISTRIBUTION=YES') + buildcmd.append('TARGETED_DEVICE_FAMILY="1,2"') + buildcmd.append('SDKROOT=iphoneos') + buildcmd.append('SUPPORTS_MAC_CATALYST=YES') + return buildcmd + + def getInfoPlist(self, builddirs): + return os.path.join(builddirs[0], 'osx', 'Info.plist') +if __name__ == '__main__': + folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) + parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for OSX.') + parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework') + parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)') + parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)') + parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework. To exclude multiple, specify this flag again, e.g. "--without video --without objc"') + parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF). To disable multiple, specify this flag again, e.g. "--disable tbb --disable openmp"') + parser.add_argument('--dynamic', default=False, action='store_true', help='build dynamic framework (default is "False" - builds static framework)') + parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)') + parser.add_argument('--macosx_deployment_target', default=os.environ.get('MACOSX_DEPLOYMENT_TARGET', MACOSX_DEPLOYMENT_TARGET), help='specify MACOSX_DEPLOYMENT_TARGET') + parser.add_argument('--build_only_specified_archs', default=False, action='store_true', help='if enabled, only directly specified archs are built and defaults are ignored') + parser.add_argument('--archs', default=None, help='(Deprecated! Prefer --macos_archs instead.) Select target ARCHS (set to "x86_64,arm64" to build Universal Binary for Big Sur and later). Default is "x86_64".') + parser.add_argument('--macos_archs', default=None, help='Select target ARCHS (set to "x86_64,arm64" to build Universal Binary for Big Sur and later). Default is "x86_64"') + parser.add_argument('--catalyst_archs', default=None, help='Select target ARCHS (set to "x86_64,arm64" to build Universal Binary for Big Sur and later). 
Default is None') + parser.add_argument('--debug', action='store_true', help='Build "Debug" binaries (CMAKE_BUILD_TYPE=Debug)') + parser.add_argument('--debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)') + parser.add_argument('--framework_name', default='opencv2', dest='framework_name', help='Name of OpenCV framework (default: opencv2, will change to OpenCV in future version)') + parser.add_argument('--legacy_build', default=False, dest='legacy_build', action='store_true', help='Build legacy framework (default: False, equivalent to "--framework_name=opencv2 --without=objc")') + parser.add_argument('--run_tests', default=False, dest='run_tests', action='store_true', help='Run tests') + parser.add_argument('--build_docs', default=False, dest='build_docs', action='store_true', help='Build docs') + parser.add_argument('--disable-swift', default=False, dest='swiftdisabled', action='store_true', help='Disable building of Swift extensions') + (args, unknown_args) = parser.parse_known_args() + if unknown_args: + print('The following args are not recognized and will not be used: %s' % unknown_args) + os.environ['MACOSX_DEPLOYMENT_TARGET'] = args.macosx_deployment_target + print('Using MACOSX_DEPLOYMENT_TARGET=' + os.environ['MACOSX_DEPLOYMENT_TARGET']) + macos_archs = None + if args.archs: + args.macos_archs = args.archs + print('--archs is deprecated! Prefer --macos_archs instead.') + if args.macos_archs: + macos_archs = args.macos_archs.split(',') + elif not args.build_only_specified_archs: + macos_archs = ['x86_64'] + print('Using MacOS ARCHS=' + str(macos_archs)) + catalyst_archs = None + if args.catalyst_archs: + catalyst_archs = args.catalyst_archs.split(',') + print('Using Catalyst ARCHS=' + str(catalyst_archs)) + if macos_archs and catalyst_archs: + duplicate_archs = set(macos_archs).intersection(catalyst_archs) + if duplicate_archs: + print_error('Cannot have the same architecture for multiple platforms in a fat framework! Consider using build_xcframework.py in the apple platform folder instead. 
Duplicate archs are %s' % duplicate_archs) + sys.exit(1) + if args.legacy_build: + args.framework_name = 'opencv2' + if 'objc' not in args.without: + args.without.append('objc') + targets = [] + if not macos_archs and not catalyst_archs: + print_error('--macos_archs and --catalyst_archs are undefined; nothing will be built.') + sys.exit(1) + if macos_archs: + targets.append((macos_archs, 'MacOSX')) + if catalyst_archs: + targets.append((catalyst_archs, 'Catalyst')) + b = OSXBuilder(args.opencv, args.contrib, args.dynamic, True, args.without, args.disable, args.enablenonfree, targets, args.debug, args.debug_info, args.framework_name, args.run_tests, args.build_docs, args.swiftdisabled) + b.build(args.out) + +# File: opencv-master/platforms/winpack_dldt/2020.1/sysroot.config.py +sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin') +copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph') +build_config = 
'Release' if not self.config.build_debug else 'Debug' +build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config + +def copy_bin(name): + global build_bin_dir, sysroot_bin_dir + copytree(build_bin_dir / name, sysroot_bin_dir / name) +dll_suffix = 'd' if self.config.build_debug else '' + +def copy_dll(name): + global copy_bin, dll_suffix + copy_bin(name + dll_suffix + '.dll') + copy_bin(name + dll_suffix + '.pdb') +copy_bin('cldnn_global_custom_kernels') +copy_bin('cache.json') +copy_dll('clDNNPlugin') +copy_dll('HeteroPlugin') +copy_dll('inference_engine') +copy_dll('inference_engine_legacy') +copy_dll('inference_engine_nn_builder') +copy_dll('inference_engine_transformations') +copy_dll('inference_engine_lp_transformations') +copy_dll('MKLDNNPlugin') +copy_dll('myriadPlugin') +copy_dll('ngraph') +copy_bin('plugins.xml') +copytree(self.build_dir / 'bin' / 'intel64' / 'pcie-ma248x.elf', sysroot_bin_dir / 'pcie-ma248x.elf') +copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2x8x.mvcmd', sysroot_bin_dir / 'usb-ma2x8x.mvcmd') +copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2450.mvcmd', sysroot_bin_dir / 'usb-ma2450.mvcmd') +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir) +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb') +sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine') +sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64') +copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include') +if not self.config.build_debug: + copytree(self.build_dir / 'install' / 'lib' / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib') + copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib') + copytree(build_bin_dir / 'inference_engine_nn_builder.lib', sysroot_ie_lib_dir / 'inference_engine_nn_builder.lib') + copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib') +else: + copytree(self.build_dir / 'install' / 'lib' / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib') + copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib') + copytree(build_bin_dir / 'inference_engine_nn_builderd.lib', sysroot_ie_lib_dir / 'inference_engine_nn_builderd.lib') + copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib') +sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses') +copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE') +copytree(self.srcdir / 'ngraph/LICENSE', sysroot_license_dir / 'ngraph-LICENSE') +copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE') + +# File: opencv-master/platforms/winpack_dldt/2020.3.0/sysroot.config.py +sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin') +copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph') +build_config = 'Release' if not self.config.build_debug else 'Debug' +build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config + +def copy_bin(name): + global build_bin_dir, sysroot_bin_dir + copytree(build_bin_dir / name, sysroot_bin_dir / name) +dll_suffix = 'd' if self.config.build_debug else '' + +def copy_dll(name): + global copy_bin, dll_suffix + copy_bin(name + dll_suffix + '.dll') + copy_bin(name + dll_suffix + '.pdb') +copy_bin('cldnn_global_custom_kernels') +copy_bin('cache.json') +copy_dll('clDNNPlugin') +copy_dll('HeteroPlugin') +copy_dll('inference_engine') 
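+# Editorial note: copy_dll() inserts the configuration suffix before the extension,
+# so in a Debug build (dll_suffix == 'd') the call above expands to, e.g.,
+#   copy_bin('inference_engined.dll'); copy_bin('inference_engined.pdb')
+# while a Release build copies 'inference_engine.dll' and 'inference_engine.pdb'.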
+copy_dll('inference_engine_legacy') +copy_dll('inference_engine_nn_builder') +copy_dll('inference_engine_transformations') +copy_dll('inference_engine_lp_transformations') +copy_dll('MKLDNNPlugin') +copy_dll('myriadPlugin') +copy_dll('ngraph') +copy_bin('plugins.xml') +copytree(self.build_dir / 'bin' / 'intel64' / 'pcie-ma248x.elf', sysroot_bin_dir / 'pcie-ma248x.elf') +copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2x8x.mvcmd', sysroot_bin_dir / 'usb-ma2x8x.mvcmd') +copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2450.mvcmd', sysroot_bin_dir / 'usb-ma2450.mvcmd') +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir) +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb') +sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine') +sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64') +copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include') +if not self.config.build_debug: + copytree(self.build_dir / 'install' / 'lib' / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib') + copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib') + copytree(build_bin_dir / 'inference_engine_nn_builder.lib', sysroot_ie_lib_dir / 'inference_engine_nn_builder.lib') + copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib') +else: + copytree(self.build_dir / 'install' / 'lib' / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib') + copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib') + copytree(build_bin_dir / 'inference_engine_nn_builderd.lib', sysroot_ie_lib_dir / 'inference_engine_nn_builderd.lib') + copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib') +sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses') +copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE') +copytree(self.srcdir / 'ngraph/LICENSE', sysroot_license_dir / 'ngraph-LICENSE') +copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE') + +# File: opencv-master/platforms/winpack_dldt/2020.4/sysroot.config.py +sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin') +copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph') +build_config = 'Release' if not self.config.build_debug else 'Debug' +build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config + +def copy_bin(name): + global build_bin_dir, sysroot_bin_dir + copytree(build_bin_dir / name, sysroot_bin_dir / name) +dll_suffix = 'd' if self.config.build_debug else '' + +def copy_dll(name): + global copy_bin, dll_suffix + copy_bin(name + dll_suffix + '.dll') + copy_bin(name + dll_suffix + '.pdb') +copy_bin('cache.json') +copy_dll('clDNNPlugin') +copy_dll('HeteroPlugin') +copy_dll('inference_engine') +copy_dll('inference_engine_ir_reader') +copy_dll('inference_engine_legacy') +copy_dll('inference_engine_transformations') +copy_dll('inference_engine_lp_transformations') +copy_dll('MKLDNNPlugin') +copy_dll('myriadPlugin') +copy_dll('ngraph') +copy_bin('plugins.xml') +copytree(self.build_dir / 'bin' / 'intel64' / 'pcie-ma248x.elf', sysroot_bin_dir / 'pcie-ma248x.elf') +copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2x8x.mvcmd', sysroot_bin_dir / 'usb-ma2x8x.mvcmd') +copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2450.mvcmd', sysroot_bin_dir / 'usb-ma2450.mvcmd') +copytree(self.srcdir / 
'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir) +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb') +sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine') +sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64') +copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include') +if not self.config.build_debug: + copytree(self.build_dir / 'install' / 'lib' / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib') + copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib') + copytree(build_bin_dir / 'inference_engine_ir_reader.lib', sysroot_ie_lib_dir / 'inference_engine_ir_reader.lib') + copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib') +else: + copytree(self.build_dir / 'install' / 'lib' / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib') + copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib') + copytree(build_bin_dir / 'inference_engine_ir_readerd.lib', sysroot_ie_lib_dir / 'inference_engine_ir_readerd.lib') + copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib') +sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses') +copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE') +copytree(self.srcdir / 'ngraph/LICENSE', sysroot_license_dir / 'ngraph-LICENSE') +copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE') + +# File: opencv-master/platforms/winpack_dldt/2021.1/sysroot.config.py +sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin') +copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph') +build_config = 'Release' if not self.config.build_debug else 'Debug' +build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config + +def copy_bin(name): + global build_bin_dir, sysroot_bin_dir + copytree(build_bin_dir / name, sysroot_bin_dir / name) +dll_suffix = 'd' if self.config.build_debug else '' + +def copy_dll(name): + global copy_bin, dll_suffix + copy_bin(name + dll_suffix + '.dll') + copy_bin(name + dll_suffix + '.pdb') +copy_bin('cache.json') +copy_dll('clDNNPlugin') +copy_dll('HeteroPlugin') +copy_dll('inference_engine') +copy_dll('inference_engine_ir_reader') +copy_dll('inference_engine_legacy') +copy_dll('inference_engine_transformations') +copy_dll('inference_engine_lp_transformations') +copy_dll('MKLDNNPlugin') +copy_dll('myriadPlugin') +copy_dll('ngraph') +copy_bin('plugins.xml') +copytree(self.build_dir / 'bin' / 'intel64' / 'pcie-ma248x.elf', sysroot_bin_dir / 'pcie-ma248x.elf') +copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2x8x.mvcmd', sysroot_bin_dir / 'usb-ma2x8x.mvcmd') +copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2450.mvcmd', sysroot_bin_dir / 'usb-ma2450.mvcmd') +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir) +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb') +sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine') +sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64') +copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include') +if not self.config.build_debug: + copytree(self.build_dir / 'install' / 'lib' / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib') + copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib') + 
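+# Editorial note: these import libraries (.lib) are what the OpenCV build later
+# links against; build_package.py (further below) points InferenceEngine_DIR at this
+# sysroot's deployment_tools/inference_engine/cmake directory. The Debug branch
+# stages the 'd'-suffixed variants of the same libraries.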
copytree(build_bin_dir / 'inference_engine_ir_reader.lib', sysroot_ie_lib_dir / 'inference_engine_ir_reader.lib') + copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib') +else: + copytree(self.build_dir / 'install' / 'lib' / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib') + copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib') + copytree(build_bin_dir / 'inference_engine_ir_readerd.lib', sysroot_ie_lib_dir / 'inference_engine_ir_readerd.lib') + copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib') +sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses') +copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE') +copytree(self.srcdir / 'ngraph/LICENSE', sysroot_license_dir / 'ngraph-LICENSE') +copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE') + +# File: opencv-master/platforms/winpack_dldt/2021.2/sysroot.config.py +sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin') +copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph') +build_config = 'Release' if not self.config.build_debug else 'Debug' +build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config + +def copy_bin(name): + global build_bin_dir, sysroot_bin_dir + copytree(build_bin_dir / name, sysroot_bin_dir / name) +dll_suffix = 'd' if self.config.build_debug else '' + +def copy_dll(name): + global copy_bin, dll_suffix + copy_bin(name + dll_suffix + '.dll') + copy_bin(name + dll_suffix + '.pdb') +copy_bin('cache.json') +copy_dll('clDNNPlugin') +copy_dll('HeteroPlugin') +copy_dll('inference_engine') +copy_dll('inference_engine_ir_reader') +copy_dll('inference_engine_legacy') +copy_dll('inference_engine_transformations') +copy_dll('inference_engine_lp_transformations') +copy_dll('MKLDNNPlugin') +copy_dll('myriadPlugin') +copy_dll('ngraph') +copy_bin('plugins.xml') +copy_bin('pcie-ma2x8x.elf') +copy_bin('usb-ma2x8x.mvcmd') +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir) +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb') +sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine') +sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64') +copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include') +if not self.config.build_debug: + copytree(self.build_dir / 'install' / 'lib' / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib') + copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib') + copytree(build_bin_dir / 'inference_engine_ir_reader.lib', sysroot_ie_lib_dir / 'inference_engine_ir_reader.lib') + copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib') +else: + copytree(self.build_dir / 'install' / 'lib' / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib') + copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib') + copytree(build_bin_dir / 'inference_engine_ir_readerd.lib', sysroot_ie_lib_dir / 'inference_engine_ir_readerd.lib') + copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib') +sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses') +copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE') +copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 
'tbb-LICENSE') + +# File: opencv-master/platforms/winpack_dldt/2021.3/sysroot.config.py +sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin') +copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph') +build_config = 'Release' if not self.config.build_debug else 'Debug' +build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config + +def copy_bin(name): + global build_bin_dir, sysroot_bin_dir + copytree(build_bin_dir / name, sysroot_bin_dir / name) +dll_suffix = 'd' if self.config.build_debug else '' + +def copy_dll(name): + global copy_bin, dll_suffix + copy_bin(name + dll_suffix + '.dll') + copy_bin(name + dll_suffix + '.pdb') +copy_bin('cache.json') +copy_dll('clDNNPlugin') +copy_dll('HeteroPlugin') +copy_dll('inference_engine') +copy_dll('inference_engine_ir_reader') +copy_dll('inference_engine_ir_v7_reader') +copy_dll('inference_engine_legacy') +copy_dll('inference_engine_transformations') +copy_dll('inference_engine_lp_transformations') +copy_dll('MKLDNNPlugin') +copy_dll('myriadPlugin') +copy_dll('ngraph') +copy_bin('plugins.xml') +copy_bin('pcie-ma2x8x.elf') +copy_bin('usb-ma2x8x.mvcmd') +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir) +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb') +sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine') +sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64') +copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include') +if not self.config.build_debug: + copytree(build_bin_dir / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib') + copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib') + copytree(build_bin_dir / 'inference_engine_ir_reader.lib', sysroot_ie_lib_dir / 'inference_engine_ir_reader.lib') + copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib') +else: + copytree(build_bin_dir / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib') + copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib') + copytree(build_bin_dir / 'inference_engine_ir_readerd.lib', sysroot_ie_lib_dir / 'inference_engine_ir_readerd.lib') + copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib') +sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses') +copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE') +copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE') + +# File: opencv-master/platforms/winpack_dldt/2021.4.1/sysroot.config.py +sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin') +copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph') +build_config = 'Release' if not self.config.build_debug else 'Debug' +build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config + +def copy_bin(name): + global build_bin_dir, sysroot_bin_dir + copytree(build_bin_dir / name, sysroot_bin_dir / name) +dll_suffix = 'd' if self.config.build_debug else '' + +def copy_dll(name): + global copy_bin, dll_suffix + copy_bin(name + dll_suffix + '.dll') + copy_bin(name + dll_suffix + '.pdb') +copy_bin('cache.json') +copy_dll('clDNNPlugin') +copy_dll('HeteroPlugin') +copy_dll('inference_engine') +copy_dll('inference_engine_ir_reader') +copy_dll('inference_engine_legacy') +copy_dll('inference_engine_transformations') +copy_dll('inference_engine_lp_transformations') +copy_dll('MKLDNNPlugin') 
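+# Editorial note: the plugin DLLs staged in this block map to OpenVINO device
+# backends of that era: MKLDNNPlugin (CPU), clDNNPlugin (OpenCL GPU), myriadPlugin
+# (Myriad VPU) and HeteroPlugin (heterogeneous execution across devices).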
+copy_dll('myriadPlugin') +copy_dll('ngraph') +copy_bin('plugins.xml') +copy_bin('pcie-ma2x8x.elf') +copy_bin('usb-ma2x8x.mvcmd') +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir) +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb') +sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine') +sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64') +copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include') +if not self.config.build_debug: + copytree(build_bin_dir / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib') + copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib') + copytree(build_bin_dir / 'inference_engine_ir_reader.lib', sysroot_ie_lib_dir / 'inference_engine_ir_reader.lib') + copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib') +else: + copytree(build_bin_dir / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib') + copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib') + copytree(build_bin_dir / 'inference_engine_ir_readerd.lib', sysroot_ie_lib_dir / 'inference_engine_ir_readerd.lib') + copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib') +sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses') +copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE') +copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE') + +# File: opencv-master/platforms/winpack_dldt/2021.4.2/sysroot.config.py +copytree(self.cpath / 'cmake', self.sysrootdir / 'deployment_tools' / 'inference_engine' / 'cmake') +sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin') +copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph') +build_config = 'Release' if not self.config.build_debug else 'Debug' +build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config + +def copy_bin(name): + global build_bin_dir, sysroot_bin_dir + copytree(build_bin_dir / name, sysroot_bin_dir / name) +dll_suffix = 'd' if self.config.build_debug else '' + +def copy_dll(name): + global copy_bin, dll_suffix + copy_bin(name + dll_suffix + '.dll') + copy_bin(name + dll_suffix + '.pdb') +copy_bin('cache.json') +copy_dll('clDNNPlugin') +copy_dll('HeteroPlugin') +copy_dll('inference_engine') +copy_dll('inference_engine_ir_reader') +copy_dll('inference_engine_legacy') +copy_dll('inference_engine_transformations') +copy_dll('inference_engine_lp_transformations') +copy_dll('MKLDNNPlugin') +copy_dll('myriadPlugin') +copy_dll('ngraph') +copy_bin('plugins.xml') +copy_bin('pcie-ma2x8x.elf') +copy_bin('usb-ma2x8x.mvcmd') +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir) +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb') +sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine') +sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64') +copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include') +if not self.config.build_debug: + copytree(build_bin_dir / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib') + copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib') + copytree(build_bin_dir / 'inference_engine_ir_reader.lib', sysroot_ie_lib_dir / 'inference_engine_ir_reader.lib') + copytree(build_bin_dir / 
'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib') +else: + copytree(build_bin_dir / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib') + copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib') + copytree(build_bin_dir / 'inference_engine_ir_readerd.lib', sysroot_ie_lib_dir / 'inference_engine_ir_readerd.lib') + copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib') +sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses') +copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE') +copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE') + +# File: opencv-master/platforms/winpack_dldt/2021.4/sysroot.config.py +sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin') +copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph') +build_config = 'Release' if not self.config.build_debug else 'Debug' +build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config + +def copy_bin(name): + global build_bin_dir, sysroot_bin_dir + copytree(build_bin_dir / name, sysroot_bin_dir / name) +dll_suffix = 'd' if self.config.build_debug else '' + +def copy_dll(name): + global copy_bin, dll_suffix + copy_bin(name + dll_suffix + '.dll') + copy_bin(name + dll_suffix + '.pdb') +copy_bin('cache.json') +copy_dll('clDNNPlugin') +copy_dll('HeteroPlugin') +copy_dll('inference_engine') +copy_dll('inference_engine_ir_reader') +copy_dll('inference_engine_legacy') +copy_dll('inference_engine_transformations') +copy_dll('inference_engine_lp_transformations') +copy_dll('MKLDNNPlugin') +copy_dll('myriadPlugin') +copy_dll('ngraph') +copy_bin('plugins.xml') +copy_bin('pcie-ma2x8x.elf') +copy_bin('usb-ma2x8x.mvcmd') +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir) +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb') +sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine') +sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64') +copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include') +if not self.config.build_debug: + copytree(build_bin_dir / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib') + copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib') + copytree(build_bin_dir / 'inference_engine_ir_reader.lib', sysroot_ie_lib_dir / 'inference_engine_ir_reader.lib') + copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib') +else: + copytree(build_bin_dir / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib') + copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib') + copytree(build_bin_dir / 'inference_engine_ir_readerd.lib', sysroot_ie_lib_dir / 'inference_engine_ir_readerd.lib') + copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib') +sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses') +copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE') +copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE') + +# File: opencv-master/platforms/winpack_dldt/build_package.py +import os, sys +import argparse +import glob +import re +import shutil +import subprocess +import time +import logging as log +if sys.version_info[0] == 2: + sys.exit('FATAL: Python 2.x is not supported') +from 
pathlib import Path +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + +class Fail(Exception): + + def __init__(self, text=None): + self.t = text + + def __str__(self): + return 'ERROR' if self.t is None else self.t + +def execute(cmd, cwd=None, shell=False): + try: + log.debug('Executing: %s' % cmd) + log.info('Executing: ' + ' '.join(cmd)) + if cwd: + log.info(' in: %s' % cwd) + retcode = subprocess.call(cmd, shell=shell, cwd=str(cwd) if cwd else None) + if retcode < 0: + raise Fail('Child was terminated by signal: %s' % -retcode) + elif retcode > 0: + raise Fail('Child returned: %s' % retcode) + except OSError as e: + raise Fail('Execution failed: %d / %s' % (e.errno, e.strerror)) + +def check_executable(cmd): + try: + log.debug('Executing: %s' % cmd) + result = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(result, str): + result = result.decode('utf-8') + log.debug('Result: %s' % (result + '\n').split('\n')[0]) + return True + except OSError as e: + log.debug('Failed: %s' % e) + return False + +def rm_one(d): + d = str(d) + d = os.path.abspath(d) + if os.path.exists(d): + if os.path.isdir(d): + log.info('Removing dir: %s', d) + shutil.rmtree(d) + elif os.path.isfile(d): + log.info('Removing file: %s', d) + os.remove(d) + +def prepare_dir(d, clean=False): + d = str(d) + d = os.path.abspath(d) + log.info("Preparing directory: '%s' (clean: %r)", d, clean) + if os.path.exists(d): + if not os.path.isdir(d): + raise Fail('Not a directory: %s' % d) + if clean: + for item in os.listdir(d): + rm_one(os.path.join(d, item)) + else: + os.makedirs(d) + return Path(d) + +def check_dir(d): + d = str(d) + d = os.path.abspath(d) + log.info("Check directory: '%s'", d) + if os.path.exists(d): + if not os.path.isdir(d): + raise Fail('Not a directory: %s' % d) + else: + raise Fail('The directory is missing: %s' % d) + return Path(d) + +def copytree(src, dst, exclude=None): + log.debug('copytree(%s, %s)', src, dst) + src = str(src) + dst = str(dst) + if os.path.isfile(src): + shutil.copy2(src, dst) + return + + def copy_recurse(subdir): + if exclude and subdir in exclude: + log.debug(' skip: %s', subdir) + return + s = os.path.join(src, subdir) + d = os.path.join(dst, subdir) + if os.path.exists(d) or exclude: + if os.path.isfile(s): + shutil.copy2(s, d) + elif os.path.isdir(s): + if not os.path.isdir(d): + os.makedirs(d) + for item in os.listdir(s): + copy_recurse(os.path.join(subdir, item)) + else: + assert False, s + ' => ' + d + elif os.path.isfile(s): + shutil.copy2(s, d) + elif os.path.isdir(s): + shutil.copytree(s, d) + else: + assert False, s + ' => ' + d + copy_recurse('') + +def git_checkout(dst, url, branch, revision, clone_extra_args, noFetch=False): + assert isinstance(dst, Path) + log.info("Git checkout: '%s' (%s @ %s)", dst, url, revision) + if noFetch: + pass + elif not os.path.exists(str(dst / '.git')): + execute(cmd=['git', 'clone'] + (['-b', branch] if branch else []) + clone_extra_args + [url, '.'], cwd=dst) + else: + execute(cmd=['git', 'fetch', 'origin'] + ([branch + ':' + branch] if branch else []), cwd=dst) + execute(cmd=['git', 'reset', '--hard'], cwd=dst) + execute(cmd=['git', 'clean', '-f', '-d'], cwd=dst) + execute(cmd=['git', 'checkout', '--force', '-B', 'winpack_dldt', revision], cwd=dst) + execute(cmd=['git', 'clean', '-f', '-d'], cwd=dst) + execute(cmd=['git', 'submodule', 'init'], cwd=dst) + execute(cmd=['git', 'submodule', 'update', '--force', '--depth=1000'], cwd=dst) + log.info('Git checkout: DONE') + execute(cmd=['git', 'status'], 
cwd=dst) + execute(cmd=['git', 'log', '--max-count=1', 'HEAD'], cwd=dst) + +def git_apply_patch(src_dir, patch_file): + src_dir = str(src_dir) + patch_file = str(patch_file) + assert os.path.exists(patch_file), patch_file + execute(cmd=['git', 'apply', '--3way', '-v', '--ignore-space-change', str(patch_file)], cwd=src_dir) + execute(cmd=['git', '--no-pager', 'diff', 'HEAD'], cwd=src_dir) + os.environ['GIT_AUTHOR_NAME'] = os.environ['GIT_COMMITTER_NAME'] = 'build' + os.environ['GIT_AUTHOR_EMAIL'] = os.environ['GIT_COMMITTER_EMAIL'] = 'build@opencv.org' + execute(cmd=['git', 'commit', '-am', 'apply opencv patch'], cwd=src_dir) + +class BuilderDLDT: + + def __init__(self, config): + self.config = config + cpath = self.config.dldt_config + log.info('DLDT build configuration: %s', cpath) + if not os.path.exists(cpath): + cpath = os.path.join(SCRIPT_DIR, cpath) + if not os.path.exists(cpath): + raise Fail('Config "%s" is missing' % cpath) + self.cpath = Path(cpath) + clean_src_dir = self.config.clean_dldt + if self.config.dldt_src_dir: + assert os.path.exists(self.config.dldt_src_dir), self.config.dldt_src_dir + dldt_dir_name = 'dldt-custom' + self.srcdir = self.config.dldt_src_dir + clean_src_dir = False + else: + assert not self.config.dldt_src_dir + self.init_patchset() + dldt_dir_name = 'dldt-' + self.config.dldt_src_commit + ('/patch-' + self.patch_hashsum if self.patch_hashsum else '') + if self.config.build_debug: + dldt_dir_name += '-debug' + self.srcdir = None + log.info('DLDT directory: %s', dldt_dir_name) + self.outdir = prepare_dir(os.path.join(self.config.build_cache_dir, dldt_dir_name)) + if self.srcdir is None: + self.srcdir = prepare_dir(self.outdir / 'sources', clean=clean_src_dir) + self.build_dir = prepare_dir(self.outdir / 'build', clean=self.config.clean_dldt) + self.sysrootdir = prepare_dir(self.outdir / 'sysroot', clean=self.config.clean_dldt or self.config.clean_dldt_sysroot) + if not (self.config.clean_dldt or self.config.clean_dldt_sysroot): + _ = prepare_dir(self.sysrootdir / 'bin', clean=True) + _ = prepare_dir(self.sysrootdir / 'etc', clean=True) + if self.config.build_subst_drive: + if os.path.exists(self.config.build_subst_drive + ':\\'): + execute(['subst', self.config.build_subst_drive + ':', '/D']) + execute(['subst', self.config.build_subst_drive + ':', str(self.outdir)]) + + def fix_path(p): + return str(p).replace(str(self.outdir), self.config.build_subst_drive + ':') + self.srcdir = Path(fix_path(self.srcdir)) + self.build_dir = Path(fix_path(self.build_dir)) + self.sysrootdir = Path(fix_path(self.sysrootdir)) + + def init_patchset(self): + cpath = self.cpath + self.patch_file = str(cpath / 'patch.config.py') + with open(self.patch_file, 'r') as f: + self.patch_file_contents = f.read() + patch_hashsum = None + try: + import hashlib + patch_hashsum = hashlib.md5(self.patch_file_contents.encode('utf-8')).hexdigest() + except: + log.warn("Can't compute hashsum of patches: %s", self.patch_file) + self.patch_hashsum = self.config.override_patch_hashsum if self.config.override_patch_hashsum else patch_hashsum + + def prepare_sources(self): + if self.config.dldt_src_dir: + log.info('Using DLDT custom repository: %s', self.srcdir) + return + + def do_clone(srcdir, noFetch): + git_checkout(srcdir, self.config.dldt_src_url, self.config.dldt_src_branch, self.config.dldt_src_commit, ['-n', '--depth=100', '--no-single-branch', '--recurse-submodules'] + (self.config.dldt_src_git_clone_extra or []), noFetch=noFetch) + if not os.path.exists(str(self.srcdir / '.git')): + 
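+# Editorial note: first-time checkout path. The script clones DLDT into a shared
+# "reference" cache directory, copies that into srcdir, then calls do_clone() with
+# noFetch=True so the copied git objects are reused instead of re-fetching history.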
log.info('DLDT git checkout through "reference" copy.') + reference_dir = self.config.dldt_reference_dir + if reference_dir is None: + reference_dir = prepare_dir(os.path.join(self.config.build_cache_dir, 'dldt-git-reference-repository')) + do_clone(reference_dir, False) + log.info('DLDT reference git checkout completed. Copying...') + else: + log.info('Using DLDT reference repository. Copying...') + copytree(reference_dir, self.srcdir) + do_clone(self.srcdir, True) + else: + do_clone(self.srcdir, False) + log.info('DLDT git checkout completed. Patching...') + + def applyPatch(patch_file, subdir=None): + if subdir: + log.info('Patching "%s": %s' % (subdir, patch_file)) + else: + log.info('Patching: %s' % patch_file) + git_apply_patch(self.srcdir / subdir if subdir else self.srcdir, self.cpath / patch_file) + exec(compile(self.patch_file_contents, self.patch_file, 'exec')) + log.info('DLDT patches applied') + + def build(self): + self.cmake_path = 'cmake' + build_config = 'Release' if not self.config.build_debug else 'Debug' + cmd = [self.cmake_path, '-G', 'Visual Studio 16 2019', '-A', 'x64'] + cmake_vars = dict(CMAKE_BUILD_TYPE=build_config, TREAT_WARNING_AS_ERROR='OFF', ENABLE_SAMPLES='OFF', ENABLE_TESTS='OFF', BUILD_TESTS='OFF', ENABLE_OPENCV='OFF', ENABLE_GNA='OFF', ENABLE_SPEECH_DEMO='OFF', NGRAPH_DOC_BUILD_ENABLE='OFF', NGRAPH_UNIT_TEST_ENABLE='OFF', NGRAPH_UNIT_TEST_OPENVINO_ENABLE='OFF', NGRAPH_TEST_UTIL_ENABLE='OFF', NGRAPH_ONNX_IMPORT_ENABLE='OFF', CMAKE_INSTALL_PREFIX=str(self.build_dir / 'install'), OUTPUT_ROOT=str(self.build_dir)) + self.build_config_file = str(self.cpath / 'build.config.py') + if os.path.exists(str(self.build_config_file)): + with open(self.build_config_file, 'r') as f: + cfg = f.read() + exec(compile(cfg, str(self.build_config_file), 'exec')) + log.info('DLDT processed build configuration script') + cmd += ['-D%s=%s' % (k, v) for (k, v) in cmake_vars.items() if v is not None] + if self.config.cmake_option_dldt: + cmd += self.config.cmake_option_dldt + cmd.append(str(self.srcdir)) + build_dir = self.build_dir + try: + execute(cmd, cwd=build_dir) + cmd = [self.cmake_path, '--build', '.', '--config', build_config, '--', '/v:n', '/consoleloggerparameters:NoSummary'] + execute(cmd, cwd=build_dir) + cmd = [self.cmake_path, '-DBUILD_TYPE=' + build_config, '-P', 'cmake_install.cmake'] + execute(cmd, cwd=build_dir / 'ngraph') + except: + raise + log.info('DLDT build completed') + + def make_sysroot(self): + cfg_file = str(self.cpath / 'sysroot.config.py') + with open(cfg_file, 'r') as f: + cfg = f.read() + exec(compile(cfg, cfg_file, 'exec')) + log.info('DLDT sysroot preparation completed') + + def cleanup(self): + if self.config.build_subst_drive: + execute(['subst', self.config.build_subst_drive + ':', '/D']) + +class Builder: + + def __init__(self, config): + self.config = config + build_dir_name = 'opencv_build' if not self.config.build_debug else 'opencv_build_debug' + self.build_dir = prepare_dir(Path(self.config.output_dir) / build_dir_name, clean=self.config.clean_opencv) + self.package_dir = prepare_dir(Path(self.config.output_dir) / 'package/opencv', clean=True) + self.install_dir = prepare_dir(self.package_dir / 'build') + self.src_dir = check_dir(self.config.opencv_dir) + + def build(self, builderDLDT): + self.cmake_path = 'cmake' + build_config = 'Release' if not self.config.build_debug else 'Debug' + cmd = [self.cmake_path, '-G', 'Visual Studio 16 2019', '-A', 'x64'] + cmake_vars = dict(CMAKE_BUILD_TYPE=build_config, INSTALL_CREATE_DISTRIB='ON', 
BUILD_opencv_world='OFF', BUILD_TESTS='OFF', BUILD_PERF_TESTS='OFF', ENABLE_CXX11='ON', WITH_INF_ENGINE='ON', WITH_TBB='ON', CPU_BASELINE='AVX2', CMAKE_INSTALL_PREFIX=str(self.install_dir), INSTALL_PDB='ON', INSTALL_PDB_COMPONENT_EXCLUDE_FROM_ALL='OFF', VIDEOIO_PLUGIN_LIST='all', OPENCV_SKIP_CMAKE_ROOT_CONFIG='ON', OPENCV_BIN_INSTALL_PATH='bin', OPENCV_INCLUDE_INSTALL_PATH='include', OPENCV_LIB_INSTALL_PATH='lib', OPENCV_CONFIG_INSTALL_PATH='cmake', OPENCV_3P_LIB_INSTALL_PATH='3rdparty', OPENCV_SAMPLES_SRC_INSTALL_PATH='samples', OPENCV_DOC_INSTALL_PATH='doc', OPENCV_OTHER_INSTALL_PATH='etc', OPENCV_LICENSES_INSTALL_PATH='etc/licenses', OPENCV_INSTALL_DATA_DIR_RELATIVE='../../src/opencv', BUILD_opencv_python2='OFF', BUILD_opencv_python3='ON', PYTHON3_LIMITED_API='ON', OPENCV_PYTHON_INSTALL_PATH='python') + if self.config.dldt_release: + cmake_vars['INF_ENGINE_RELEASE'] = str(self.config.dldt_release) + InferenceEngine_DIR = str(builderDLDT.sysrootdir / 'deployment_tools' / 'inference_engine' / 'cmake') + assert os.path.exists(InferenceEngine_DIR), InferenceEngine_DIR + cmake_vars['InferenceEngine_DIR:PATH'] = InferenceEngine_DIR + ngraph_DIR = str(builderDLDT.sysrootdir / 'ngraph/cmake') + if not os.path.exists(ngraph_DIR): + ngraph_DIR = str(builderDLDT.sysrootdir / 'ngraph/deployment_tools/ngraph/cmake') + assert os.path.exists(ngraph_DIR), ngraph_DIR + cmake_vars['ngraph_DIR:PATH'] = ngraph_DIR + cmake_vars['TBB_DIR:PATH'] = str(builderDLDT.sysrootdir / 'tbb/cmake') + assert os.path.exists(cmake_vars['TBB_DIR:PATH']), cmake_vars['TBB_DIR:PATH'] + if self.config.build_debug: + cmake_vars['CMAKE_BUILD_TYPE'] = 'Debug' + cmake_vars['BUILD_opencv_python3'] = 'OFF' + cmake_vars['OPENCV_INSTALL_APPS_LIST'] = 'all' + if self.config.build_tests: + cmake_vars['BUILD_TESTS'] = 'ON' + cmake_vars['BUILD_PERF_TESTS'] = 'ON' + cmake_vars['BUILD_opencv_ts'] = 'ON' + cmake_vars['INSTALL_TESTS'] = 'ON' + if self.config.build_tests_dnn: + cmake_vars['BUILD_TESTS'] = 'ON' + cmake_vars['BUILD_PERF_TESTS'] = 'ON' + cmake_vars['BUILD_opencv_ts'] = 'ON' + cmake_vars['OPENCV_BUILD_TEST_MODULES_LIST'] = 'dnn' + cmake_vars['OPENCV_BUILD_PERF_TEST_MODULES_LIST'] = 'dnn' + cmake_vars['INSTALL_TESTS'] = 'ON' + cmd += ['-D%s=%s' % (k, v) for (k, v) in cmake_vars.items() if v is not None] + if self.config.cmake_option: + cmd += self.config.cmake_option + cmd.append(str(self.src_dir)) + log.info('Configuring OpenCV...') + execute(cmd, cwd=self.build_dir) + log.info('Building OpenCV...') + cmd = [self.cmake_path, '--build', '.', '--config', build_config, '--target', 'install', '--', '/v:n', '/m:2', '/consoleloggerparameters:NoSummary'] + execute(cmd, cwd=self.build_dir) + log.info('OpenCV build/install completed') + + def copy_sysroot(self, builderDLDT): + log.info('Copy sysroot files') + copytree(builderDLDT.sysrootdir / 'bin', self.install_dir / 'bin') + copytree(builderDLDT.sysrootdir / 'etc', self.install_dir / 'etc') + log.info('Copy sysroot files - DONE') + + def package_sources(self): + package_opencv = prepare_dir(self.package_dir / 'src/opencv', clean=True) + package_opencv = str(package_opencv) + execute(cmd=['git', 'clone', '-s', str(self.src_dir), '.'], cwd=str(package_opencv)) + for item in os.listdir(package_opencv): + if str(item).startswith('.git'): + rm_one(os.path.join(package_opencv, item)) + with open(str(self.package_dir / 'README.md'), 'w') as f: + f.write('See licensing/copying statements in "build/etc/licenses"\n') + f.write('Wiki page: 
https://github.com/opencv/opencv/wiki/Intel%27s-Deep-Learning-Inference-Engine-backend\n') + log.info('Package OpenCV sources - DONE') + +def main(): + dldt_src_url = 'https://github.com/openvinotoolkit/openvino' + dldt_src_commit = '2021.4.2' + dldt_config = None + dldt_release = None + build_cache_dir_default = os.environ.get('BUILD_CACHE_DIR', '.build_cache') + build_subst_drive = os.environ.get('BUILD_SUBST_DRIVE', None) + parser = argparse.ArgumentParser(description='Build OpenCV Windows package with Inference Engine (DLDT)') + parser.add_argument('output_dir', nargs='?', default='.', help='Output directory') + parser.add_argument('opencv_dir', nargs='?', default=os.path.join(SCRIPT_DIR, '../..'), help='Path to OpenCV source dir') + parser.add_argument('--build_cache_dir', default=build_cache_dir_default, help='Build cache directory (sources and binaries cache of build dependencies, default = "%s")' % build_cache_dir_default) + parser.add_argument('--build_subst_drive', default=build_subst_drive, help='Drive letter to workaround Windows limit for 260 symbols in path (error MSB3491)') + parser.add_argument('--cmake_option', action='append', help='Append OpenCV CMake option') + parser.add_argument('--cmake_option_dldt', action='append', help='Append CMake option for DLDT project') + parser.add_argument('--clean_dldt', action='store_true', help='Clean DLDT build and sysroot directories') + parser.add_argument('--clean_dldt_sysroot', action='store_true', help='Clean DLDT sysroot directories') + parser.add_argument('--clean_opencv', action='store_true', help='Clean OpenCV build directory') + parser.add_argument('--build_debug', action='store_true', help='Build debug binaries') + parser.add_argument('--build_tests', action='store_true', help='Build OpenCV tests') + parser.add_argument('--build_tests_dnn', action='store_true', help='Build OpenCV DNN accuracy and performance tests only') + parser.add_argument('--dldt_src_url', default=dldt_src_url, help='DLDT source URL (tag / commit, default: %s)' % dldt_src_url) + parser.add_argument('--dldt_src_branch', help='DLDT checkout branch') + parser.add_argument('--dldt_src_commit', default=dldt_src_commit, help='DLDT source commit / tag (default: %s)' % dldt_src_commit) + parser.add_argument('--dldt_src_git_clone_extra', action='append', help='DLDT git clone extra args') + parser.add_argument('--dldt_release', default=dldt_release, help='DLDT release code for INF_ENGINE_RELEASE, e.g 2021030000 (default: %s)' % dldt_release) + parser.add_argument('--dldt_reference_dir', help='DLDT reference git repository (optional)') + parser.add_argument('--dldt_src_dir', help='DLDT custom source repository (skip git checkout and patching, use for TESTING only)') + parser.add_argument('--dldt_config', default=dldt_config, help='Specify DLDT build configuration (defaults to evaluate from DLDT commit/branch)') + parser.add_argument('--override_patch_hashsum', default='', help='(script debug mode)') + args = parser.parse_args() + log.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=os.environ.get('LOGLEVEL', 'INFO'), datefmt='%Y-%m-%d %H:%M:%S') + log.debug('Args: %s', args) + if not check_executable(['git', '--version']): + sys.exit("FATAL: 'git' is not available") + if not check_executable(['cmake', '--version']): + sys.exit("FATAL: 'cmake' is not available") + if os.path.realpath(args.output_dir) == os.path.realpath(SCRIPT_DIR): + raise Fail('Specify output_dir (building from script directory is not supported)') + if 
os.path.realpath(args.output_dir) == os.path.realpath(args.opencv_dir): + raise Fail('Specify output_dir (building from OpenCV source directory is not supported)') + if args.opencv_dir is not None and (not os.path.isabs(args.opencv_dir)): + args.opencv_dir = os.path.abspath(args.opencv_dir) + if not args.dldt_config: + if str(args.dldt_src_commit).startswith('releases/20'): + args.dldt_config = str(args.dldt_src_commit)[len('releases/'):].replace('/', '.') + if not args.dldt_src_branch: + args.dldt_src_branch = args.dldt_src_commit + elif str(args.dldt_src_branch).startswith('releases/20'): + args.dldt_config = str(args.dldt_src_branch)[len('releases/'):].replace('/', '.') + else: + args.dldt_config = args.dldt_src_commit + _opencv_dir = check_dir(args.opencv_dir) + _outdir = prepare_dir(args.output_dir) + _cachedir = prepare_dir(args.build_cache_dir) + ocv_hooks_dir = os.environ.get('OPENCV_CMAKE_HOOKS_DIR', None) + hooks_dir = os.path.join(SCRIPT_DIR, 'cmake-opencv-checks') + os.environ['OPENCV_CMAKE_HOOKS_DIR'] = hooks_dir if ocv_hooks_dir is None else hooks_dir + ';' + ocv_hooks_dir + builder_dldt = BuilderDLDT(args) + try: + builder_dldt.prepare_sources() + builder_dldt.build() + builder_dldt.make_sysroot() + builder_opencv = Builder(args) + builder_opencv.build(builder_dldt) + builder_opencv.copy_sysroot(builder_dldt) + builder_opencv.package_sources() + except: + builder_dldt.cleanup() + raise + log.info('=====') + log.info('===== Build finished') + log.info('=====') +if __name__ == '__main__': + try: + main() + except: + log.info('FATAL: Error occurred. To investigate problem try to change logging level using LOGLEVEL=DEBUG environment variable.') + raise + +# File: opencv-master/samples/dnn/action_recognition.py +import os +import numpy as np +import cv2 as cv +import argparse +from common import findFile +parser = argparse.ArgumentParser(description='Use this script to run action recognition using 3D ResNet34', formatter_class=argparse.ArgumentDefaultsHelpFormatter) +parser.add_argument('--input', '-i', help='Path to input video file. 
Skip this argument to capture frames from a camera.') +parser.add_argument('--model', required=True, help='Path to model.') +parser.add_argument('--classes', default=findFile('action_recongnition_kinetics.txt'), help='Path to classes list.') + +def get_class_names(path): + class_names = [] + with open(path) as f: + for row in f: + class_names.append(row[:-1]) + return class_names + +def classify_video(video_path, net_path): + SAMPLE_DURATION = 16 + SAMPLE_SIZE = 112 + mean = (114.7748, 107.7354, 99.475) + class_names = get_class_names(args.classes) + net = cv.dnn.readNet(net_path) + net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE) + net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU) + winName = 'Deep learning image classification in OpenCV' + cv.namedWindow(winName, cv.WINDOW_AUTOSIZE) + cap = cv.VideoCapture(video_path) + while cv.waitKey(1) < 0: + frames = [] + for _ in range(SAMPLE_DURATION): + (hasFrame, frame) = cap.read() + if not hasFrame: + exit(0) + frames.append(frame) + inputs = cv.dnn.blobFromImages(frames, 1, (SAMPLE_SIZE, SAMPLE_SIZE), mean, True, crop=True) + inputs = np.transpose(inputs, (1, 0, 2, 3)) + inputs = np.expand_dims(inputs, axis=0) + net.setInput(inputs) + outputs = net.forward() + class_pred = np.argmax(outputs) + label = class_names[class_pred] + for frame in frames: + (labelSize, baseLine) = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1) + cv.rectangle(frame, (0, 10 - labelSize[1]), (labelSize[0], 10 + baseLine), (255, 255, 255), cv.FILLED) + cv.putText(frame, label, (0, 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0)) + cv.imshow(winName, frame) + if cv.waitKey(1) & 255 == ord('q'): + break +if __name__ == '__main__': + (args, _) = parser.parse_known_args() + classify_video(args.input if args.input else 0, args.model) + +# File: opencv-master/samples/dnn/classification.py +import argparse +import cv2 as cv +import numpy as np +from common import * + +def get_args_parser(func_args): + backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) + targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'), help='An optional path to file with preprocessing parameters.') + parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.') + parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet'], help='Optional name of an origin framework of the model. 
Detect it automatically if it is not set.') + parser.add_argument('--std', nargs='*', type=float, help='Preprocess input image by dividing on a standard deviation.') + parser.add_argument('--crop', type=bool, default=False, help='Preprocess input image by center cropping.') + parser.add_argument('--initial_width', type=int, help='Preprocess input image by initial resizing to a specific width.') + parser.add_argument('--initial_height', type=int, help='Preprocess input image by initial resizing to a specific height.') + parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: %d: automatically (by default), %d: Halide language (http://halide-lang.org/), %d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), %d: OpenCV implementation, %d: VKCOM, %d: CUDA" % backends) + parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: %d: CPU target (by default), %d: OpenCL, %d: OpenCL fp16 (half-float precision), %d: NCS2 VPU, %d: HDDL VPU, %d: Vulkan, %d: CUDA, %d: CUDA fp16 (half-float precision)' % targets) + (args, _) = parser.parse_known_args() + add_preproc_args(args.zoo, parser, 'classification') + parser = argparse.ArgumentParser(parents=[parser], description='Use this script to run classification deep learning networks using OpenCV.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) + return parser.parse_args(func_args) + +def main(func_args=None): + args = get_args_parser(func_args) + args.model = findFile(args.model) + args.config = findFile(args.config) + args.classes = findFile(args.classes) + classes = None + if args.classes: + with open(args.classes, 'rt') as f: + classes = f.read().rstrip('\n').split('\n') + net = cv.dnn.readNet(args.model, args.config, args.framework) + net.setPreferableBackend(args.backend) + net.setPreferableTarget(args.target) + winName = 'Deep learning image classification in OpenCV' + cv.namedWindow(winName, cv.WINDOW_NORMAL) + cap = cv.VideoCapture(args.input if args.input else 0) + while cv.waitKey(1) < 0: + (hasFrame, frame) = cap.read() + if not hasFrame: + cv.waitKey() + break + inpWidth = args.width if args.width else frame.shape[1] + inpHeight = args.height if args.height else frame.shape[0] + if args.initial_width and args.initial_height: + frame = cv.resize(frame, (args.initial_width, args.initial_height)) + blob = cv.dnn.blobFromImage(frame, args.scale, (inpWidth, inpHeight), args.mean, args.rgb, crop=args.crop) + if args.std: + blob[0] /= np.asarray(args.std, dtype=np.float32).reshape(3, 1, 1) + net.setInput(blob) + out = net.forward() + out = out.flatten() + classId = np.argmax(out) + confidence = out[classId] + (t, _) = net.getPerfProfile() + label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency()) + cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) + label = '%s: %.4f' % (classes[classId] if classes else 'Class #%d' % classId, confidence) + cv.putText(frame, label, (0, 40), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) + cv.imshow(winName, frame) +if __name__ == '__main__': + main() + +# File: opencv-master/samples/dnn/colorization.py +import numpy as np +import argparse +import cv2 as cv + +def parse_args(): + parser = argparse.ArgumentParser(description='iColor: deep interactive colorization') + parser.add_argument('--input', help='Path to image or video. 
Skip to capture frames from camera') + parser.add_argument('--prototxt', help='Path to colorization_deploy_v2.prototxt', required=True) + parser.add_argument('--caffemodel', help='Path to colorization_release_v2.caffemodel', required=True) + parser.add_argument('--kernel', help='Path to pts_in_hull.npy', required=True) + args = parser.parse_args() + return args +if __name__ == '__main__': + W_in = 224 + H_in = 224 + imshowSize = (640, 480) + args = parse_args() + net = cv.dnn.readNetFromCaffe(args.prototxt, args.caffemodel) + pts_in_hull = np.load(args.kernel) + pts_in_hull = pts_in_hull.transpose().reshape(2, 313, 1, 1) + net.getLayer(net.getLayerId('class8_ab')).blobs = [pts_in_hull.astype(np.float32)] + net.getLayer(net.getLayerId('conv8_313_rh')).blobs = [np.full([1, 313], 2.606, np.float32)] + if args.input: + cap = cv.VideoCapture(args.input) + else: + cap = cv.VideoCapture(0) + while cv.waitKey(1) < 0: + (hasFrame, frame) = cap.read() + if not hasFrame: + cv.waitKey() + break + img_rgb = (frame[:, :, [2, 1, 0]] * 1.0 / 255).astype(np.float32) + img_lab = cv.cvtColor(img_rgb, cv.COLOR_RGB2Lab) + img_l = img_lab[:, :, 0] + (H_orig, W_orig) = img_rgb.shape[:2] + img_rs = cv.resize(img_rgb, (W_in, H_in)) + img_lab_rs = cv.cvtColor(img_rs, cv.COLOR_RGB2Lab) + img_l_rs = img_lab_rs[:, :, 0] + img_l_rs -= 50 + net.setInput(cv.dnn.blobFromImage(img_l_rs)) + ab_dec = net.forward()[0, :, :, :].transpose((1, 2, 0)) + (H_out, W_out) = ab_dec.shape[:2] + ab_dec_us = cv.resize(ab_dec, (W_orig, H_orig)) + img_lab_out = np.concatenate((img_l[:, :, np.newaxis], ab_dec_us), axis=2) + img_bgr_out = np.clip(cv.cvtColor(img_lab_out, cv.COLOR_Lab2BGR), 0, 1) + frame = cv.resize(frame, imshowSize) + cv.imshow('origin', frame) + cv.imshow('gray', cv.cvtColor(frame, cv.COLOR_RGB2GRAY)) + cv.imshow('colorized', cv.resize(img_bgr_out, imshowSize)) + +# File: opencv-master/samples/dnn/common.py +import sys +import os +import cv2 as cv + +def add_argument(zoo, parser, name, help, required=False, default=None, type=None, action=None, nargs=None): + if len(sys.argv) <= 1: + return + modelName = sys.argv[1] + if os.path.isfile(zoo): + fs = cv.FileStorage(zoo, cv.FILE_STORAGE_READ) + node = fs.getNode(modelName) + if not node.empty(): + value = node.getNode(name) + if not value.empty(): + if value.isReal(): + default = value.real() + elif value.isString(): + default = value.string() + elif value.isInt(): + default = int(value.real()) + elif value.isSeq(): + default = [] + for i in range(value.size()): + v = value.at(i) + if v.isInt(): + default.append(int(v.real())) + elif v.isReal(): + default.append(v.real()) + else: + print('Unexpected value format') + exit(0) + else: + print('Unexpected field format') + exit(0) + required = False + if action == 'store_true': + default = 1 if default == 'true' else 0 if default == 'false' else default + assert default is None or default == 0 or default == 1 + parser.add_argument('--' + name, required=required, help=help, default=bool(default), action=action) + else: + parser.add_argument('--' + name, required=required, help=help, default=default, action=action, nargs=nargs, type=type) + +def add_preproc_args(zoo, parser, sample): + aliases = [] + if os.path.isfile(zoo): + fs = cv.FileStorage(zoo, cv.FILE_STORAGE_READ) + root = fs.root() + for name in root.keys(): + model = root.getNode(name) + if model.getNode('sample').string() == sample: + aliases.append(name) + parser.add_argument('alias', nargs='?', choices=aliases, help='An alias name of model to extract preprocessing 
parameters from models.yml file.') + add_argument(zoo, parser, 'model', required=True, help='Path to a binary file of model contains trained weights. It could be a file with extensions .caffemodel (Caffe), .pb (TensorFlow), .t7 or .net (Torch), .weights (Darknet), .bin (OpenVINO)') + add_argument(zoo, parser, 'config', help='Path to a text file of model contains network configuration. It could be a file with extensions .prototxt (Caffe), .pbtxt or .config (TensorFlow), .cfg (Darknet), .xml (OpenVINO)') + add_argument(zoo, parser, 'mean', nargs='+', type=float, default=[0, 0, 0], help='Preprocess input image by subtracting mean values. Mean values should be in BGR order.') + add_argument(zoo, parser, 'scale', type=float, default=1.0, help='Preprocess input image by multiplying on a scale factor.') + add_argument(zoo, parser, 'width', type=int, help='Preprocess input image by resizing to a specific width.') + add_argument(zoo, parser, 'height', type=int, help='Preprocess input image by resizing to a specific height.') + add_argument(zoo, parser, 'rgb', action='store_true', help='Indicate that model works with RGB input images instead BGR ones.') + add_argument(zoo, parser, 'classes', help='Optional path to a text file with names of classes to label detected objects.') + add_argument(zoo, parser, 'postprocessing', type=str, help='Post-processing kind depends on model topology.') + add_argument(zoo, parser, 'background_label_id', type=int, default=-1, help='An index of background class in predictions. If not negative, exclude such class from list of classes.') + +def findFile(filename): + if filename: + if os.path.exists(filename): + return filename + fpath = cv.samples.findFile(filename, False) + if fpath: + return fpath + samplesDataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'dnn') + if os.path.exists(os.path.join(samplesDataDir, filename)): + return os.path.join(samplesDataDir, filename) + for path in ['OPENCV_DNN_TEST_DATA_PATH', 'OPENCV_TEST_DATA_PATH']: + try: + extraPath = os.environ[path] + absPath = os.path.join(extraPath, 'dnn', filename) + if os.path.exists(absPath): + return absPath + except KeyError: + pass + print('File ' + filename + ' not found! 
Please specify a path to /opencv_extra/testdata in OPENCV_DNN_TEST_DATA_PATH environment variable or pass a full path to model.') + exit(0) + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/common/abstract_model.py +from abc import ABC, ABCMeta, abstractmethod + +class AbstractModel(ABC): + + @abstractmethod + def get_prepared_models(self): + pass + +class Framework(object): + in_blob_name = '' + out_blob_name = '' + __metaclass__ = ABCMeta + + @abstractmethod + def get_name(self): + pass + + @abstractmethod + def get_output(self, input_blob): + pass + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/common/evaluation/classification/cls_accuracy_evaluator.py +import sys +import time +import numpy as np +from ...utils import get_final_summary_info + +class ClsAccEvaluation: + log = sys.stdout + img_classes = {} + batch_size = 0 + + def __init__(self, log_path, img_classes_file, batch_size): + self.log = open(log_path, 'w') + self.img_classes = self.read_classes(img_classes_file) + self.batch_size = batch_size + self.general_quality_metric = [] + self.general_inference_time = [] + + @staticmethod + def read_classes(img_classes_file): + result = {} + with open(img_classes_file) as file: + for l in file.readlines(): + result[l.split()[0]] = int(l.split()[1]) + return result + + def get_correct_answers(self, img_list, net_output_blob): + correct_answers = 0 + for i in range(len(img_list)): + indexes = np.argsort(net_output_blob[i])[-5:] + correct_index = self.img_classes[img_list[i]] + if correct_index in indexes: + correct_answers += 1 + return correct_answers + + def process(self, frameworks, data_fetcher): + sorted_imgs_names = sorted(self.img_classes.keys()) + correct_answers = [0] * len(frameworks) + samples_handled = 0 + blobs_l1_diff = [0] * len(frameworks) + blobs_l1_diff_count = [0] * len(frameworks) + blobs_l_inf_diff = [sys.float_info.min] * len(frameworks) + inference_time = [0.0] * len(frameworks) + for x in range(0, len(sorted_imgs_names), self.batch_size): + sublist = sorted_imgs_names[x:x + self.batch_size] + batch = data_fetcher.get_batch(sublist) + samples_handled += len(sublist) + fw_accuracy = [] + fw_time = [] + frameworks_out = [] + for i in range(len(frameworks)): + start = time.time() + out = frameworks[i].get_output(batch) + end = time.time() + correct_answers[i] += self.get_correct_answers(sublist, out) + fw_accuracy.append(100 * correct_answers[i] / float(samples_handled)) + frameworks_out.append(out) + inference_time[i] += end - start + fw_time.append(inference_time[i] / samples_handled * 1000) + print(samples_handled, 'Accuracy for', frameworks[i].get_name() + ':', fw_accuracy[i], file=self.log) + print('Inference time, ms ', frameworks[i].get_name(), fw_time[i], file=self.log) + self.general_quality_metric.append(fw_accuracy) + self.general_inference_time.append(fw_time) + for i in range(1, len(frameworks)): + log_str = frameworks[0].get_name() + ' vs ' + frameworks[i].get_name() + ':' + diff = np.abs(frameworks_out[0] - frameworks_out[i]) + l1_diff = np.sum(diff) / diff.size + print(samples_handled, 'L1 difference', log_str, l1_diff, file=self.log) + blobs_l1_diff[i] += l1_diff + blobs_l1_diff_count[i] += 1 + if np.max(diff) > blobs_l_inf_diff[i]: + blobs_l_inf_diff[i] = np.max(diff) + print(samples_handled, 'L_INF difference', log_str, blobs_l_inf_diff[i], file=self.log) + self.log.flush() + for i in range(1, len(blobs_l1_diff)): + log_str = frameworks[0].get_name() + ' vs ' + frameworks[i].get_name() + ':' + print('Final l1 
diff', log_str, blobs_l1_diff[i] / blobs_l1_diff_count[i], file=self.log) + print(get_final_summary_info(self.general_quality_metric, self.general_inference_time, 'accuracy'), file=self.log) + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/common/evaluation/classification/cls_data_fetcher.py +import os +from abc import ABCMeta, abstractmethod +import cv2 +import numpy as np +from ...img_utils import read_rgb_img, get_pytorch_preprocess +from ...test.configs.default_preprocess_config import PYTORCH_RSZ_HEIGHT, PYTORCH_RSZ_WIDTH + +class DataFetch(object): + imgs_dir = '' + frame_size = 0 + bgr_to_rgb = False + __metaclass__ = ABCMeta + + @abstractmethod + def preprocess(self, img): + pass + + @staticmethod + def reshape_img(img): + img = img[:, :, 0:3].transpose(2, 0, 1) + return np.expand_dims(img, 0) + + def center_crop(self, img): + cols = img.shape[1] + rows = img.shape[0] + y1 = round((rows - self.frame_size) / 2) + y2 = round(y1 + self.frame_size) + x1 = round((cols - self.frame_size) / 2) + x2 = round(x1 + self.frame_size) + return img[y1:y2, x1:x2] + + def initial_preprocess(self, img): + min_dim = min(img.shape[-3], img.shape[-2]) + resize_ratio = self.frame_size / float(min_dim) + img = cv2.resize(img, (0, 0), fx=resize_ratio, fy=resize_ratio) + img = self.center_crop(img) + return img + + def get_preprocessed_img(self, img_path): + image_data = read_rgb_img(img_path, self.bgr_to_rgb) + image_data = self.preprocess(image_data) + return self.reshape_img(image_data) + + def get_batch(self, img_names): + assert type(img_names) is list + batch = np.zeros((len(img_names), 3, self.frame_size, self.frame_size)).astype(np.float32) + for i in range(len(img_names)): + img_name = img_names[i] + img_file = os.path.join(self.imgs_dir, img_name) + assert os.path.exists(img_file) + batch[i] = self.get_preprocessed_img(img_file) + return batch + +class PyTorchPreprocessedFetch(DataFetch): + + def __init__(self, pytorch_cls_config, preprocess_input=None): + self.imgs_dir = pytorch_cls_config.img_root_dir + self.frame_size = pytorch_cls_config.frame_size + self.bgr_to_rgb = pytorch_cls_config.bgr_to_rgb + self.preprocess_input = preprocess_input + + def preprocess(self, img): + img = cv2.resize(img, (PYTORCH_RSZ_WIDTH, PYTORCH_RSZ_HEIGHT)) + img = self.center_crop(img) + if self.preprocess_input: + return self.preprocess_input(img) + return get_pytorch_preprocess(img) + +class TFPreprocessedFetch(DataFetch): + + def __init__(self, tf_cls_config, preprocess_input): + self.imgs_dir = tf_cls_config.img_root_dir + self.frame_size = tf_cls_config.frame_size + self.bgr_to_rgb = tf_cls_config.bgr_to_rgb + self.preprocess_input = preprocess_input + + def preprocess(self, img): + img = self.initial_preprocess(img) + return self.preprocess_input(img) + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/common/img_utils.py +import cv2 +import numpy as np +from .test.configs.default_preprocess_config import BASE_IMG_SCALE_FACTOR + +def read_rgb_img(img_file, is_bgr_to_rgb=True): + img = cv2.imread(img_file, cv2.IMREAD_COLOR) + if is_bgr_to_rgb: + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + return img + +def get_pytorch_preprocess(img): + img = img.astype(np.float32) + img *= BASE_IMG_SCALE_FACTOR + img -= [0.485, 0.456, 0.406] + img /= [0.229, 0.224, 0.225] + return img + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/common/utils.py +import argparse +import importlib.util +import os +import random +import matplotlib.pyplot as plt +import numpy as np 
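+ # NOTE: this shared helper module imports both TensorFlow and PyTorch at module
+ # level, so importing anything from it requires both frameworks to be installed.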
+import tensorflow as tf +import torch +from .test.configs.test_config import CommonConfig +SEED_VAL = 42 +DNN_LIB = 'DNN' +MODEL_PATH_ROOT = os.path.join(CommonConfig().output_data_root_dir, '{}/models') + +def get_full_model_path(lib_name, model_full_name): + model_path = MODEL_PATH_ROOT.format(lib_name) + return {'path': model_path, 'full_path': os.path.join(model_path, model_full_name)} + +def plot_acc(data_list, experiment_name): + plt.figure(figsize=[8, 6]) + plt.plot(data_list[:, 0], 'r', linewidth=2.5, label='Original Model') + plt.plot(data_list[:, 1], 'b', linewidth=2.5, label='Converted DNN Model') + plt.xlabel('Iterations ', fontsize=15) + plt.ylabel('Time (ms)', fontsize=15) + plt.title(experiment_name, fontsize=15) + plt.legend() + full_path_to_fig = os.path.join(CommonConfig().output_data_root_dir, experiment_name + '.png') + plt.savefig(full_path_to_fig, bbox_inches='tight') + +def get_final_summary_info(general_quality_metric, general_inference_time, metric_name): + general_quality_metric = np.array(general_quality_metric) + general_inference_time = np.array(general_inference_time) + summary_line = ('===== End of processing. General results:\n' + '\t* mean {} for the original model: {}\n' + '\t* mean time (min) for the original model inferences: {}\n' + '\t* mean {} for the DNN model: {}\n' + '\t* mean time (min) for the DNN model inferences: {}\n').format(metric_name, np.mean(general_quality_metric[:, 0]), np.mean(general_inference_time[:, 0]) / 60000, metric_name, np.mean(general_quality_metric[:, 1]), np.mean(general_inference_time[:, 1]) / 60000) + return summary_line + +def set_common_reproducibility(): + random.seed(SEED_VAL) + np.random.seed(SEED_VAL) + +def set_pytorch_env(): + set_common_reproducibility() + torch.manual_seed(SEED_VAL) + torch.set_printoptions(precision=10) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(SEED_VAL) + torch.backends.cudnn_benchmark_enabled = False + torch.backends.cudnn.deterministic = True + +def set_tf_env(is_use_gpu=True): + set_common_reproducibility() + tf.random.set_seed(SEED_VAL) + os.environ['TF_DETERMINISTIC_OPS'] = '1' + if tf.config.list_physical_devices('GPU') and is_use_gpu: + gpu_devices = tf.config.list_physical_devices('GPU') + tf.config.experimental.set_visible_devices(gpu_devices[0], 'GPU') + tf.config.experimental.set_memory_growth(gpu_devices[0], True) + os.environ['TF_USE_CUDNN'] = '1' + else: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +def str_bool(input_val): + if input_val.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif input_val.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value was expected') + +def get_formatted_model_list(model_list): + note_line = 'Please, choose the model from the below list:\n' + spaces_to_set = ' ' * (len(note_line) - 2) + return note_line + ''.join([spaces_to_set, '{} \n'] * len(model_list)).format(*model_list) + +def model_str(model_list): + + def type_model_list(input_val): + if input_val.lower() in model_list: + return input_val.lower() + else: + raise argparse.ArgumentTypeError('The model is currently unavailable for test.\n' + get_formatted_model_list(model_list)) + return type_model_list + +def get_test_module(test_module_name, test_module_path): + module_spec = importlib.util.spec_from_file_location(test_module_name, test_module_path) + test_module = importlib.util.module_from_spec(module_spec) + module_spec.loader.exec_module(test_module) + return test_module + +def create_parser(): + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + 
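+ # str_bool (defined above) is used as the argparse type for the flags below:
+ # a plain type=bool would treat any non-empty string, including 'false', as True.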
parser.add_argument('--test', type=str_bool, help="Define whether you'd like to run the model with OpenCV for testing.", default=False) + parser.add_argument('--default_img_preprocess', type=str_bool, help="Define whether you'd like to preprocess the input image with defined PyTorch or TF functions for model test with OpenCV.", default=False) + parser.add_argument('--evaluate', type=str_bool, help="Define whether you'd like to run evaluation of the models (ex.: TF vs OpenCV networks).", default=True) + return parser + +def create_extended_parser(model_list): + parser = create_parser() + parser.add_argument('--model_name', type=model_str(model_list=model_list), help='\nDefine the model name to test.\n' + get_formatted_model_list(model_list), required=True) + return parser + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/paddlepaddle/paddle_humanseg.py +import os +import paddlehub.vision.transforms as T +import numpy as np +import cv2 as cv + +def get_color_map_list(num_classes): + num_classes += 1 + color_map = num_classes * [0, 0, 0] + for i in range(0, num_classes): + j = 0 + lab = i + while lab: + color_map[i * 3] |= (lab >> 0 & 1) << 7 - j + color_map[i * 3 + 1] |= (lab >> 1 & 1) << 7 - j + color_map[i * 3 + 2] |= (lab >> 2 & 1) << 7 - j + j += 1 + lab >>= 3 + color_map = color_map[3:] + return color_map + +def visualize(image, result, save_dir=None, weight=0.6): + color_map = get_color_map_list(256) + color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)] + color_map = np.array(color_map).astype('uint8') + c1 = cv.LUT(result, color_map[:, 0]) + c2 = cv.LUT(result, color_map[:, 1]) + c3 = cv.LUT(result, color_map[:, 2]) + pseudo_img = np.dstack((c1, c2, c3)) + im = cv.imread(image) + vis_result = cv.addWeighted(im, weight, pseudo_img, 1 - weight, 0) + if save_dir is not None: + if not os.path.exists(save_dir): + os.makedirs(save_dir) + image_name = os.path.split(image)[-1] + out_path = os.path.join(save_dir, image_name) + cv.imwrite(out_path, vis_result) + else: + return vis_result + +def preprocess(image_path): + transforms = T.Compose([T.Resize((192, 192)), T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])], to_rgb=True) + return np.expand_dims(transforms(image_path), axis=0) +if __name__ == '__main__': + img_path = '../../../../data/messi5.jpg' + net = cv.dnn.readNetFromONNX('humanseg_hrnet18_tiny.onnx') + im = preprocess(img_path) + net.setInput(im) + result = net.forward(['save_infer_model/scale_0.tmp_1']) + image = cv.imread(img_path) + (r, c, _) = image.shape + result = np.argmax(result[0], axis=1).astype(np.uint8) + result = cv.resize(result[0, :, :], dsize=(c, r), interpolation=cv.INTER_NEAREST) + print('grid_image.shape is: ', result.shape) + folder_path = 'data' + if not os.path.exists(folder_path): + os.makedirs(folder_path) + file_path = os.path.join(folder_path, '%s.jpg' % 'result_test_human') + result_color = visualize(img_path, result) + cv.imwrite(file_path, result_color) + print('%s saved' % file_path) + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/paddlepaddle/paddle_resnet50.py +import paddle +import paddlehub as hub +import paddlehub.vision.transforms as T +import cv2 as cv +import numpy as np + +def preprocess(image_path): + transforms = T.Compose([T.Resize((256, 256)), T.CenterCrop(224), T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])], to_rgb=True) + return np.expand_dims(transforms(image_path), axis=0) + +def export_onnx_resnet50(save_path): + model = 
hub.Module(name='resnet50_vd_imagenet_ssld') + input_spec = paddle.static.InputSpec([1, 3, 224, 224], 'float32', 'image') + paddle.onnx.export(model, save_path, input_spec=[input_spec], opset_version=10) +if __name__ == '__main__': + save_path = './resnet50' + image_file = './data/cat.jpg' + labels = open('./data/labels.txt').read().strip().split('\n') + model = export_onnx_resnet50(save_path) + net = cv.dnn.readNetFromONNX(save_path + '.onnx') + im = preprocess(image_file) + net.setInput(im) + result = net.forward(['save_infer_model/scale_0.tmp_0']) + class_id = np.argmax(result[0]) + label = labels[class_id] + print('Image: {}'.format(image_file)) + print('Predict Category: {}'.format(label)) + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/pytorch/classification/py_to_py_cls.py +from torchvision import models +from ..pytorch_model import PyTorchModelPreparer, PyTorchModelProcessor, PyTorchDnnModelProcessor +from ...common.evaluation.classification.cls_data_fetcher import PyTorchPreprocessedFetch +from ...common.test.cls_model_test_pipeline import ClsModelTestPipeline +from ...common.test.configs.default_preprocess_config import pytorch_resize_input_blob +from ...common.test.configs.test_config import TestClsConfig +from ...common.utils import set_pytorch_env, create_extended_parser +model_dict = {'alexnet': models.alexnet, 'vgg11': models.vgg11, 'vgg13': models.vgg13, 'vgg16': models.vgg16, 'vgg19': models.vgg19, 'resnet18': models.resnet18, 'resnet34': models.resnet34, 'resnet50': models.resnet50, 'resnet101': models.resnet101, 'resnet152': models.resnet152, 'squeezenet1_0': models.squeezenet1_0, 'squeezenet1_1': models.squeezenet1_1, 'resnext50_32x4d': models.resnext50_32x4d, 'resnext101_32x8d': models.resnext101_32x8d, 'wide_resnet50_2': models.wide_resnet50_2, 'wide_resnet101_2': models.wide_resnet101_2} + +class PyTorchClsModel(PyTorchModelPreparer): + + def __init__(self, height, width, model_name, original_model): + super(PyTorchClsModel, self).__init__(height, width, model_name, original_model) + +def main(): + set_pytorch_env() + parser = create_extended_parser(list(model_dict.keys())) + cmd_args = parser.parse_args() + model_name = cmd_args.model_name + cls_model = PyTorchClsModel(height=TestClsConfig().frame_size, width=TestClsConfig().frame_size, model_name=model_name, original_model=model_dict[model_name](pretrained=True)) + pytorch_cls_pipeline = ClsModelTestPipeline(network_model=cls_model, model_processor=PyTorchModelProcessor, dnn_model_processor=PyTorchDnnModelProcessor, data_fetcher=PyTorchPreprocessedFetch, cls_args_parser=parser, default_input_blob_preproc=pytorch_resize_input_blob) + pytorch_cls_pipeline.init_test_pipeline() +if __name__ == '__main__': + main() + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/pytorch/classification/py_to_py_resnet50.py +import os +import cv2 +import numpy as np +import torch +import torch.onnx +from torch.autograd import Variable +from torchvision import models + +def get_pytorch_onnx_model(original_model): + onnx_model_path = 'models' + onnx_model_name = 'resnet50.onnx' + os.makedirs(onnx_model_path, exist_ok=True) + full_model_path = os.path.join(onnx_model_path, onnx_model_name) + generated_input = Variable(torch.randn(1, 3, 224, 224)) + torch.onnx.export(original_model, generated_input, full_model_path, verbose=True, input_names=['input'], output_names=['output'], opset_version=11) + return full_model_path + +def get_preprocessed_img(img_path): + input_img = cv2.imread(img_path, 
cv2.IMREAD_COLOR) + input_img = input_img.astype(np.float32) + input_img = cv2.resize(input_img, (256, 256)) + mean = np.array([0.485, 0.456, 0.406]) * 255.0 + scale = 1 / 255.0 + std = [0.229, 0.224, 0.225] + input_blob = cv2.dnn.blobFromImage(image=input_img, scalefactor=scale, size=(224, 224), mean=mean, swapRB=True, crop=True) + input_blob[0] /= np.asarray(std, dtype=np.float32).reshape(3, 1, 1) + return input_blob + +def get_imagenet_labels(labels_path): + with open(labels_path) as f: + imagenet_labels = [line.strip() for line in f.readlines()] + return imagenet_labels + +def get_opencv_dnn_prediction(opencv_net, preproc_img, imagenet_labels): + opencv_net.setInput(preproc_img) + out = opencv_net.forward() + print('OpenCV DNN prediction: \n') + print('* shape: ', out.shape) + imagenet_class_id = np.argmax(out) + confidence = out[0][imagenet_class_id] + print('* class ID: {}, label: {}'.format(imagenet_class_id, imagenet_labels[imagenet_class_id])) + print('* confidence: {:.4f}'.format(confidence)) + +def get_pytorch_dnn_prediction(original_net, preproc_img, imagenet_labels): + original_net.eval() + preproc_img = torch.FloatTensor(preproc_img) + with torch.no_grad(): + out = original_net(preproc_img) + print('\nPyTorch model prediction: \n') + print('* shape: ', out.shape) + imagenet_class_id = torch.argmax(out, axis=1).item() + print('* class ID: {}, label: {}'.format(imagenet_class_id, imagenet_labels[imagenet_class_id])) + confidence = out[0][imagenet_class_id] + print('* confidence: {:.4f}'.format(confidence.item())) + +def main(): + original_model = models.resnet50(pretrained=True) + full_model_path = get_pytorch_onnx_model(original_model) + opencv_net = cv2.dnn.readNetFromONNX(full_model_path) + print('OpenCV model was successfully read. Layer IDs: \n', opencv_net.getLayerNames()) + input_img = get_preprocessed_img('../data/squirrel_cls.jpg') + imagenet_labels = get_imagenet_labels('../data/dnn/classification_classes_ILSVRC2012.txt') + get_opencv_dnn_prediction(opencv_net, input_img, imagenet_labels) + get_pytorch_dnn_prediction(original_model, input_img, imagenet_labels) +if __name__ == '__main__': + main() + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/pytorch/classification/py_to_py_resnet50_onnx.py +import os +import torch +import torch.onnx +from torch.autograd import Variable +from torchvision import models + +def get_pytorch_onnx_model(original_model): + onnx_model_path = 'models' + onnx_model_name = 'resnet50.onnx' + os.makedirs(onnx_model_path, exist_ok=True) + full_model_path = os.path.join(onnx_model_path, onnx_model_name) + generated_input = Variable(torch.randn(1, 3, 224, 224)) + torch.onnx.export(original_model, generated_input, full_model_path, verbose=True, input_names=['input'], output_names=['output'], opset_version=11) + return full_model_path + +def main(): + original_model = models.resnet50(pretrained=True) + full_model_path = get_pytorch_onnx_model(original_model) + print('PyTorch ResNet-50 model was successfully converted: ', full_model_path) +if __name__ == '__main__': + main() + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/pytorch/pytorch_model.py +import os +import cv2 +import torch.onnx +from torch.autograd import Variable +from ..common.abstract_model import AbstractModel, Framework +from ..common.utils import DNN_LIB, get_full_model_path +CURRENT_LIB = 'PyTorch' +MODEL_FORMAT = '.onnx' + +class PyTorchModelPreparer(AbstractModel): + + def __init__(self, height, width, model_name='default', 
original_model=object, batch_size=1, default_input_name='input', default_output_name='output'): + self._height = height + self._width = width + self._model_name = model_name + self._original_model = original_model + self._batch_size = batch_size + self._default_input_name = default_input_name + self._default_output_name = default_output_name + self.model_path = self._set_model_path() + self._dnn_model = self._set_dnn_model() + + def _set_dnn_model(self): + generated_input = Variable(torch.randn(self._batch_size, 3, self._height, self._width)) + os.makedirs(self.model_path['path'], exist_ok=True) + torch.onnx.export(self._original_model, generated_input, self.model_path['full_path'], verbose=True, input_names=[self._default_input_name], output_names=[self._default_output_name], opset_version=11) + return cv2.dnn.readNetFromONNX(self.model_path['full_path']) + + def _set_model_path(self): + model_to_save = self._model_name + MODEL_FORMAT + return get_full_model_path(CURRENT_LIB.lower(), model_to_save) + + def get_prepared_models(self): + return {CURRENT_LIB + ' ' + self._model_name: self._original_model, DNN_LIB + ' ' + self._model_name: self._dnn_model} + +class PyTorchModelProcessor(Framework): + + def __init__(self, prepared_model, model_name): + self._prepared_model = prepared_model + self._name = model_name + + def get_output(self, input_blob): + tensor = torch.FloatTensor(input_blob) + self._prepared_model.eval() + with torch.no_grad(): + model_out = self._prepared_model(tensor) + if len(model_out) == 2: + model_out = model_out['out'] + out = model_out.detach().numpy() + return out + + def get_name(self): + return self._name + +class PyTorchDnnModelProcessor(Framework): + + def __init__(self, prepared_dnn_model, model_name): + self._prepared_dnn_model = prepared_dnn_model + self._name = model_name + + def get_output(self, input_blob): + self._prepared_dnn_model.setInput(input_blob, '') + return self._prepared_dnn_model.forward() + + def get_name(self): + return self._name + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/tf/classification/py_to_py_cls.py +from tensorflow.keras.applications import VGG16, vgg16, VGG19, vgg19, ResNet50, resnet, ResNet101, ResNet152, DenseNet121, densenet, DenseNet169, DenseNet201, InceptionResNetV2, inception_resnet_v2, InceptionV3, inception_v3, MobileNet, mobilenet, MobileNetV2, mobilenet_v2, NASNetLarge, nasnet, NASNetMobile, Xception, xception +from ..tf_model import TFModelPreparer +from ..tf_model import TFModelProcessor, TFDnnModelProcessor +from ...common.evaluation.classification.cls_data_fetcher import TFPreprocessedFetch +from ...common.test.cls_model_test_pipeline import ClsModelTestPipeline +from ...common.test.configs.default_preprocess_config import tf_input_blob, pytorch_input_blob, tf_model_blob_caffe_mode +from ...common.utils import set_tf_env, create_extended_parser +model_dict = {'vgg16': [VGG16, vgg16, tf_model_blob_caffe_mode], 'vgg19': [VGG19, vgg19, tf_model_blob_caffe_mode], 'resnet50': [ResNet50, resnet, tf_model_blob_caffe_mode], 'resnet101': [ResNet101, resnet, tf_model_blob_caffe_mode], 'resnet152': [ResNet152, resnet, tf_model_blob_caffe_mode], 'densenet121': [DenseNet121, densenet, pytorch_input_blob], 'densenet169': [DenseNet169, densenet, pytorch_input_blob], 'densenet201': [DenseNet201, densenet, pytorch_input_blob], 'inceptionresnetv2': [InceptionResNetV2, inception_resnet_v2, tf_input_blob], 'inceptionv3': [InceptionV3, inception_v3, tf_input_blob], 'mobilenet': [MobileNet, mobilenet, tf_input_blob], 
'mobilenetv2': [MobileNetV2, mobilenet_v2, tf_input_blob], 'nasnetlarge': [NASNetLarge, nasnet, tf_input_blob], 'nasnetmobile': [NASNetMobile, nasnet, tf_input_blob], 'xception': [Xception, xception, tf_input_blob]} +CNN_CLASS_ID = 0 +CNN_UTILS_ID = 1 +DEFAULT_BLOB_PARAMS_ID = 2 + +class TFClsModel(TFModelPreparer): + + def __init__(self, model_name, original_model): + super(TFClsModel, self).__init__(model_name, original_model) + +def main(): + set_tf_env() + parser = create_extended_parser(list(model_dict.keys())) + cmd_args = parser.parse_args() + model_name = cmd_args.model_name + model_name_val = model_dict[model_name] + cls_model = TFClsModel(model_name=model_name, original_model=model_name_val[CNN_CLASS_ID](include_top=True, weights='imagenet')) + tf_cls_pipeline = ClsModelTestPipeline(network_model=cls_model, model_processor=TFModelProcessor, dnn_model_processor=TFDnnModelProcessor, data_fetcher=TFPreprocessedFetch, img_processor=model_name_val[CNN_UTILS_ID].preprocess_input, cls_args_parser=parser, default_input_blob_preproc=model_name_val[DEFAULT_BLOB_PARAMS_ID]) + tf_cls_pipeline.init_test_pipeline() +if __name__ == '__main__': + main() + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/tf/classification/py_to_py_mobilenet.py +import os +import cv2 +import numpy as np +import tensorflow as tf +from tensorflow.keras.applications import MobileNet +from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 +from ...common.utils import set_tf_env + +def get_tf_model_proto(tf_model): + pb_model_path = 'models' + pb_model_name = 'mobilenet.pb' + os.makedirs(pb_model_path, exist_ok=True) + tf_model_graph = tf.function(lambda x: tf_model(x)) + tf_model_graph = tf_model_graph.get_concrete_function(tf.TensorSpec(tf_model.inputs[0].shape, tf_model.inputs[0].dtype)) + frozen_tf_func = convert_variables_to_constants_v2(tf_model_graph) + frozen_tf_func.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_tf_func.graph, logdir=pb_model_path, name=pb_model_name, as_text=False) + return os.path.join(pb_model_path, pb_model_name) + +def get_preprocessed_img(img_path): + input_img = cv2.imread(img_path, cv2.IMREAD_COLOR) + input_img = input_img.astype(np.float32) + mean = np.array([1.0, 1.0, 1.0]) * 127.5 + scale = 1 / 127.5 + input_blob = cv2.dnn.blobFromImage(image=input_img, scalefactor=scale, size=(224, 224), mean=mean, swapRB=True, crop=True) + print('Input blob shape: {}\n'.format(input_blob.shape)) + return input_blob + +def get_imagenet_labels(labels_path): + with open(labels_path) as f: + imagenet_labels = [line.strip() for line in f.readlines()] + return imagenet_labels + +def get_opencv_dnn_prediction(opencv_net, preproc_img, imagenet_labels): + opencv_net.setInput(preproc_img) + out = opencv_net.forward() + print('OpenCV DNN prediction: \n') + print('* shape: ', out.shape) + imagenet_class_id = np.argmax(out) + confidence = out[0][imagenet_class_id] + print('* class ID: {}, label: {}'.format(imagenet_class_id, imagenet_labels[imagenet_class_id])) + print('* confidence: {:.4f}\n'.format(confidence)) + +def get_tf_dnn_prediction(original_net, preproc_img, imagenet_labels): + preproc_img = preproc_img.transpose(0, 2, 3, 1) + print('TF input blob shape: {}\n'.format(preproc_img.shape)) + out = original_net(preproc_img) + print('\nTensorFlow model prediction: \n') + print('* shape: ', out.shape) + imagenet_class_id = np.argmax(out) + print('* class ID: {}, label: {}'.format(imagenet_class_id, 
imagenet_labels[imagenet_class_id])) + confidence = out[0][imagenet_class_id] + print('* confidence: {:.4f}'.format(confidence)) + +def main(): + set_tf_env() + original_tf_model = MobileNet(include_top=True, weights='imagenet') + full_pb_path = get_tf_model_proto(original_tf_model) + opencv_net = cv2.dnn.readNetFromTensorflow(full_pb_path) + print('OpenCV model was successfully read. Model layers: \n', opencv_net.getLayerNames()) + input_img = get_preprocessed_img('../data/squirrel_cls.jpg') + imagenet_labels = get_imagenet_labels('../data/dnn/classification_classes_ILSVRC2012.txt') + get_opencv_dnn_prediction(opencv_net, input_img, imagenet_labels) + get_tf_dnn_prediction(original_tf_model, input_img, imagenet_labels) +if __name__ == '__main__': + main() + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/tf/detection/py_to_py_ssd_mobilenet.py +import os +import tarfile +import urllib +DETECTION_MODELS_URL = 'http://download.tensorflow.org/models/object_detection/' + +def extract_tf_frozen_graph(model_name, extracted_model_path): + tf_model_tar = model_name + '.tar.gz' + model_link = DETECTION_MODELS_URL + tf_model_tar + tf_frozen_graph_name = 'frozen_inference_graph' + try: + urllib.request.urlretrieve(model_link, tf_model_tar) + except Exception: + print('TF {} was not retrieved: {}'.format(model_name, model_link)) + return + print('TF {} was retrieved.'.format(model_name)) + tf_model_tar = tarfile.open(tf_model_tar) + frozen_graph_path = '' + for model_tar_elem in tf_model_tar.getmembers(): + if tf_frozen_graph_name in os.path.basename(model_tar_elem.name): + tf_model_tar.extract(model_tar_elem, extracted_model_path) + frozen_graph_path = os.path.join(extracted_model_path, model_tar_elem.name) + break + tf_model_tar.close() + return frozen_graph_path + +def main(): + tf_model_name = 'ssd_mobilenet_v1_coco_2017_11_17' + graph_extraction_dir = './' + frozen_graph_path = extract_tf_frozen_graph(tf_model_name, graph_extraction_dir) + print('Frozen graph path for {}: {}'.format(tf_model_name, frozen_graph_path)) +if __name__ == '__main__': + main() + +# File: opencv-master/samples/dnn/dnn_model_runner/dnn_conversion/tf/tf_model.py +import cv2 +import tensorflow as tf +from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 +from ..common.abstract_model import AbstractModel, Framework +from ..common.utils import DNN_LIB, get_full_model_path +CURRENT_LIB = 'TF' +MODEL_FORMAT = '.pb' + +class TFModelPreparer(AbstractModel): + + def __init__(self, model_name='default', original_model=None, is_ready_graph=False, tf_model_graph_path=''): + self._model_name = model_name + self._original_model = original_model + self._model_to_save = '' + self._is_ready_to_transfer_graph = is_ready_graph + self.model_path = self._set_model_path(tf_model_graph_path) + self._dnn_model = self._set_dnn_model() + + def _set_dnn_model(self): + if not self._is_ready_to_transfer_graph: + tf_model_graph = tf.function(lambda x: self._original_model(x)) + tf_model_graph = tf_model_graph.get_concrete_function(tf.TensorSpec(self._original_model.inputs[0].shape, self._original_model.inputs[0].dtype)) + frozen_tf_func = convert_variables_to_constants_v2(tf_model_graph) + frozen_tf_func.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_tf_func.graph, logdir=self.model_path['path'], name=self._model_to_save, as_text=False) + return cv2.dnn.readNetFromTensorflow(self.model_path['full_path']) + + def _set_model_path(self, tf_pb_file_path): + model_paths_dict = 
{'path': '', 'full_path': tf_pb_file_path} + if not self._is_ready_to_transfer_graph: + self._model_to_save = self._model_name + MODEL_FORMAT + model_paths_dict = get_full_model_path(CURRENT_LIB.lower(), self._model_to_save) + return model_paths_dict + + def get_prepared_models(self): + original_lib_name = CURRENT_LIB + ' ' + self._model_name + configured_model_dict = {original_lib_name: self._original_model, DNN_LIB + ' ' + self._model_name: self._dnn_model} + return configured_model_dict + +class TFModelProcessor(Framework): + + def __init__(self, prepared_model, model_name): + self._prepared_model = prepared_model + self._name = model_name + + def get_output(self, input_blob): + assert len(input_blob.shape) == 4 + batch_tf = input_blob.transpose(0, 2, 3, 1) + out = self._prepared_model(batch_tf) + return out + + def get_name(self): + return CURRENT_LIB + +class TFDnnModelProcessor(Framework): + + def __init__(self, prepared_dnn_model, model_name): + self._prepared_dnn_model = prepared_dnn_model + self._name = model_name + + def get_output(self, input_blob): + self._prepared_dnn_model.setInput(input_blob) + ret_val = self._prepared_dnn_model.forward() + return ret_val + + def get_name(self): + return DNN_LIB + +# File: opencv-master/samples/dnn/download_models.py +"""""" +from __future__ import print_function +import os +import sys +import yaml +import argparse +import tarfile +import platform +import tempfile +import hashlib +import requests +import shutil +from pathlib import Path +from datetime import datetime +if sys.version_info[0] < 3: + from urllib2 import urlopen +else: + from urllib.request import urlopen +import xml.etree.ElementTree as ET +__all__ = ['downloadFile'] + +class HashMismatchException(Exception): + + def __init__(self, expected, actual): + Exception.__init__(self) + self.expected = expected + self.actual = actual + + def __str__(self): + return 'Hash mismatch: expected {} vs actual of {}'.format(self.expected, self.actual) + +def getHashsumFromFile(filepath): + sha = hashlib.sha1() + if os.path.exists(filepath): + print(' there is already a file with the same name') + with open(filepath, 'rb') as f: + while True: + buf = f.read(10 * 1024 * 1024) + if not buf: + break + sha.update(buf) + hashsum = sha.hexdigest() + return hashsum + +def checkHashsum(expected_sha, filepath, silent=True): + print(' expected SHA1: {}'.format(expected_sha)) + actual_sha = getHashsumFromFile(filepath) + print(' actual SHA1:{}'.format(actual_sha)) + hashes_matched = expected_sha == actual_sha + if not hashes_matched and (not silent): + raise HashMismatchException(expected_sha, actual_sha) + return hashes_matched + +def isArchive(filepath): + return tarfile.is_tarfile(filepath) + +class DownloadInstance: + + def __init__(self, **kwargs): + self.name = kwargs.pop('name') + self.filename = kwargs.pop('filename') + self.loader = kwargs.pop('loader', None) + self.save_dir = kwargs.pop('save_dir') + self.sha = kwargs.pop('sha', None) + + def __str__(self): + return 'DownloadInstance <{}>'.format(self.name) + + def get(self): + print(' Working on ' + self.name) + print(' Getting file ' + self.filename) + if self.sha is None: + print(' No expected hashsum provided, loading file') + else: + filepath = os.path.join(self.save_dir, self.sha, self.filename) + if checkHashsum(self.sha, filepath): + print(' hash match - file already exists, skipping') + return filepath + else: + print(" hash didn't match, loading file") + if not os.path.exists(self.save_dir): + print(' creating directory: ' + 
self.save_dir) + os.makedirs(self.save_dir) + print(' hash check failed - loading') + assert self.loader + try: + self.loader.load(self.filename, self.sha, self.save_dir) + print(' done') + print(' file {}'.format(self.filename)) + if self.sha is None: + download_path = os.path.join(self.save_dir, self.filename) + self.sha = getHashsumFromFile(download_path) + new_dir = os.path.join(self.save_dir, self.sha) + if not os.path.exists(new_dir): + os.makedirs(new_dir) + filepath = os.path.join(new_dir, self.filename) + if not os.path.exists(filepath): + shutil.move(download_path, new_dir) + print(' No expected hashsum provided, actual SHA is {}'.format(self.sha)) + else: + checkHashsum(self.sha, filepath, silent=False) + except Exception as e: + print(' There was some problem with loading file {} for {}'.format(self.filename, self.name)) + print(' Exception: {}'.format(e)) + return + print(' Finished ' + self.name) + return filepath + +class Loader(object): + MB = 1024 * 1024 + BUFSIZE = 10 * MB + + def __init__(self, download_name, download_sha, archive_member=None): + self.download_name = download_name + self.download_sha = download_sha + self.archive_member = archive_member + + def load(self, requested_file, sha, save_dir): + if self.download_sha is None: + download_dir = save_dir + else: + download_dir = os.path.join(save_dir, self.download_sha) + if not os.path.exists(download_dir): + os.makedirs(download_dir) + download_path = os.path.join(download_dir, self.download_name) + print(' Preparing to download file ' + self.download_name) + if checkHashsum(self.download_sha, download_path): + print(' hash match - file already exists, no need to download') + else: + filesize = self.download(download_path) + print(' Downloaded {} with size {} Mb'.format(self.download_name, filesize / self.MB)) + if self.download_sha is not None: + checkHashsum(self.download_sha, download_path, silent=False) + if self.download_name == requested_file: + return + elif isArchive(download_path): + if sha is not None: + extract_dir = os.path.join(save_dir, sha) + else: + extract_dir = save_dir + if not os.path.exists(extract_dir): + os.makedirs(extract_dir) + self.extract(requested_file, download_path, extract_dir) + else: + raise Exception('Downloaded file has different name') + + def download(self, filepath): + print('Warning: download is not implemented, this is a base class') + return 0 + + def extract(self, requested_file, archive_path, save_dir): + filepath = os.path.join(save_dir, requested_file) + try: + with tarfile.open(archive_path) as f: + if self.archive_member is None: + pathDict = dict(((os.path.split(elem)[1], os.path.split(elem)[0]) for elem in f.getnames())) + self.archive_member = pathDict[requested_file] + assert self.archive_member in f.getnames() + self.save(filepath, f.extractfile(self.archive_member)) + except Exception as e: + print(' catch {}'.format(e)) + + def save(self, filepath, r): + with open(filepath, 'wb') as f: + print(' progress ', end='') + sys.stdout.flush() + while True: + buf = r.read(self.BUFSIZE) + if not buf: + break + f.write(buf) + print('>', end='') + sys.stdout.flush() + +class URLLoader(Loader): + + def __init__(self, download_name, download_sha, url, archive_member=None): + super(URLLoader, self).__init__(download_name, download_sha, archive_member) + self.download_name = download_name + self.download_sha = download_sha + self.url = url + + def download(self, filepath): + r = urlopen(self.url, timeout=60) + self.printRequest(r) + self.save(filepath, r) + return 
os.path.getsize(filepath) + + def printRequest(self, r): + + def getMB(r): + d = dict(r.info()) + for c in ['content-length', 'Content-Length']: + if c in d: + return int(d[c]) / self.MB + return '' + print(' {} {} [{} Mb]'.format(r.getcode(), r.msg, getMB(r))) + +class GDriveLoader(Loader): + BUFSIZE = 1024 * 1024 + PROGRESS_SIZE = 10 * 1024 * 1024 + + def __init__(self, download_name, download_sha, gid, archive_member=None): + super(GDriveLoader, self).__init__(download_name, download_sha, archive_member) + self.download_name = download_name + self.download_sha = download_sha + self.gid = gid + + def download(self, filepath): + session = requests.Session() + URL = 'https://docs.google.com/uc?export=download' + response = session.get(URL, params={'id': self.gid}, stream=True) + + def get_confirm_token(response): + for (key, value) in response.cookies.items(): + if key.startswith('download_warning'): + return value + return None + token = get_confirm_token(response) + if token: + params = {'id': self.gid, 'confirm': token} + response = session.get(URL, params=params, stream=True) + sz = 0 + progress_sz = self.PROGRESS_SIZE + with open(filepath, 'wb') as f: + for chunk in response.iter_content(self.BUFSIZE): + if not chunk: + continue + f.write(chunk) + sz += len(chunk) + if sz >= progress_sz: + progress_sz += self.PROGRESS_SIZE + print('>', end='') + sys.stdout.flush() + print('') + return sz + +def produceDownloadInstance(instance_name, filename, sha, url, save_dir, download_name=None, download_sha=None, archive_member=None): + spec_param = url + loader = URLLoader + if download_name is None: + download_name = filename + if download_sha is None: + download_sha = sha + if 'drive.google.com' in url: + token = '' + token_part = url.rsplit('/', 1)[-1] + if '&id=' not in token_part: + token_part = url.rsplit('/', 1)[-2] + for param in token_part.split('&'): + if param.startswith('id='): + token = param[3:] + if token: + loader = GDriveLoader + spec_param = token + else: + print('Warning: possibly wrong Google Drive link') + return DownloadInstance(name=instance_name, filename=filename, sha=sha, save_dir=save_dir, loader=loader(download_name, download_sha, spec_param, archive_member)) + +def getSaveDir(): + env_path = os.environ.get('OPENCV_DOWNLOAD_DATA_PATH', None) + if env_path: + save_dir = env_path + else: + if platform.system() == 'Darwin': + temp_env = os.environ.get('TMPDIR', None) + if temp_env is None or not os.path.isdir(temp_env): + temp_dir = Path('/tmp') + print('Using world accessible cache directory. This may be not secure: ', temp_dir) + else: + temp_dir = temp_env + elif platform.system() == 'Windows': + temp_dir = tempfile.gettempdir() + else: + xdg_cache_env = os.environ.get('XDG_CACHE_HOME', None) + if xdg_cache_env and xdg_cache_env[0] and os.path.isdir(xdg_cache_env): + temp_dir = xdg_cache_env + else: + home_env = os.environ.get('HOME', None) + if home_env and home_env[0] and os.path.isdir(home_env): + home_path = os.path.join(home_env, '.cache/') + if os.path.isdir(home_path): + temp_dir = home_path + else: + temp_dir = tempfile.gettempdir() + print('Using world accessible cache directory. 
This may be not secure: ', temp_dir) + save_dir = os.path.join(temp_dir, 'downloads') + if not os.path.exists(save_dir): + os.makedirs(save_dir) + return save_dir + +def downloadFile(url, sha=None, save_dir=None, filename=None): + if save_dir is None: + save_dir = getSaveDir() + if filename is None: + filename = 'download_' + datetime.now().__str__() + name = filename + return produceDownloadInstance(name, filename, sha, url, save_dir).get() + +def parseMetalinkFile(metalink_filepath, save_dir): + NS = {'ml': 'urn:ietf:params:xml:ns:metalink'} + models = [] + for file_elem in ET.parse(metalink_filepath).getroot().findall('ml:file', NS): + url = file_elem.find('ml:url', NS).text + fname = file_elem.attrib['name'] + name = file_elem.find('ml:identity', NS).text + hash_sum = file_elem.find('ml:hash', NS).text + models.append(produceDownloadInstance(name, fname, hash_sum, url, save_dir)) + return models + +def parseYAMLFile(yaml_filepath, save_dir): + models = [] + with open(yaml_filepath, 'r') as stream: + data_loaded = yaml.safe_load(stream) + for (name, params) in data_loaded.items(): + load_info = params.get('load_info', None) + if load_info: + fname = os.path.basename(params.get('model')) + hash_sum = load_info.get('sha1') + url = load_info.get('url') + download_sha = load_info.get('download_sha') + download_name = load_info.get('download_name') + archive_member = load_info.get('member') + models.append(produceDownloadInstance(name, fname, hash_sum, url, save_dir, download_name=download_name, download_sha=download_sha, archive_member=archive_member)) + return models +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='This is a utility script for downloading DNN models for samples.') + parser.add_argument('--save_dir', action='store', default=os.getcwd(), help='Path to the directory to store downloaded files') + parser.add_argument('model_name', type=str, default='', nargs='?', action='store', help='name of the model to download') + args = parser.parse_args() + models = [] + save_dir = args.save_dir + selected_model_name = args.model_name + models.extend(parseMetalinkFile('face_detector/weights.meta4', save_dir)) + models.extend(parseYAMLFile('models.yml', save_dir)) + for m in models: + print(m) + if selected_model_name and (not m.name.startswith(selected_model_name)): + continue + print('Model: ' + selected_model_name) + m.get() + +# File: opencv-master/samples/dnn/edge_detection.py +import cv2 as cv +import argparse +parser = argparse.ArgumentParser(description='This sample shows how to define custom OpenCV deep learning layers in Python. Holistically-Nested Edge Detection (https://arxiv.org/abs/1504.06375) neural network is used as an example model. Find a pre-trained model at https://github.com/s9xie/hed.') +parser.add_argument('--input', help='Path to image or video. 
Skip to capture frames from camera')
+parser.add_argument('--prototxt', help='Path to deploy.prototxt', required=True)
+parser.add_argument('--caffemodel', help='Path to hed_pretrained_bsds.caffemodel', required=True)
+parser.add_argument('--width', help='Resize input image to a specific width', default=500, type=int)
+parser.add_argument('--height', help='Resize input image to a specific height', default=500, type=int)
+args = parser.parse_args()
+
+class CropLayer(object):
+
+    def __init__(self, params, blobs):
+        self.xstart = 0
+        self.xend = 0
+        self.ystart = 0
+        self.yend = 0
+
+    def getMemoryShapes(self, inputs):
+        (inputShape, targetShape) = (inputs[0], inputs[1])
+        (batchSize, numChannels) = (inputShape[0], inputShape[1])
+        (height, width) = (targetShape[2], targetShape[3])
+        self.ystart = (inputShape[2] - targetShape[2]) // 2
+        self.xstart = (inputShape[3] - targetShape[3]) // 2
+        self.yend = self.ystart + height
+        self.xend = self.xstart + width
+        return [[batchSize, numChannels, height, width]]
+
+    def forward(self, inputs):
+        return [inputs[0][:, :, self.ystart:self.yend, self.xstart:self.xend]]
+cv.dnn_registerLayer('Crop', CropLayer)
+net = cv.dnn.readNet(cv.samples.findFile(args.prototxt), cv.samples.findFile(args.caffemodel))
+kWinName = 'Holistically-Nested Edge Detection'
+cv.namedWindow('Input', cv.WINDOW_NORMAL)
+cv.namedWindow(kWinName, cv.WINDOW_NORMAL)
+cap = cv.VideoCapture(args.input if args.input else 0)
+while cv.waitKey(1) < 0:
+    (hasFrame, frame) = cap.read()
+    if not hasFrame:
+        cv.waitKey()
+        break
+    cv.imshow('Input', frame)
+    inp = cv.dnn.blobFromImage(frame, scalefactor=1.0, size=(args.width, args.height), mean=(104.00698793, 116.66876762, 122.67891434), swapRB=False, crop=False)
+    net.setInput(inp)
+    out = net.forward()
+    out = out[0, 0]
+    out = cv.resize(out, (frame.shape[1], frame.shape[0]))
+    cv.imshow(kWinName, out)
+
+# File: opencv-master/samples/dnn/face_detect.py
+import argparse
+import numpy as np
+import cv2 as cv
+
+def str2bool(v):
+    if v.lower() in ['on', 'yes', 'true', 'y', 't']:
+        return True
+    elif v.lower() in ['off', 'no', 'false', 'n', 'f']:
+        return False
+    else:
+        raise NotImplementedError
+parser = argparse.ArgumentParser()
+parser.add_argument('--image1', '-i1', type=str, help='Path to the input image1. Omit for detecting on default camera.')
+parser.add_argument('--image2', '-i2', type=str, help='Path to the input image2. When both image1 and image2 are given, the program tries to find a face in each image and runs the face recognition algorithm.')
+parser.add_argument('--video', '-v', type=str, help='Path to the input video.')
+parser.add_argument('--scale', '-sc', type=float, default=1.0, help='Scale factor used to resize input video frames.')
+parser.add_argument('--face_detection_model', '-fd', type=str, default='face_detection_yunet_2021dec.onnx', help='Path to the face detection model. Download the model at https://github.com/opencv/opencv_zoo/tree/master/models/face_detection_yunet')
+parser.add_argument('--face_recognition_model', '-fr', type=str, default='face_recognition_sface_2021dec.onnx', help='Path to the face recognition model. 
Download the model at https://github.com/opencv/opencv_zoo/tree/master/models/face_recognition_sface') +parser.add_argument('--score_threshold', type=float, default=0.9, help='Filtering out faces of score < score_threshold.') +parser.add_argument('--nms_threshold', type=float, default=0.3, help='Suppress bounding boxes of iou >= nms_threshold.') +parser.add_argument('--top_k', type=int, default=5000, help='Keep top_k bounding boxes before NMS.') +parser.add_argument('--save', '-s', type=str2bool, default=False, help='Set true to save results. This flag is invalid when using camera.') +args = parser.parse_args() + +def visualize(input, faces, fps, thickness=2): + if faces[1] is not None: + for (idx, face) in enumerate(faces[1]): + print('Face {}, top-left coordinates: ({:.0f}, {:.0f}), box width: {:.0f}, box height {:.0f}, score: {:.2f}'.format(idx, face[0], face[1], face[2], face[3], face[-1])) + coords = face[:-1].astype(np.int32) + cv.rectangle(input, (coords[0], coords[1]), (coords[0] + coords[2], coords[1] + coords[3]), (0, 255, 0), thickness) + cv.circle(input, (coords[4], coords[5]), 2, (255, 0, 0), thickness) + cv.circle(input, (coords[6], coords[7]), 2, (0, 0, 255), thickness) + cv.circle(input, (coords[8], coords[9]), 2, (0, 255, 0), thickness) + cv.circle(input, (coords[10], coords[11]), 2, (255, 0, 255), thickness) + cv.circle(input, (coords[12], coords[13]), 2, (0, 255, 255), thickness) + cv.putText(input, 'FPS: {:.2f}'.format(fps), (1, 16), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) +if __name__ == '__main__': + detector = cv.FaceDetectorYN.create(args.face_detection_model, '', (320, 320), args.score_threshold, args.nms_threshold, args.top_k) + tm = cv.TickMeter() + if args.image1 is not None: + img1 = cv.imread(cv.samples.findFile(args.image1)) + img1Width = int(img1.shape[1] * args.scale) + img1Height = int(img1.shape[0] * args.scale) + img1 = cv.resize(img1, (img1Width, img1Height)) + tm.start() + detector.setInputSize((img1Width, img1Height)) + faces1 = detector.detect(img1) + tm.stop() + assert faces1[1] is not None, 'Cannot find a face in {}'.format(args.image1) + visualize(img1, faces1, tm.getFPS()) + if args.save: + print('Results saved to result.jpg\n') + cv.imwrite('result.jpg', img1) + cv.imshow('image1', img1) + if args.image2 is not None: + img2 = cv.imread(cv.samples.findFile(args.image2)) + tm.reset() + tm.start() + detector.setInputSize((img2.shape[1], img2.shape[0])) + faces2 = detector.detect(img2) + tm.stop() + assert faces2[1] is not None, 'Cannot find a face in {}'.format(args.image2) + visualize(img2, faces2, tm.getFPS()) + cv.imshow('image2', img2) + recognizer = cv.FaceRecognizerSF.create(args.face_recognition_model, '') + face1_align = recognizer.alignCrop(img1, faces1[1][0]) + face2_align = recognizer.alignCrop(img2, faces2[1][0]) + face1_feature = recognizer.feature(face1_align) + face2_feature = recognizer.feature(face2_align) + cosine_similarity_threshold = 0.363 + l2_similarity_threshold = 1.128 + cosine_score = recognizer.match(face1_feature, face2_feature, cv.FaceRecognizerSF_FR_COSINE) + l2_score = recognizer.match(face1_feature, face2_feature, cv.FaceRecognizerSF_FR_NORM_L2) + msg = 'different identities' + if cosine_score >= cosine_similarity_threshold: + msg = 'the same identity' + print('They have {}. 
Cosine Similarity: {}, threshold: {} (higher value means higher similarity, max 1.0).'.format(msg, cosine_score, cosine_similarity_threshold)) + msg = 'different identities' + if l2_score <= l2_similarity_threshold: + msg = 'the same identity' + print('They have {}. NormL2 Distance: {}, threshold: {} (lower value means higher similarity, min 0.0).'.format(msg, l2_score, l2_similarity_threshold)) + cv.waitKey(0) + else: + if args.video is not None: + deviceId = args.video + else: + deviceId = 0 + cap = cv.VideoCapture(deviceId) + frameWidth = int(cap.get(cv.CAP_PROP_FRAME_WIDTH) * args.scale) + frameHeight = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT) * args.scale) + detector.setInputSize([frameWidth, frameHeight]) + while cv.waitKey(1) < 0: + (hasFrame, frame) = cap.read() + if not hasFrame: + print('No frames grabbed!') + break + frame = cv.resize(frame, (frameWidth, frameHeight)) + tm.start() + faces = detector.detect(frame) + tm.stop() + visualize(frame, faces, tm.getFPS()) + cv.imshow('Live', frame) + cv.destroyAllWindows() + +# File: opencv-master/samples/dnn/fast_neural_style.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +parser = argparse.ArgumentParser(description='This script is used to run style transfer models from https://github.com/onnx/models/tree/main/vision/style_transfer/fast_neural_style using OpenCV') +parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera') +parser.add_argument('--model', help='Path to .onnx model') +parser.add_argument('--width', default=-1, type=int, help='Resize input to specific width.') +parser.add_argument('--height', default=-1, type=int, help='Resize input to specific height.') +parser.add_argument('--median_filter', default=0, type=int, help='Kernel size of postprocessing blurring.') +args = parser.parse_args() +net = cv.dnn.readNet(cv.samples.findFile(args.model)) +net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV) +if args.input: + cap = cv.VideoCapture(args.input) +else: + cap = cv.VideoCapture(0) +cv.namedWindow('Styled image', cv.WINDOW_NORMAL) +while cv.waitKey(1) < 0: + (hasFrame, frame) = cap.read() + if not hasFrame: + cv.waitKey() + break + inWidth = args.width if args.width != -1 else frame.shape[1] + inHeight = args.height if args.height != -1 else frame.shape[0] + inp = cv.dnn.blobFromImage(frame, 1.0, (inWidth, inHeight), swapRB=True, crop=False) + net.setInput(inp) + out = net.forward() + out = out.reshape(3, out.shape[2], out.shape[3]) + out = out.transpose(1, 2, 0) + (t, _) = net.getPerfProfile() + freq = cv.getTickFrequency() / 1000 + print(t / freq, 'ms') + if args.median_filter: + out = cv.medianBlur(out, args.median_filter) + out = np.clip(out, 0, 255) + out = out.astype(np.uint8) + cv.imshow('Styled image', out) + +# File: opencv-master/samples/dnn/human_parsing.py +"""""" +import argparse +import os.path +import numpy as np +import cv2 as cv +backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) +targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) + +def preprocess(image): + image_rev = np.flip(image, axis=1) + input = cv.dnn.blobFromImages([image, image_rev], mean=(104.00698793, 116.66876762, 122.67891434)) + return input + +def run_net(input, model_path, backend, 
target): + net = cv.dnn.readNet(model_path) + net.setPreferableBackend(backend) + net.setPreferableTarget(target) + net.setInput(input) + out = net.forward() + return out + +def postprocess(out, input_shape): + (head_output, tail_output) = np.split(out, indices_or_sections=[1], axis=0) + head_output = head_output.squeeze(0) + tail_output = tail_output.squeeze(0) + head_output = np.stack([cv.resize(img, dsize=input_shape) for img in head_output[:, ...]]) + tail_output = np.stack([cv.resize(img, dsize=input_shape) for img in tail_output[:, ...]]) + tail_list = np.split(tail_output, indices_or_sections=list(range(1, 20)), axis=0) + tail_list = [arr.squeeze(0) for arr in tail_list] + tail_list_rev = [tail_list[i] for i in range(14)] + tail_list_rev.extend([tail_list[15], tail_list[14], tail_list[17], tail_list[16], tail_list[19], tail_list[18]]) + tail_output_rev = np.stack(tail_list_rev, axis=0) + tail_output_rev = np.flip(tail_output_rev, axis=2) + raw_output_all = np.mean(np.stack([head_output, tail_output_rev], axis=0), axis=0, keepdims=True) + raw_output_all = np.argmax(raw_output_all, axis=1) + raw_output_all = raw_output_all.transpose(1, 2, 0) + return raw_output_all + +def decode_labels(gray_image): + (height, width, _) = gray_image.shape + colors = [(0, 0, 0), (128, 0, 0), (255, 0, 0), (0, 85, 0), (170, 0, 51), (255, 85, 0), (0, 0, 85), (0, 119, 221), (85, 85, 0), (0, 85, 85), (85, 51, 0), (52, 86, 128), (0, 128, 0), (0, 0, 255), (51, 170, 221), (0, 255, 255), (85, 255, 170), (170, 255, 85), (255, 255, 0), (255, 170, 0)] + segm = np.stack([colors[idx] for idx in gray_image.flatten()]) + segm = segm.reshape(height, width, 3).astype(np.uint8) + segm = cv.cvtColor(segm, cv.COLOR_BGR2RGB) + return segm + +def parse_human(image, model_path, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU): + input = preprocess(image) + (input_h, input_w) = input.shape[2:] + output = run_net(input, model_path, backend, target) + grayscale_out = postprocess(output, (input_w, input_h)) + segmentation = decode_labels(grayscale_out) + return segmentation +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Use this script to run human parsing using JPPNet', formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--input', '-i', required=True, help='Path to input image.') + parser.add_argument('--model', '-m', default='lip_jppnet_384.pb', help='Path to pb model.') + parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: %d: automatically (by default), %d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), %d: OpenCV implementation, %d: VKCOM, %d: CUDA" % backends) + parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: %d: CPU target (by default), %d: OpenCL, %d: OpenCL fp16 (half-float precision), %d: NCS2 VPU, %d: HDDL VPU, %d: Vulkan, %d: CUDA, %d: CUDA fp16 (half-float preprocess)' % targets) + (args, _) = parser.parse_known_args() + if not os.path.isfile(args.model): + raise OSError('Model not exist') + image = cv.imread(args.input) + output = parse_human(image, args.model, args.backend, args.target) + winName = 'Deep learning human parsing in OpenCV' + cv.namedWindow(winName, cv.WINDOW_AUTOSIZE) + cv.imshow(winName, output) + cv.waitKey() + +# File: opencv-master/samples/dnn/mask_rcnn.py +import cv2 as cv +import argparse +import numpy as np 
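+# --- Editor's note: a hedged usage sketch, not part of the upstream sample. ---
+# The Mask-RCNN sample below fetches two output blobs in one forward pass:
+#   'detection_out_final' -> shape [1, 1, N, 7]; each row holds
+#       [batchId, classId, score, left, top, right, bottom], with box
+#       coordinates normalized to [0, 1] relative to the input frame;
+#   'detection_masks'     -> shape [N, numClasses, H, W]; a low-resolution
+#       per-class mask that the loop below resizes to the box size and
+#       thresholds at 0.5 before blending it into the frame.
+# A minimal standalone call might look like this (file names are placeholders):
+#
+#   net = cv.dnn.readNet('frozen_inference_graph.pb', 'mask_rcnn.pbtxt')
+#   img = cv.imread('example.jpg')
+#   blob = cv.dnn.blobFromImage(img, size=(800, 800), swapRB=True, crop=False)
+#   net.setInput(blob)
+#   boxes, masks = net.forward(['detection_out_final', 'detection_masks'])
+# -------------------------------------------------------------------------------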
+parser = argparse.ArgumentParser(description='Use this script to run Mask-RCNN object detection and semantic segmentation network from TensorFlow Object Detection API.')
+parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
+parser.add_argument('--model', required=True, help='Path to a .pb file with weights.')
+parser.add_argument('--config', required=True, help='Path to a .pbtxt file containing the network configuration.')
+parser.add_argument('--classes', help='Optional path to a text file with names of classes.')
+parser.add_argument('--colors', help='Optional path to a text file with colors for every class. Each color is represented by three values from 0 to 255 in BGR channel order.')
+parser.add_argument('--width', type=int, default=800, help='Preprocess input image by resizing to a specific width.')
+parser.add_argument('--height', type=int, default=800, help='Preprocess input image by resizing to a specific height.')
+parser.add_argument('--thr', type=float, default=0.5, help='Confidence threshold')
+args = parser.parse_args()
+np.random.seed(324)
+classes = None
+if args.classes:
+    with open(args.classes, 'rt') as f:
+        classes = f.read().rstrip('\n').split('\n')
+colors = None
+if args.colors:
+    with open(args.colors, 'rt') as f:
+        colors = [np.array(color.split(' '), np.uint8) for color in f.read().rstrip('\n').split('\n')]
+legend = None
+
+def showLegend(classes):
+    global legend
+    if classes is not None and legend is None:
+        blockHeight = 30
+        assert len(classes) == len(colors)
+        legend = np.zeros((blockHeight * len(colors), 200, 3), np.uint8)
+        for i in range(len(classes)):
+            block = legend[i * blockHeight:(i + 1) * blockHeight]
+            block[:, :] = colors[i]
+            cv.putText(block, classes[i], (0, blockHeight // 2), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
+        cv.namedWindow('Legend', cv.WINDOW_NORMAL)
+        cv.imshow('Legend', legend)
+        classes = None
+
+def drawBox(frame, classId, conf, left, top, right, bottom):
+    cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0))
+    label = '%.2f' % conf
+    if classes:
+        assert classId < len(classes)
+        label = '%s: %s' % (classes[classId], label)
+    (labelSize, baseLine) = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
+    top = max(top, labelSize[1])
+    cv.rectangle(frame, (left, top - labelSize[1]), (left + labelSize[0], top + baseLine), (255, 255, 255), cv.FILLED)
+    cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
+net = cv.dnn.readNet(cv.samples.findFile(args.model), cv.samples.findFile(args.config))
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
+winName = 'Mask-RCNN in OpenCV'
+cv.namedWindow(winName, cv.WINDOW_NORMAL)
+cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0)
+legend = None
+while cv.waitKey(1) < 0:
+    (hasFrame, frame) = cap.read()
+    if not hasFrame:
+        cv.waitKey()
+        break
+    frameH = frame.shape[0]
+    frameW = frame.shape[1]
+    blob = cv.dnn.blobFromImage(frame, size=(args.width, args.height), swapRB=True, crop=False)
+    net.setInput(blob)
+    (boxes, masks) = net.forward(['detection_out_final', 'detection_masks'])
+    numClasses = masks.shape[1]
+    numDetections = boxes.shape[2]
+    if not colors:
+        colors = [np.array([0, 0, 0], np.uint8)]
+        for i in range(1, numClasses + 1):
+            colors.append((colors[i - 1] + np.random.randint(0, 256, [3], np.uint8)) / 2)
+        del colors[0]
+    boxesToDraw = []
+    for i in range(numDetections):
+        box = boxes[0, 0, i]
+        mask = masks[i]
+        score = box[2]
+        if 
score > args.thr: + classId = int(box[1]) + left = int(frameW * box[3]) + top = int(frameH * box[4]) + right = int(frameW * box[5]) + bottom = int(frameH * box[6]) + left = max(0, min(left, frameW - 1)) + top = max(0, min(top, frameH - 1)) + right = max(0, min(right, frameW - 1)) + bottom = max(0, min(bottom, frameH - 1)) + boxesToDraw.append([frame, classId, score, left, top, right, bottom]) + classMask = mask[classId] + classMask = cv.resize(classMask, (right - left + 1, bottom - top + 1)) + mask = classMask > 0.5 + roi = frame[top:bottom + 1, left:right + 1][mask] + frame[top:bottom + 1, left:right + 1][mask] = (0.7 * colors[classId] + 0.3 * roi).astype(np.uint8) + for box in boxesToDraw: + drawBox(*box) + (t, _) = net.getPerfProfile() + label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency()) + cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) + showLegend(classes) + cv.imshow(winName, frame) + +# File: opencv-master/samples/dnn/mobilenet_ssd_accuracy.py +from __future__ import print_function +import os +import cv2 as cv +import json +import argparse +parser = argparse.ArgumentParser(description='Evaluate MobileNet-SSD model using both TensorFlow and OpenCV. COCO evaluation framework is required: http://cocodataset.org') +parser.add_argument('--weights', required=True, help='Path to frozen_inference_graph.pb of MobileNet-SSD model. Download it from http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_11_06_2017.tar.gz') +parser.add_argument('--prototxt', help='Path to ssd_mobilenet_v1_coco.pbtxt from opencv_extra.', required=True) +parser.add_argument('--images', help='Path to COCO validation images directory.', required=True) +parser.add_argument('--annotations', help='Path to COCO annotations file.', required=True) +args = parser.parse_args() +net = cv.dnn.readNetFromTensorflow(cv.samples.findFile(args.weights), cv.samples.findFile(args.prototxt)) +net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV) +detections = [] +for imgName in os.listdir(args.images): + inp = cv.imread(cv.samples.findFile(os.path.join(args.images, imgName))) + rows = inp.shape[0] + cols = inp.shape[1] + inp = cv.resize(inp, (300, 300)) + net.setInput(cv.dnn.blobFromImage(inp, 1.0 / 127.5, (300, 300), (127.5, 127.5, 127.5), True)) + out = net.forward() + for i in range(out.shape[2]): + score = float(out[0, 0, i, 2]) + classId = int(out[0, 0, i, 1]) + x = out[0, 0, i, 3] * cols + y = out[0, 0, i, 4] * rows + w = out[0, 0, i, 5] * cols - x + h = out[0, 0, i, 6] * rows - y + detections.append({'image_id': int(imgName.rstrip('0')[:imgName.rfind('.')]), 'category_id': classId, 'bbox': [x, y, w, h], 'score': score}) +with open('cv_result.json', 'wt') as f: + json.dump(detections, f) +import tensorflow as tf +with tf.gfile.FastGFile(args.weights) as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) +with tf.Session() as sess: + sess.graph.as_default() + tf.import_graph_def(graph_def, name='') + detections = [] + for imgName in os.listdir(args.images): + inp = cv.imread(os.path.join(args.images, imgName)) + rows = inp.shape[0] + cols = inp.shape[1] + inp = cv.resize(inp, (300, 300)) + inp = inp[:, :, [2, 1, 0]] + out = sess.run([sess.graph.get_tensor_by_name('num_detections:0'), sess.graph.get_tensor_by_name('detection_scores:0'), sess.graph.get_tensor_by_name('detection_boxes:0'), sess.graph.get_tensor_by_name('detection_classes:0')], feed_dict={'image_tensor:0': inp.reshape(1, inp.shape[0], inp.shape[1], 3)}) + num_detections = 
int(out[0][0]) + for i in range(num_detections): + classId = int(out[3][0][i]) + score = float(out[1][0][i]) + bbox = [float(v) for v in out[2][0][i]] + if score > 0.01: + x = bbox[1] * cols + y = bbox[0] * rows + w = bbox[3] * cols - x + h = bbox[2] * rows - y + detections.append({'image_id': int(imgName.rstrip('0')[:imgName.rfind('.')]), 'category_id': classId, 'bbox': [x, y, w, h], 'score': score}) +with open('tf_result.json', 'wt') as f: + json.dump(detections, f) +import matplotlib.pyplot as plt +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval +import numpy as np +import skimage.io as io +import pylab +pylab.rcParams['figure.figsize'] = (10.0, 8.0) +annType = ['segm', 'bbox', 'keypoints'] +annType = annType[1] +prefix = 'person_keypoints' if annType == 'keypoints' else 'instances' +print('Running demo for *%s* results.' % annType) +cocoGt = COCO(args.annotations) +for resFile in ['tf_result.json', 'cv_result.json']: + print(resFile) + cocoDt = cocoGt.loadRes(resFile) + cocoEval = COCOeval(cocoGt, cocoDt, annType) + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + +# File: opencv-master/samples/dnn/object_detection.py +import cv2 as cv +import argparse +import numpy as np +import sys +import copy +import time +from threading import Thread +if sys.version_info[0] == 2: + import Queue as queue +else: + import queue +from common import * +from tf_text_graph_common import readTextMessage +from tf_text_graph_ssd import createSSDGraph +from tf_text_graph_faster_rcnn import createFasterRCNNGraph +backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) +targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) +parser = argparse.ArgumentParser(add_help=False) +parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'), help='An optional path to file with preprocessing parameters.') +parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.') +parser.add_argument('--out_tf_graph', default='graph.pbtxt', help='For models from TensorFlow Object Detection API, you may pass a .config file which was used for training through --config argument. This way an additional .pbtxt file with TensorFlow graph will be created.') +parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet', 'dldt', 'onnx'], help='Optional name of an origin framework of the model. 
Detect it automatically if it does not set.') +parser.add_argument('--thr', type=float, default=0.5, help='Confidence threshold') +parser.add_argument('--nms', type=float, default=0.4, help='Non-maximum suppression threshold') +parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: %d: automatically (by default), %d: Halide language (http://halide-lang.org/), %d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), %d: OpenCV implementation, %d: VKCOM, %d: CUDA" % backends) +parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: %d: CPU target (by default), %d: OpenCL, %d: OpenCL fp16 (half-float precision), %d: NCS2 VPU, %d: HDDL VPU, %d: Vulkan, %d: CUDA, %d: CUDA fp16 (half-float preprocess)' % targets) +parser.add_argument('--async', type=int, default=0, dest='asyncN', help='Number of asynchronous forwards at the same time. Choose 0 for synchronous mode') +(args, _) = parser.parse_known_args() +add_preproc_args(args.zoo, parser, 'object_detection') +parser = argparse.ArgumentParser(parents=[parser], description='Use this script to run object detection deep learning networks using OpenCV.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) +args = parser.parse_args() +args.model = findFile(args.model) +args.config = findFile(args.config) +args.classes = findFile(args.classes) +config = readTextMessage(args.config) +if 'model' in config: + print('TensorFlow Object Detection API config detected') + if 'ssd' in config['model'][0]: + print('Preparing text graph representation for SSD model: ' + args.out_tf_graph) + createSSDGraph(args.model, args.config, args.out_tf_graph) + args.config = args.out_tf_graph + elif 'faster_rcnn' in config['model'][0]: + print('Preparing text graph representation for Faster-RCNN model: ' + args.out_tf_graph) + createFasterRCNNGraph(args.model, args.config, args.out_tf_graph) + args.config = args.out_tf_graph +classes = None +if args.classes: + with open(args.classes, 'rt') as f: + classes = f.read().rstrip('\n').split('\n') +net = cv.dnn.readNet(args.model, args.config, args.framework) +net.setPreferableBackend(args.backend) +net.setPreferableTarget(args.target) +outNames = net.getUnconnectedOutLayersNames() +confThreshold = args.thr +nmsThreshold = args.nms + +def postprocess(frame, outs): + frameHeight = frame.shape[0] + frameWidth = frame.shape[1] + + def drawPred(classId, conf, left, top, right, bottom): + cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0)) + label = '%.2f' % conf + if classes: + assert classId < len(classes) + label = '%s: %s' % (classes[classId], label) + (labelSize, baseLine) = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1) + top = max(top, labelSize[1]) + cv.rectangle(frame, (left, top - labelSize[1]), (left + labelSize[0], top + baseLine), (255, 255, 255), cv.FILLED) + cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0)) + layerNames = net.getLayerNames() + lastLayerId = net.getLayerId(layerNames[-1]) + lastLayer = net.getLayer(lastLayerId) + classIds = [] + confidences = [] + boxes = [] + if lastLayer.type == 'DetectionOutput': + for out in outs: + for detection in out[0, 0]: + confidence = detection[2] + if confidence > confThreshold: + left = int(detection[3]) + top = int(detection[4]) + right = int(detection[5]) + bottom = int(detection[6]) + width = right - left + 1 + height = bottom 
- top + 1 + if width <= 2 or height <= 2: + left = int(detection[3] * frameWidth) + top = int(detection[4] * frameHeight) + right = int(detection[5] * frameWidth) + bottom = int(detection[6] * frameHeight) + width = right - left + 1 + height = bottom - top + 1 + classIds.append(int(detection[1]) - 1) + confidences.append(float(confidence)) + boxes.append([left, top, width, height]) + elif lastLayer.type == 'Region' or args.postprocessing == 'yolov8': + if args.postprocessing == 'yolov8': + box_scale_w = frameWidth / args.width + box_scale_h = frameHeight / args.height + else: + box_scale_w = frameWidth + box_scale_h = frameHeight + for out in outs: + if args.postprocessing == 'yolov8': + out = out[0].transpose(1, 0) + for detection in out: + scores = detection[4:] + if args.background_label_id >= 0: + scores = np.delete(scores, args.background_label_id) + classId = np.argmax(scores) + confidence = scores[classId] + if confidence > confThreshold: + center_x = int(detection[0] * box_scale_w) + center_y = int(detection[1] * box_scale_h) + width = int(detection[2] * box_scale_w) + height = int(detection[3] * box_scale_h) + left = int(center_x - width / 2) + top = int(center_y - height / 2) + classIds.append(classId) + confidences.append(float(confidence)) + boxes.append([left, top, width, height]) + else: + print('Unknown output layer type: ' + lastLayer.type) + exit() + if len(outNames) > 1 or ((lastLayer.type == 'Region' or args.postprocessing == 'yolov8') and args.backend != cv.dnn.DNN_BACKEND_OPENCV): + indices = [] + classIds = np.array(classIds) + boxes = np.array(boxes) + confidences = np.array(confidences) + unique_classes = set(classIds) + for cl in unique_classes: + class_indices = np.where(classIds == cl)[0] + conf = confidences[class_indices] + box = boxes[class_indices].tolist() + nms_indices = cv.dnn.NMSBoxes(box, conf, confThreshold, nmsThreshold) + indices.extend(class_indices[nms_indices]) + else: + indices = np.arange(0, len(classIds)) + for i in indices: + box = boxes[i] + left = box[0] + top = box[1] + width = box[2] + height = box[3] + drawPred(classIds[i], confidences[i], left, top, left + width, top + height) +winName = 'Deep learning object detection in OpenCV' +cv.namedWindow(winName, cv.WINDOW_NORMAL) + +def callback(pos): + global confThreshold + confThreshold = pos / 100.0 +cv.createTrackbar('Confidence threshold, %', winName, int(confThreshold * 100), 99, callback) +cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0) + +class QueueFPS(queue.Queue): + + def __init__(self): + queue.Queue.__init__(self) + self.startTime = 0 + self.counter = 0 + + def put(self, v): + queue.Queue.put(self, v) + self.counter += 1 + if self.counter == 1: + self.startTime = time.time() + + def getFPS(self): + return self.counter / (time.time() - self.startTime) +process = True +framesQueue = QueueFPS() + +def framesThreadBody(): + global framesQueue, process + while process: + (hasFrame, frame) = cap.read() + if not hasFrame: + break + framesQueue.put(frame) +processedFramesQueue = queue.Queue() +predictionsQueue = QueueFPS() + +def processingThreadBody(): + global processedFramesQueue, predictionsQueue, args, process + futureOutputs = [] + while process: + frame = None + try: + frame = framesQueue.get_nowait() + if args.asyncN: + if len(futureOutputs) == args.asyncN: + frame = None + else: + framesQueue.queue.clear() + except queue.Empty: + pass + if not frame is None: + frameHeight = frame.shape[0] + frameWidth = frame.shape[1] + inpWidth = args.width if 
args.width else frameWidth + inpHeight = args.height if args.height else frameHeight + blob = cv.dnn.blobFromImage(frame, size=(inpWidth, inpHeight), swapRB=args.rgb, ddepth=cv.CV_8U) + processedFramesQueue.put(frame) + net.setInput(blob, scalefactor=args.scale, mean=args.mean) + if net.getLayer(0).outputNameToIndex('im_info') != -1: + frame = cv.resize(frame, (inpWidth, inpHeight)) + net.setInput(np.array([[inpHeight, inpWidth, 1.6]], dtype=np.float32), 'im_info') + if args.asyncN: + futureOutputs.append(net.forwardAsync()) + else: + outs = net.forward(outNames) + predictionsQueue.put(copy.deepcopy(outs)) + while futureOutputs and futureOutputs[0].wait_for(0): + out = futureOutputs[0].get() + predictionsQueue.put(copy.deepcopy([out])) + del futureOutputs[0] +framesThread = Thread(target=framesThreadBody) +framesThread.start() +processingThread = Thread(target=processingThreadBody) +processingThread.start() +while cv.waitKey(1) < 0: + try: + outs = predictionsQueue.get_nowait() + frame = processedFramesQueue.get_nowait() + postprocess(frame, outs) + if predictionsQueue.counter > 1: + label = 'Camera: %.2f FPS' % framesQueue.getFPS() + cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) + label = 'Network: %.2f FPS' % predictionsQueue.getFPS() + cv.putText(frame, label, (0, 30), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) + label = 'Skipped frames: %d' % (framesQueue.counter - predictionsQueue.counter) + cv.putText(frame, label, (0, 45), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) + cv.imshow(winName, frame) + except queue.Empty: + pass +process = False +framesThread.join() +processingThread.join() + +# File: opencv-master/samples/dnn/openpose.py +import cv2 as cv +import numpy as np +import argparse +parser = argparse.ArgumentParser(description='This script is used to demonstrate OpenPose human pose estimation network from https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. The sample and model are simplified and could be used for a single person on the frame.') +parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera') +parser.add_argument('--proto', help='Path to .prototxt') +parser.add_argument('--model', help='Path to .caffemodel') +parser.add_argument('--dataset', help='Specify what kind of model was trained. 
It could be (COCO, MPI, HAND) depends on dataset.') +parser.add_argument('--thr', default=0.1, type=float, help='Threshold value for pose parts heat map') +parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.') +parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.') +parser.add_argument('--scale', default=0.003922, type=float, help='Scale for blob.') +args = parser.parse_args() +if args.dataset == 'COCO': + BODY_PARTS = {'Nose': 0, 'Neck': 1, 'RShoulder': 2, 'RElbow': 3, 'RWrist': 4, 'LShoulder': 5, 'LElbow': 6, 'LWrist': 7, 'RHip': 8, 'RKnee': 9, 'RAnkle': 10, 'LHip': 11, 'LKnee': 12, 'LAnkle': 13, 'REye': 14, 'LEye': 15, 'REar': 16, 'LEar': 17, 'Background': 18} + POSE_PAIRS = [['Neck', 'RShoulder'], ['Neck', 'LShoulder'], ['RShoulder', 'RElbow'], ['RElbow', 'RWrist'], ['LShoulder', 'LElbow'], ['LElbow', 'LWrist'], ['Neck', 'RHip'], ['RHip', 'RKnee'], ['RKnee', 'RAnkle'], ['Neck', 'LHip'], ['LHip', 'LKnee'], ['LKnee', 'LAnkle'], ['Neck', 'Nose'], ['Nose', 'REye'], ['REye', 'REar'], ['Nose', 'LEye'], ['LEye', 'LEar']] +elif args.dataset == 'MPI': + BODY_PARTS = {'Head': 0, 'Neck': 1, 'RShoulder': 2, 'RElbow': 3, 'RWrist': 4, 'LShoulder': 5, 'LElbow': 6, 'LWrist': 7, 'RHip': 8, 'RKnee': 9, 'RAnkle': 10, 'LHip': 11, 'LKnee': 12, 'LAnkle': 13, 'Chest': 14, 'Background': 15} + POSE_PAIRS = [['Head', 'Neck'], ['Neck', 'RShoulder'], ['RShoulder', 'RElbow'], ['RElbow', 'RWrist'], ['Neck', 'LShoulder'], ['LShoulder', 'LElbow'], ['LElbow', 'LWrist'], ['Neck', 'Chest'], ['Chest', 'RHip'], ['RHip', 'RKnee'], ['RKnee', 'RAnkle'], ['Chest', 'LHip'], ['LHip', 'LKnee'], ['LKnee', 'LAnkle']] +elif args.dataset == 'HAND': + BODY_PARTS = {'Wrist': 0, 'ThumbMetacarpal': 1, 'ThumbProximal': 2, 'ThumbMiddle': 3, 'ThumbDistal': 4, 'IndexFingerMetacarpal': 5, 'IndexFingerProximal': 6, 'IndexFingerMiddle': 7, 'IndexFingerDistal': 8, 'MiddleFingerMetacarpal': 9, 'MiddleFingerProximal': 10, 'MiddleFingerMiddle': 11, 'MiddleFingerDistal': 12, 'RingFingerMetacarpal': 13, 'RingFingerProximal': 14, 'RingFingerMiddle': 15, 'RingFingerDistal': 16, 'LittleFingerMetacarpal': 17, 'LittleFingerProximal': 18, 'LittleFingerMiddle': 19, 'LittleFingerDistal': 20} + POSE_PAIRS = [['Wrist', 'ThumbMetacarpal'], ['ThumbMetacarpal', 'ThumbProximal'], ['ThumbProximal', 'ThumbMiddle'], ['ThumbMiddle', 'ThumbDistal'], ['Wrist', 'IndexFingerMetacarpal'], ['IndexFingerMetacarpal', 'IndexFingerProximal'], ['IndexFingerProximal', 'IndexFingerMiddle'], ['IndexFingerMiddle', 'IndexFingerDistal'], ['Wrist', 'MiddleFingerMetacarpal'], ['MiddleFingerMetacarpal', 'MiddleFingerProximal'], ['MiddleFingerProximal', 'MiddleFingerMiddle'], ['MiddleFingerMiddle', 'MiddleFingerDistal'], ['Wrist', 'RingFingerMetacarpal'], ['RingFingerMetacarpal', 'RingFingerProximal'], ['RingFingerProximal', 'RingFingerMiddle'], ['RingFingerMiddle', 'RingFingerDistal'], ['Wrist', 'LittleFingerMetacarpal'], ['LittleFingerMetacarpal', 'LittleFingerProximal'], ['LittleFingerProximal', 'LittleFingerMiddle'], ['LittleFingerMiddle', 'LittleFingerDistal']] +else: + raise Exception("you need to specify either 'COCO', 'MPI', or 'Hand' in args.dataset") +inWidth = args.width +inHeight = args.height +inScale = args.scale +net = cv.dnn.readNet(cv.samples.findFile(args.proto), cv.samples.findFile(args.model)) +cap = cv.VideoCapture(args.input if args.input else 0) +while cv.waitKey(1) < 0: + (hasFrame, frame) = cap.read() + if not hasFrame: + cv.waitKey() + break + frameWidth = frame.shape[1] + 
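+    # --- Editor's note: explanatory comments, not part of the upstream sample. ---
+    # The network returns one heat map per body part, shaped [1, numParts, H, W]
+    # (H and W are the input size divided by the network stride, typically 8 for
+    # OpenPose-style models). Since this simplified sample assumes one person in
+    # the frame, the decoding below just takes the global maximum of each map:
+    #     heatMap = out[0, i, :, :]
+    #     _, conf, _, point = cv.minMaxLoc(heatMap)  # most confident location
+    #     x = frameWidth * point[0] / out.shape[3]   # rescale to frame pixels
+    #     y = frameHeight * point[1] / out.shape[2]
+    # A part is kept only if conf exceeds args.thr; multi-person decoding would
+    # additionally need part-affinity fields, which this sample does not use.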
frameHeight = frame.shape[0] + inp = cv.dnn.blobFromImage(frame, inScale, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False) + net.setInput(inp) + out = net.forward() + assert len(BODY_PARTS) <= out.shape[1] + points = [] + for i in range(len(BODY_PARTS)): + heatMap = out[0, i, :, :] + (_, conf, _, point) = cv.minMaxLoc(heatMap) + x = frameWidth * point[0] / out.shape[3] + y = frameHeight * point[1] / out.shape[2] + points.append((int(x), int(y)) if conf > args.thr else None) + for pair in POSE_PAIRS: + partFrom = pair[0] + partTo = pair[1] + assert partFrom in BODY_PARTS + assert partTo in BODY_PARTS + idFrom = BODY_PARTS[partFrom] + idTo = BODY_PARTS[partTo] + if points[idFrom] and points[idTo]: + cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3) + cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED) + cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED) + (t, _) = net.getPerfProfile() + freq = cv.getTickFrequency() / 1000 + cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0)) + cv.imshow('OpenPose using OpenCV', frame) + +# File: opencv-master/samples/dnn/optical_flow.py +"""""" +import argparse +import os.path +import numpy as np +import cv2 as cv + +class OpticalFlow(object): + + def __init__(self, model, height, width, proto=''): + if proto: + self.net = cv.dnn.readNetFromCaffe(proto, model) + else: + self.net = cv.dnn.readNet(model) + self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV) + self.height = height + self.width = width + + def compute_flow(self, first_img, second_img): + inp0 = cv.dnn.blobFromImage(first_img, size=(self.width, self.height)) + inp1 = cv.dnn.blobFromImage(second_img, size=(self.width, self.height)) + self.net.setInputsNames(['img0', 'img1']) + self.net.setInput(inp0, 'img0') + self.net.setInput(inp1, 'img1') + flow = self.net.forward() + output = self.motion_to_color(flow) + return output + + def motion_to_color(self, flow): + arr = np.arange(0, 255, dtype=np.uint8) + colormap = cv.applyColorMap(arr, cv.COLORMAP_HSV) + colormap = colormap.squeeze(1) + flow = flow.squeeze(0) + (fx, fy) = (flow[0, ...], flow[1, ...]) + rad = np.sqrt(fx ** 2 + fy ** 2) + maxrad = rad.max() if rad.max() != 0 else 1 + ncols = arr.size + rad = rad[..., np.newaxis] / maxrad + a = np.arctan2(-fy / maxrad, -fx / maxrad) / np.pi + fk = (a + 1) / 2.0 * (ncols - 1) + k0 = fk.astype(np.int32) + k1 = (k0 + 1) % ncols + f = fk[..., np.newaxis] - k0[..., np.newaxis] + col0 = colormap[k0] / 255.0 + col1 = colormap[k1] / 255.0 + col = (1 - f) * col0 + f * col1 + col = np.where(rad <= 1, 1 - rad * (1 - col), col * 0.75) + output = (255.0 * col).astype(np.uint8) + return output +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Use this script to calculate optical flow', formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-input', '-i', required=True, help='Path to input video file. 
Skip this argument to capture frames from a camera.') + parser.add_argument('--height', default=320, type=int, help='Input height') + parser.add_argument('--width', default=448, type=int, help='Input width') + parser.add_argument('--proto', '-p', default='', help='Path to prototxt.') + parser.add_argument('--model', '-m', required=True, help='Path to model.') + (args, _) = parser.parse_known_args() + if not os.path.isfile(args.model): + raise OSError('Model does not exist') + if args.proto and (not os.path.isfile(args.proto)): + raise OSError('Prototxt does not exist') + winName = 'Calculation optical flow in OpenCV' + cv.namedWindow(winName, cv.WINDOW_NORMAL) + cap = cv.VideoCapture(args.input if args.input else 0) + (hasFrame, first_frame) = cap.read() + if args.proto: + divisor = 64.0 + var = {} + var['ADAPTED_WIDTH'] = int(np.ceil(args.width / divisor) * divisor) + var['ADAPTED_HEIGHT'] = int(np.ceil(args.height / divisor) * divisor) + var['SCALE_WIDTH'] = args.width / float(var['ADAPTED_WIDTH']) + var['SCALE_HEIGHT'] = args.height / float(var['ADAPTED_HEIGHT']) + config = '' + proto = open(args.proto).readlines() + for line in proto: + for (key, value) in var.items(): + tag = '$%s$' % key + line = line.replace(tag, str(value)) + config += line + caffemodel = open(args.model, 'rb').read() + opt_flow = OpticalFlow(caffemodel, var['ADAPTED_HEIGHT'], var['ADAPTED_WIDTH'], bytearray(config.encode())) + else: + opt_flow = OpticalFlow(args.model, 360, 480) + while cv.waitKey(1) < 0: + (hasFrame, second_frame) = cap.read() + if not hasFrame: + break + flow = opt_flow.compute_flow(first_frame, second_frame) + first_frame = second_frame + cv.imshow(winName, flow) + +# File: opencv-master/samples/dnn/person_reid.py +"""""" +import argparse +import os.path +import numpy as np +import cv2 as cv +backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) +targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) +MEAN = (0.485, 0.456, 0.406) +STD = (0.229, 0.224, 0.225) + +def preprocess(images, height, width): + img_list = [] + for image in images: + image = cv.resize(image, (width, height)) + img_list.append(image[:, :, ::-1]) + images = np.array(img_list) + images = (images / 255.0 - MEAN) / STD + input = cv.dnn.blobFromImages(images.astype(np.float32), ddepth=cv.CV_32F) + return input + +def extract_feature(img_dir, model_path, batch_size=32, resize_h=384, resize_w=128, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU): + feat_list = [] + path_list = os.listdir(img_dir) + path_list = [os.path.join(img_dir, img_name) for img_name in path_list] + count = 0 + for i in range(0, len(path_list), batch_size): + print('Feature Extraction for images in', img_dir, 'Batch:', count, '/', len(path_list)) + batch = path_list[i:min(i + batch_size, len(path_list))] + imgs = read_data(batch) + inputs = preprocess(imgs, resize_h, resize_w) + feat = run_net(inputs, model_path, backend, target) + feat_list.append(feat) + count += batch_size + feats = np.concatenate(feat_list, axis=0) + return (feats, path_list) + +def run_net(inputs, model_path, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU): + net = cv.dnn.readNet(model_path) + net.setPreferableBackend(backend) + net.setPreferableTarget(target) + net.setInput(inputs) + out = 
net.forward()
+    out = np.reshape(out, (out.shape[0], out.shape[1]))
+    return out
+
+def read_data(path_list):
+    img_list = []
+    for img_path in path_list:
+        img = cv.imread(img_path)
+        if img is None:
+            continue
+        img_list.append(img)
+    return img_list
+
+def normalize(nparray, order=2, axis=0):
+    norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)
+    return nparray / (norm + np.finfo(np.float32).eps)
+
+def similarity(array1, array2):
+    array1 = normalize(array1, axis=1)
+    array2 = normalize(array2, axis=1)
+    dist = np.matmul(array1, array2.T)
+    return dist
+
+def topk(query_feat, gallery_feat, topk=5):
+    sim = similarity(query_feat, gallery_feat)
+    index = np.argsort(-sim, axis=1)
+    return [i[0:int(topk)] for i in index]
+
+def drawRankList(query_name, gallery_list, output_size=(128, 384)):
+
+    def addBorder(im, color):
+        bordersize = 5
+        border = cv.copyMakeBorder(im, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType=cv.BORDER_CONSTANT, value=color)
+        return border
+    query_img = cv.imread(query_name)
+    query_img = cv.resize(query_img, output_size)
+    query_img = addBorder(query_img, [0, 0, 0])
+    cv.putText(query_img, 'Query', (10, 30), cv.FONT_HERSHEY_COMPLEX, 1.0, (0, 255, 0), 2)
+    gallery_img_list = []
+    for (i, gallery_name) in enumerate(gallery_list):
+        gallery_img = cv.imread(gallery_name)
+        gallery_img = cv.resize(gallery_img, output_size)
+        gallery_img = addBorder(gallery_img, [255, 255, 255])
+        cv.putText(gallery_img, 'G%02d' % i, (10, 30), cv.FONT_HERSHEY_COMPLEX, 1.0, (0, 255, 0), 2)
+        gallery_img_list.append(gallery_img)
+    ret = np.concatenate([query_img] + gallery_img_list, axis=1)
+    return ret
+
+def visualization(topk_idx, query_names, gallery_names, output_dir='vis'):
+    if not os.path.exists(output_dir):
+        os.mkdir(output_dir)
+    for (i, idx) in enumerate(topk_idx):
+        query_name = query_names[i]
+        topk_names = [gallery_names[j] for j in idx]
+        vis_img = drawRankList(query_name, topk_names)
+        output_path = os.path.join(output_dir, '%03d_%s' % (i, os.path.basename(query_name)))
+        cv.imwrite(output_path, vis_img)
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Use this script to run person re-identification', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--query_dir', '-q', required=True, help='Path to the directory with query images.')
+    parser.add_argument('--gallery_dir', '-g', required=True, help='Path to gallery directory.')
+    parser.add_argument('--resize_h', type=int, default=256, help='The height of the input for model inference.')
+    parser.add_argument('--resize_w', type=int, default=128, help='The width of the input for model inference.')
+    parser.add_argument('--model', '-m', default='reid.onnx', help='Path to the ONNX model.')
+    parser.add_argument('--visualization_dir', default='vis', help='Path for the visualization results')
+    parser.add_argument('--topk', type=int, default=10, help='Number of images visualized in the rank list')
+    parser.add_argument('--batchsize', type=int, default=32, help='The batch size of each inference')
+    parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: %d: automatically (by default), %d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), %d: OpenCV implementation, %d: VKCOM, %d: CUDA backend" % backends)
+    parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: %d: CPU target (by default), 
%d: OpenCL, %d: OpenCL fp16 (half-float precision), %d: NCS2 VPU, %d: HDDL VPU, %d: Vulkan, %d: CUDA, %d: CUDA FP16' % targets) + (args, _) = parser.parse_known_args() + if not os.path.isfile(args.model): + raise OSError('Model not exist') + (query_feat, query_names) = extract_feature(args.query_dir, args.model, args.batchsize, args.resize_h, args.resize_w, args.backend, args.target) + (gallery_feat, gallery_names) = extract_feature(args.gallery_dir, args.model, args.batchsize, args.resize_h, args.resize_w, args.backend, args.target) + topk_idx = topk(query_feat, gallery_feat, args.topk) + visualization(topk_idx, query_names, gallery_names, output_dir=args.visualization_dir) + +# File: opencv-master/samples/dnn/segmentation.py +import cv2 as cv +import argparse +import numpy as np +import sys +from common import * +backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) +targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) +parser = argparse.ArgumentParser(add_help=False) +parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'), help='An optional path to file with preprocessing parameters.') +parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.') +parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet', 'onnx'], help='Optional name of an origin framework of the model. Detect it automatically if it does not set.') +parser.add_argument('--colors', help='Optional path to a text file with colors for an every class. 
Every color is represented with three values from 0 to 255 in BGR channel order.') +parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: %d: automatically (by default), %d: Halide language (http://halide-lang.org/), %d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), %d: OpenCV implementation, %d: VKCOM, %d: CUDA" % backends) +parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: %d: CPU target (by default), %d: OpenCL, %d: OpenCL fp16 (half-float precision), %d: NCS2 VPU, %d: HDDL VPU, %d: Vulkan, %d: CUDA, %d: CUDA fp16 (half-float preprocess)' % targets) +(args, _) = parser.parse_known_args() +add_preproc_args(args.zoo, parser, 'segmentation') +parser = argparse.ArgumentParser(parents=[parser], description='Use this script to run semantic segmentation deep learning networks using OpenCV.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) +args = parser.parse_args() +args.model = findFile(args.model) +args.config = findFile(args.config) +args.classes = findFile(args.classes) +np.random.seed(324) +classes = None +if args.classes: + with open(args.classes, 'rt') as f: + classes = f.read().rstrip('\n').split('\n') +colors = None +if args.colors: + with open(args.colors, 'rt') as f: + colors = [np.array(color.split(' '), np.uint8) for color in f.read().rstrip('\n').split('\n')] +legend = None + +def showLegend(classes): + global legend + if classes is not None and legend is None: + blockHeight = 30 + assert len(classes) == len(colors) + legend = np.zeros((blockHeight * len(colors), 200, 3), np.uint8) + for i in range(len(classes)): + block = legend[i * blockHeight:(i + 1) * blockHeight] + block[:, :] = colors[i] + cv.putText(block, classes[i], (0, blockHeight // 2), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255)) + cv.namedWindow('Legend', cv.WINDOW_NORMAL) + cv.imshow('Legend', legend) +net = cv.dnn.readNet(args.model, args.config, args.framework) +net.setPreferableBackend(args.backend) +net.setPreferableTarget(args.target) +winName = 'Deep learning semantic segmentation in OpenCV' +cv.namedWindow(winName, cv.WINDOW_NORMAL) +cap = cv.VideoCapture(args.input if args.input else 0) +legend = None +while cv.waitKey(1) < 0: + (hasFrame, frame) = cap.read() + if not hasFrame: + cv.waitKey() + break + frameHeight = frame.shape[0] + frameWidth = frame.shape[1] + inpWidth = args.width if args.width else frameWidth + inpHeight = args.height if args.height else frameHeight + blob = cv.dnn.blobFromImage(frame, args.scale, (inpWidth, inpHeight), args.mean, args.rgb, crop=False) + net.setInput(blob) + score = net.forward() + numClasses = score.shape[1] + height = score.shape[2] + width = score.shape[3] + if not colors: + colors = [np.array([0, 0, 0], np.uint8)] + for i in range(1, numClasses): + colors.append((colors[i - 1] + np.random.randint(0, 256, [3], np.uint8)) / 2) + classIds = np.argmax(score[0], axis=0) + segm = np.stack([colors[idx] for idx in classIds.flatten()]) + segm = segm.reshape(height, width, 3) + segm = cv.resize(segm, (frameWidth, frameHeight), interpolation=cv.INTER_NEAREST) + frame = (0.1 * frame + 0.9 * segm).astype(np.uint8) + (t, _) = net.getPerfProfile() + label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency()) + cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) + showLegend(classes) + cv.imshow(winName, 
frame) + +# File: opencv-master/samples/dnn/shrink_tf_graph_weights.py +import tensorflow as tf +import struct +import argparse +import numpy as np +parser = argparse.ArgumentParser(description='Convert weights of a frozen TensorFlow graph to fp16.') +parser.add_argument('--input', required=True, help='Path to frozen graph.') +parser.add_argument('--output', required=True, help='Path to output graph.') +parser.add_argument('--ops', default=['Conv2D', 'MatMul'], nargs='+', help='List of ops whose weights are converted.') +args = parser.parse_args() +DT_FLOAT = 1 +DT_HALF = 19 +with tf.gfile.FastGFile(args.input, 'rb') as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) +inputs = [] +for node in graph_def.node: + if node.op in args.ops: + inputs += node.input +weightsNodes = [] +for node in graph_def.node: + if node.name in inputs and node.op == 'Identity' and (node.attr['T'].type == DT_FLOAT): + weightsNodes.append(node.input[0]) + node.op = 'Cast' + node.attr['DstT'].type = DT_FLOAT + node.attr['SrcT'].type = DT_HALF + del node.attr['T'] + del node.attr['_class'] +for node in graph_def.node: + if node.name in weightsNodes: + node.attr['dtype'].type = DT_HALF + node.attr['value'].tensor.dtype = DT_HALF + floats = node.attr['value'].tensor.tensor_content + floats = struct.unpack('f' * (len(floats) // 4), floats) + halfs = np.array(floats).astype(np.float16).view(np.uint16) + node.attr['value'].tensor.tensor_content = struct.pack('H' * len(halfs), *halfs) +tf.train.write_graph(graph_def, '', args.output, as_text=False) + +# File: opencv-master/samples/dnn/siamrpnpp.py +import argparse +import cv2 as cv +import numpy as np +import os +'' + +class ModelBuilder: + + def __init__(self, target_net, search_net, rpn_head): + super(ModelBuilder, self).__init__() + self.target_net = target_net + self.search_net = search_net + self.rpn_head = rpn_head + + def template(self, z): + self.target_net.setInput(z) + outNames = self.target_net.getUnconnectedOutLayersNames() + (self.zfs_1, self.zfs_2, self.zfs_3) = self.target_net.forward(outNames) + + def track(self, x): + self.search_net.setInput(x) + outNames = self.search_net.getUnconnectedOutLayersNames() + (xfs_1, xfs_2, xfs_3) = self.search_net.forward(outNames) + self.rpn_head.setInput(np.stack([self.zfs_1, self.zfs_2, self.zfs_3]), 'input_1') + self.rpn_head.setInput(np.stack([xfs_1, xfs_2, xfs_3]), 'input_2') + outNames = self.rpn_head.getUnconnectedOutLayersNames() + (cls, loc) = self.rpn_head.forward(outNames) + return {'cls': cls, 'loc': loc} + +class Anchors: + + def __init__(self, stride, ratios, scales, image_center=0, size=0): + self.stride = stride + self.ratios = ratios + self.scales = scales + self.image_center = image_center + self.size = size + self.anchor_num = len(self.scales) * len(self.ratios) + self.anchors = self.generate_anchors() + + def generate_anchors(self): + anchors = np.zeros((self.anchor_num, 4), dtype=np.float32) + size = self.stride ** 2 + count = 0 + for r in self.ratios: + ws = int(np.sqrt(size * 1.0 / r)) + hs = int(ws * r) + for s in self.scales: + w = ws * s + h = hs * s + anchors[count][:] = [-w * 0.5, -h * 0.5, w * 0.5, h * 0.5][:] + count += 1 + return anchors + +class SiamRPNTracker: + + def __init__(self, model): + super(SiamRPNTracker, self).__init__() + self.anchor_stride = 8 + self.anchor_ratios = [0.33, 0.5, 1, 2, 3] + self.anchor_scales = [8] + self.track_base_size = 8 + self.track_context_amount = 0.5 + self.track_exemplar_size = 127 + self.track_instance_size = 255 + self.track_lr = 0.4 + 
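# Note: these hyperparameters follow the reference SiamRPN++ tracker. With track_instance_size=255, track_exemplar_size=127, anchor_stride=8 and track_base_size=8, the score map built below is (255 - 127) // 8 + 1 + 8 = 25 cells per side, with anchor_num = len(ratios) * len(scales) = 5 anchors per cell. + # The outer product of Hanning windows, tiled once per anchor, favours candidates near the previous target position; track_window_influence controls how strongly it is blended into the scores. +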
self.track_penalty_k = 0.04 + self.track_window_influence = 0.44 + self.score_size = (self.track_instance_size - self.track_exemplar_size) // self.anchor_stride + 1 + self.track_base_size + self.anchor_num = len(self.anchor_ratios) * len(self.anchor_scales) + hanning = np.hanning(self.score_size) + window = np.outer(hanning, hanning) + self.window = np.tile(window.flatten(), self.anchor_num) + self.anchors = self.generate_anchor(self.score_size) + self.model = model + + def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans): + if isinstance(pos, float): + pos = [pos, pos] + sz = original_sz + (im_h, im_w, im_d) = im.shape + c = (original_sz + 1) / 2 + (cx, cy) = pos + context_xmin = np.floor(cx - c + 0.5) + context_xmax = context_xmin + sz - 1 + context_ymin = np.floor(cy - c + 0.5) + context_ymax = context_ymin + sz - 1 + left_pad = int(max(0.0, -context_xmin)) + top_pad = int(max(0.0, -context_ymin)) + right_pad = int(max(0.0, context_xmax - im_w + 1)) + bottom_pad = int(max(0.0, context_ymax - im_h + 1)) + context_xmin += left_pad + context_xmax += left_pad + context_ymin += top_pad + context_ymax += top_pad + if any([top_pad, bottom_pad, left_pad, right_pad]): + size = (im_h + top_pad + bottom_pad, im_w + left_pad + right_pad, im_d) + te_im = np.zeros(size, np.uint8) + te_im[top_pad:top_pad + im_h, left_pad:left_pad + im_w, :] = im + if top_pad: + te_im[0:top_pad, left_pad:left_pad + im_w, :] = avg_chans + if bottom_pad: + te_im[im_h + top_pad:, left_pad:left_pad + im_w, :] = avg_chans + if left_pad: + te_im[:, 0:left_pad, :] = avg_chans + if right_pad: + te_im[:, im_w + left_pad:, :] = avg_chans + im_patch = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :] + else: + im_patch = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :] + if not np.array_equal(model_sz, original_sz): + im_patch = cv.resize(im_patch, (model_sz, model_sz)) + im_patch = im_patch.transpose(2, 0, 1) + im_patch = im_patch[np.newaxis, :, :, :] + im_patch = im_patch.astype(np.float32) + return im_patch + + def generate_anchor(self, score_size): + anchors = Anchors(self.anchor_stride, self.anchor_ratios, self.anchor_scales) + anchor = anchors.anchors + (x1, y1, x2, y2) = (anchor[:, 0], anchor[:, 1], anchor[:, 2], anchor[:, 3]) + anchor = np.stack([(x1 + x2) * 0.5, (y1 + y2) * 0.5, x2 - x1, y2 - y1], 1) + total_stride = anchors.stride + anchor_num = anchors.anchor_num + anchor = np.tile(anchor, score_size * score_size).reshape((-1, 4)) + ori = -(score_size // 2) * total_stride + (xx, yy) = np.meshgrid([ori + total_stride * dx for dx in range(score_size)], [ori + total_stride * dy for dy in range(score_size)]) + (xx, yy) = (np.tile(xx.flatten(), (anchor_num, 1)).flatten(), np.tile(yy.flatten(), (anchor_num, 1)).flatten()) + (anchor[:, 0], anchor[:, 1]) = (xx.astype(np.float32), yy.astype(np.float32)) + return anchor + + def _convert_bbox(self, delta, anchor): + delta_transpose = np.transpose(delta, (1, 2, 3, 0)) + delta_contig = np.ascontiguousarray(delta_transpose) + delta = delta_contig.reshape(4, -1) + delta[0, :] = delta[0, :] * anchor[:, 2] + anchor[:, 0] + delta[1, :] = delta[1, :] * anchor[:, 3] + anchor[:, 1] + delta[2, :] = np.exp(delta[2, :]) * anchor[:, 2] + delta[3, :] = np.exp(delta[3, :]) * anchor[:, 3] + return delta + + def _softmax(self, x): + x = x.astype(dtype=np.float32) + x_max = x.max(axis=1)[:, np.newaxis] + e_x = np.exp(x - x_max) + div = np.sum(e_x, axis=1)[:, np.newaxis] + y = e_x / div + return y + + def 
_convert_score(self, score): + score_transpose = np.transpose(score, (1, 2, 3, 0)) + score_con = np.ascontiguousarray(score_transpose) + score_view = score_con.reshape(2, -1) + score = np.transpose(score_view, (1, 0)) + score = self._softmax(score) + return score[:, 1] + + def _bbox_clip(self, cx, cy, width, height, boundary): + (bbox_h, bbox_w) = boundary + cx = max(0, min(cx, bbox_w)) + cy = max(0, min(cy, bbox_h)) + width = max(10, min(width, bbox_w)) + height = max(10, min(height, bbox_h)) + return (cx, cy, width, height) + + def init(self, img, bbox): + (x, y, w, h) = bbox + self.center_pos = np.array([x + (w - 1) / 2, y + (h - 1) / 2]) + self.h = h + self.w = w + w_z = self.w + self.track_context_amount * np.add(h, w) + h_z = self.h + self.track_context_amount * np.add(h, w) + s_z = round(np.sqrt(w_z * h_z)) + self.channel_average = np.mean(img, axis=(0, 1)) + z_crop = self.get_subwindow(img, self.center_pos, self.track_exemplar_size, s_z, self.channel_average) + self.model.template(z_crop) + + def track(self, img): + w_z = self.w + self.track_context_amount * np.add(self.w, self.h) + h_z = self.h + self.track_context_amount * np.add(self.w, self.h) + s_z = np.sqrt(w_z * h_z) + scale_z = self.track_exemplar_size / s_z + s_x = s_z * (self.track_instance_size / self.track_exemplar_size) + x_crop = self.get_subwindow(img, self.center_pos, self.track_instance_size, round(s_x), self.channel_average) + outputs = self.model.track(x_crop) + score = self._convert_score(outputs['cls']) + pred_bbox = self._convert_bbox(outputs['loc'], self.anchors) + + def change(r): + return np.maximum(r, 1.0 / r) + + def sz(w, h): + pad = (w + h) * 0.5 + return np.sqrt((w + pad) * (h + pad)) + s_c = change(sz(pred_bbox[2, :], pred_bbox[3, :]) / sz(self.w * scale_z, self.h * scale_z)) + r_c = change(self.w / self.h / (pred_bbox[2, :] / pred_bbox[3, :])) + penalty = np.exp(-(r_c * s_c - 1) * self.track_penalty_k) + pscore = penalty * score + pscore = pscore * (1 - self.track_window_influence) + self.window * self.track_window_influence + best_idx = np.argmax(pscore) + bbox = pred_bbox[:, best_idx] / scale_z + lr = penalty[best_idx] * score[best_idx] * self.track_lr + (cpx, cpy) = self.center_pos + (x, y, w, h) = bbox + cx = x + cpx + cy = y + cpy + width = self.w * (1 - lr) + w * lr + height = self.h * (1 - lr) + h * lr + (cx, cy, width, height) = self._bbox_clip(cx, cy, width, height, img.shape[:2]) + self.center_pos = np.array([cx, cy]) + self.w = width + self.h = height + bbox = [cx - width / 2, cy - height / 2, width, height] + best_score = score[best_idx] + return {'bbox': bbox, 'best_score': best_score} + +def get_frames(video_name): + cap = cv.VideoCapture(video_name if video_name else 0) + while True: + (ret, frame) = cap.read() + if ret: + yield frame + else: + break + +def main(): + backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) + targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) + parser = argparse.ArgumentParser(description='Use this script to run SiamRPN++ Visual Tracker', formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--input_video', type=str, help='Path to input video file. 
Skip this argument to capture frames from a camera.') + parser.add_argument('--target_net', type=str, default='target_net.onnx', help='Path to the part of SiamRPN++ run on the target frame.') + parser.add_argument('--search_net', type=str, default='search_net.onnx', help='Path to the part of SiamRPN++ run on the search frame.') + parser.add_argument('--rpn_head', type=str, default='rpn_head.onnx', help='Path to RPN Head ONNX model.') + parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Select a computation backend: %d: automatically (by default), %d: Halide, %d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), %d: OpenCV Implementation, %d: VKCOM, %d: CUDA" % backends) + parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Select a target device: %d: CPU target (by default), %d: OpenCL, %d: OpenCL FP16, %d: Myriad, %d: Vulkan, %d: CUDA, %d: CUDA fp16 (half-float preprocess)' % targets) + (args, _) = parser.parse_known_args() + if args.input_video and (not os.path.isfile(args.input_video)): + raise OSError('Input video file does not exist') + if not os.path.isfile(args.target_net): + raise OSError('Target Net does not exist') + if not os.path.isfile(args.search_net): + raise OSError('Search Net does not exist') + if not os.path.isfile(args.rpn_head): + raise OSError('RPN Head Net does not exist') + target_net = cv.dnn.readNetFromONNX(args.target_net) + target_net.setPreferableBackend(args.backend) + target_net.setPreferableTarget(args.target) + search_net = cv.dnn.readNetFromONNX(args.search_net) + search_net.setPreferableBackend(args.backend) + search_net.setPreferableTarget(args.target) + rpn_head = cv.dnn.readNetFromONNX(args.rpn_head) + rpn_head.setPreferableBackend(args.backend) + rpn_head.setPreferableTarget(args.target) + model = ModelBuilder(target_net, search_net, rpn_head) + tracker = SiamRPNTracker(model) + first_frame = True + cv.namedWindow('SiamRPN++ Tracker', cv.WINDOW_AUTOSIZE) + for frame in get_frames(args.input_video): + if first_frame: + try: + init_rect = cv.selectROI('SiamRPN++ Tracker', frame, False, False) + except: + exit() + tracker.init(frame, init_rect) + first_frame = False + else: + outputs = tracker.track(frame) + bbox = list(map(int, outputs['bbox'])) + (x, y, w, h) = bbox + cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3) + cv.imshow('SiamRPN++ Tracker', frame) + key = cv.waitKey(1) + if key == ord('q'): + break +if __name__ == '__main__': + main() + +# File: opencv-master/samples/dnn/speech_recognition.py +import numpy as np +import cv2 as cv +import argparse +import os +'' + +class FilterbankFeatures: + + def __init__(self, sample_rate=16000, window_size=0.02, window_stride=0.01, n_fft=512, preemph=0.97, n_filt=64, lowfreq=0, highfreq=None, log=True, dither=1e-05): + self.win_length = int(sample_rate * window_size) + self.hop_length = int(sample_rate * window_stride) + self.n_fft = n_fft or 2 ** np.ceil(np.log2(self.win_length)) + self.log = log + self.dither = dither + self.n_filt = n_filt + self.preemph = preemph + highfreq = highfreq or sample_rate / 2 + self.window_tensor = np.hanning(self.win_length) + self.filterbanks = self.mel(sample_rate, self.n_fft, n_mels=n_filt, fmin=lowfreq, fmax=highfreq) + self.filterbanks.dtype = np.float32 + self.filterbanks = np.expand_dims(self.filterbanks, 0) + + def normalize_batch(self, x, seq_len): + x_mean = np.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype) + x_std = 
np.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype) + for i in range(x.shape[0]): + x_mean[i, :] = np.mean(x[i, :, :seq_len[i]], axis=1) + x_std[i, :] = np.std(x[i, :, :seq_len[i]], axis=1) + x_std += 1e-10 + return (x - np.expand_dims(x_mean, 2)) / np.expand_dims(x_std, 2) + + def calculate_features(self, x, seq_len): + dtype = x.dtype + seq_len = np.ceil(seq_len / self.hop_length) + seq_len = np.array(seq_len, dtype=np.int32) + if self.dither > 0: + x += self.dither * np.random.randn(*x.shape) + if self.preemph is not None: + x = np.concatenate((np.expand_dims(x[0], -1), x[1:] - self.preemph * x[:-1]), axis=0) + x = self.stft(x, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, fft_window=self.window_tensor) + x = (x ** 2).sum(-1) + x = np.matmul(np.array(self.filterbanks, dtype=x.dtype), x) + if self.log: + x = np.log(x + 1e-20) + x = self.normalize_batch(x, seq_len).astype(dtype) + return x + + def hz_to_mel(self, frequencies): + frequencies = np.asanyarray(frequencies) + f_min = 0.0 + f_sp = 200.0 / 3 + mels = (frequencies - f_min) / f_sp + min_log_hz = 1000.0 + min_log_mel = (min_log_hz - f_min) / f_sp + logstep = np.log(6.4) / 27.0 + if frequencies.ndim: + log_t = frequencies >= min_log_hz + mels[log_t] = min_log_mel + np.log(frequencies[log_t] / min_log_hz) / logstep + elif frequencies >= min_log_hz: + mels = min_log_mel + np.log(frequencies / min_log_hz) / logstep + return mels + + def mel_to_hz(self, mels): + mels = np.asanyarray(mels) + f_min = 0.0 + f_sp = 200.0 / 3 + freqs = f_min + f_sp * mels + min_log_hz = 1000.0 + min_log_mel = (min_log_hz - f_min) / f_sp + logstep = np.log(6.4) / 27.0 + if mels.ndim: + log_t = mels >= min_log_mel + freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel)) + elif mels >= min_log_mel: + freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel)) + return freqs + + def mel_frequencies(self, n_mels=128, fmin=0.0, fmax=11025.0): + min_mel = self.hz_to_mel(fmin) + max_mel = self.hz_to_mel(fmax) + mels = np.linspace(min_mel, max_mel, n_mels) + return self.mel_to_hz(mels) + + def mel(self, sr, n_fft, n_mels=128, fmin=0.0, fmax=None, dtype=np.float32): + if fmax is None: + fmax = float(sr) / 2 + n_mels = int(n_mels) + weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype) + fftfreqs = np.linspace(0, float(sr) / 2, int(1 + n_fft // 2), endpoint=True) + mel_f = self.mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax) + fdiff = np.diff(mel_f) + ramps = np.subtract.outer(mel_f, fftfreqs) + for i in range(n_mels): + lower = -ramps[i] / fdiff[i] + upper = ramps[i + 2] / fdiff[i + 1] + weights[i] = np.maximum(0, np.minimum(lower, upper)) + enorm = 2.0 / (mel_f[2:n_mels + 2] - mel_f[:n_mels]) + weights *= enorm[:, np.newaxis] + return weights + + def pad_window_center(self, data, size, axis=-1, **kwargs): + kwargs.setdefault('mode', 'constant') + n = data.shape[axis] + lpad = int((size - n) // 2) + lengths = [(0, 0)] * data.ndim + lengths[axis] = (lpad, int(size - n - lpad)) + if lpad < 0: + raise Exception('Target size ({:d}) must be at least input size ({:d})'.format(size, n)) + return np.pad(data, lengths, **kwargs) + + def frame(self, x, frame_length, hop_length): + if x.shape[-1] < frame_length: + raise Exception('Input is too short (n={:d}) for frame_length={:d}'.format(x.shape[-1], frame_length)) + x = np.asfortranarray(x) + n_frames = 1 + (x.shape[-1] - frame_length) // hop_length + strides = np.asarray(x.strides) + new_stride = np.prod(strides[strides > 0] // x.itemsize) * x.itemsize + shape = 
list(x.shape)[:-1] + [frame_length, n_frames] + strides = list(strides) + [hop_length * new_stride] + return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides) + + def dtype_r2c(self, d, default=np.complex64): + mapping = {np.dtype(np.float32): np.complex64, np.dtype(np.float64): np.complex128} + dt = np.dtype(d) + if dt.kind == 'c': + return dt + return np.dtype(mapping.get(dt, default)) + + def stft(self, y, n_fft, hop_length=None, win_length=None, fft_window=None, pad_mode='reflect', return_complex=False): + if win_length is None: + win_length = n_fft + if hop_length is None: + hop_length = int(win_length // 4) + if y.ndim != 1: + raise Exception(f'Invalid input shape. Only Mono Channeled audio supported. Input must have shape (Audio,). Got {y.shape}') + fft_window = self.pad_window_center(fft_window, n_fft) + fft_window = fft_window.reshape((-1, 1)) + y = np.pad(y, int(n_fft // 2), mode=pad_mode) + y_frames = self.frame(y, frame_length=n_fft, hop_length=hop_length) + dtype = self.dtype_r2c(y.dtype) + stft_matrix = np.empty((int(1 + n_fft // 2), y_frames.shape[-1]), dtype=dtype, order='F') + stft_matrix = np.fft.rfft(fft_window * y_frames, axis=0) + return stft_matrix if return_complex else np.stack((stft_matrix.real, stft_matrix.imag), axis=-1) + +class Decoder: + + def __init__(self): + labels = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] + self.labels_map = {i: label for (i, label) in enumerate(labels)} + self.blank_id = 28 + + def decode(self, x): + x = np.argmax(x, axis=-1) + hypotheses = [] + prediction = x.tolist() + decoded_prediction = [] + previous = self.blank_id + for p in prediction: + if (p != previous or previous == self.blank_id) and p != self.blank_id: + decoded_prediction.append(p) + previous = p + hypothesis = ''.join([self.labels_map[c] for c in decoded_prediction]) + hypotheses.append(hypothesis) + return hypotheses + +def predict(features, net, decoder): + net.setInput(features) + output = net.forward() + prediction = decoder.decode(output.squeeze(0)) + return prediction[0] + +def readAudioFile(file, audioStream): + cap = cv.VideoCapture(file) + samplingRate = 16000 + params = np.asarray([cv.CAP_PROP_AUDIO_STREAM, audioStream, cv.CAP_PROP_VIDEO_STREAM, -1, cv.CAP_PROP_AUDIO_DATA_DEPTH, cv.CV_32F, cv.CAP_PROP_AUDIO_SAMPLES_PER_SECOND, samplingRate]) + cap.open(file, cv.CAP_ANY, params) + if not cap.isOpened(): + print("Error: Can't read audio file:", file, 'with audioStream =', audioStream) + return + audioBaseIndex = int(cap.get(cv.CAP_PROP_AUDIO_BASE_INDEX)) + inputAudio = [] + while True: + if cap.grab(): + frame = np.asarray([]) + frame = cap.retrieve(frame, audioBaseIndex) + for i in range(len(frame[1][0])): + inputAudio.append(frame[1][0][i]) + else: + break + inputAudio = np.asarray(inputAudio, dtype=np.float64) + return (inputAudio, samplingRate) + +def readAudioMicrophone(microTime): + cap = cv.VideoCapture() + samplingRate = 16000 + params = np.asarray([cv.CAP_PROP_AUDIO_STREAM, 0, cv.CAP_PROP_VIDEO_STREAM, -1, cv.CAP_PROP_AUDIO_DATA_DEPTH, cv.CV_32F, cv.CAP_PROP_AUDIO_SAMPLES_PER_SECOND, samplingRate]) + cap.open(0, cv.CAP_ANY, params) + if not cap.isOpened(): + print("Error: Can't open microphone") + print('Error: problems with audio reading, check input arguments') + return + audioBaseIndex = int(cap.get(cv.CAP_PROP_AUDIO_BASE_INDEX)) + cvTickFreq = cv.getTickFrequency() + sysTimeCurr = cv.getTickCount() + sysTimePrev = 
sysTimeCurr + inputAudio = [] + while (sysTimeCurr - sysTimePrev) / cvTickFreq < microTime: + if cap.grab(): + frame = np.asarray([]) + frame = cap.retrieve(frame, audioBaseIndex) + for i in range(len(frame[1][0])): + inputAudio.append(frame[1][0][i]) + sysTimeCurr = cv.getTickCount() + else: + print('Error: Grab error') + break + inputAudio = np.asarray(inputAudio, dtype=np.float64) + print('Number of samples: ', len(inputAudio)) + return (inputAudio, samplingRate) +if __name__ == '__main__': + backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) + targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16) + parser = argparse.ArgumentParser(description='This script runs the Jasper speech recognition model', formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--input_type', type=str, required=True, help='file or microphone') + parser.add_argument('--micro_time', type=int, default=15, help='Duration of microphone work in seconds. Must be more than 6 sec') + parser.add_argument('--input_audio', type=str, help='Path to input audio file. OR Path to a txt file with relative path to multiple audio files in different lines') + parser.add_argument('--audio_stream', type=int, default=0, help='CAP_PROP_AUDIO_STREAM value') + parser.add_argument('--show_spectrogram', action='store_true', help='Whether to show a spectrogram of the input audio.') + parser.add_argument('--model', type=str, default='jasper.onnx', help='Path to the onnx file of Jasper. default="jasper.onnx"') + parser.add_argument('--output', type=str, help='Path to file where the recognized audio transcript must be saved. Leave empty to print to the console.') + parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help='Select a computation backend: %d: automatically (by default) %d: OpenVINO Inference Engine %d: OpenCV Implementation ' % backends) + parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Select a target device: %d: CPU target (by default) %d: OpenCL %d: OpenCL FP16 ' % targets) + (args, _) = parser.parse_known_args() + if args.input_audio and (not os.path.isfile(args.input_audio)): + raise OSError('Input audio file does not exist') + if not os.path.isfile(args.model): + raise OSError('Jasper model file does not exist') + features = [] + if args.input_type == 'file': + if args.input_audio.endswith('.txt'): + with open(args.input_audio) as f: + content = f.readlines() + content = [x.strip() for x in content] + audio_file_paths = content + for audio_file_path in audio_file_paths: + if not os.path.isfile(audio_file_path): + raise OSError(f'Audio file ({audio_file_path}) does not exist') + else: + audio_file_paths = [args.input_audio] + audio_file_paths = [os.path.abspath(x) for x in audio_file_paths] + for audio_file_path in audio_file_paths: + audio = readAudioFile(audio_file_path, args.audio_stream) + if audio is None: + raise Exception(f"Can't read {audio_file_path}. Try a different format") + features.append(audio[0]) + elif args.input_type == 'microphone': + audio = readAudioMicrophone(args.micro_time) + if audio is None: + raise Exception("Can't open microphone") + features.append(audio[0]) + else: + raise Exception(f"input_type {args.input_type} doesn't exist. 
Please enter 'file' or 'microphone'") + feature_extractor = FilterbankFeatures() + for i in range(len(features)): + X = features[i] + seq_len = np.array([X.shape[0]], dtype=np.int32) + features[i] = feature_extractor.calculate_features(x=X, seq_len=seq_len) + net = cv.dnn.readNetFromONNX(args.model) + net.setPreferableBackend(args.backend) + net.setPreferableTarget(args.target) + if args.show_spectrogram and (not args.input_audio.endswith('.txt')): + img = cv.normalize(src=features[0][0], dst=None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U) + img = cv.applyColorMap(img, cv.COLORMAP_JET) + cv.imshow('spectrogram', img) + cv.waitKey(0) + decoder = Decoder() + prediction = [] + print('Predicting...') + for feature in features: + print(f'\rAudio file {len(prediction) + 1}/{len(features)}', end='') + prediction.append(predict(feature, net, decoder)) + print('') + if args.output: + with open(args.output, 'w') as f: + for pred in prediction: + f.write(pred + '\n') + print('Transcript was written to {}'.format(args.output)) + else: + print(prediction) + cv.destroyAllWindows() + +# File: opencv-master/samples/dnn/text_detection.py +"""""" +import numpy as np +import cv2 as cv +import math +import argparse +parser = argparse.ArgumentParser(description='Use this script to run the TensorFlow implementation (https://github.com/argman/EAST) of EAST: An Efficient and Accurate Scene Text Detector (https://arxiv.org/abs/1704.03155v2). The OCR model can be obtained by converting the pretrained CRNN model to .onnx format from the GitHub repository https://github.com/meijieru/crnn.pytorch, or you can download a trained OCR model directly from https://drive.google.com/drive/folders/1cTbQ3nuZG-EKWak6emD_s8_hHXWz7lAr?usp=sharing') +parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.') +parser.add_argument('--model', '-m', required=True, help='Path to a binary .pb file that contains the trained detector network.') +parser.add_argument('--ocr', default='crnn.onnx', help='Path to a binary .pb or .onnx file that contains the trained recognition network.') +parser.add_argument('--width', type=int, default=320, help='Preprocess input image by resizing to a specific width. It should be a multiple of 32.') +parser.add_argument('--height', type=int, default=320, help='Preprocess input image by resizing to a specific height. 
It should be a multiple of 32.') +parser.add_argument('--thr', type=float, default=0.5, help='Confidence threshold.') +parser.add_argument('--nms', type=float, default=0.4, help='Non-maximum suppression threshold.') +args = parser.parse_args() + +def fourPointsTransform(frame, vertices): + vertices = np.asarray(vertices) + outputSize = (100, 32) + targetVertices = np.array([[0, outputSize[1] - 1], [0, 0], [outputSize[0] - 1, 0], [outputSize[0] - 1, outputSize[1] - 1]], dtype='float32') + rotationMatrix = cv.getPerspectiveTransform(vertices, targetVertices) + result = cv.warpPerspective(frame, rotationMatrix, outputSize) + return result + +def decodeText(scores): + text = '' + alphabet = '0123456789abcdefghijklmnopqrstuvwxyz' + for i in range(scores.shape[0]): + c = np.argmax(scores[i][0]) + if c != 0: + text += alphabet[c - 1] + else: + text += '-' + char_list = [] + for i in range(len(text)): + if text[i] != '-' and (not (i > 0 and text[i] == text[i - 1])): + char_list.append(text[i]) + return ''.join(char_list) + +def decodeBoundingBoxes(scores, geometry, scoreThresh): + detections = [] + confidences = [] + assert len(scores.shape) == 4, 'Incorrect dimensions of scores' + assert len(geometry.shape) == 4, 'Incorrect dimensions of geometry' + assert scores.shape[0] == 1, 'Invalid dimensions of scores' + assert geometry.shape[0] == 1, 'Invalid dimensions of geometry' + assert scores.shape[1] == 1, 'Invalid dimensions of scores' + assert geometry.shape[1] == 5, 'Invalid dimensions of geometry' + assert scores.shape[2] == geometry.shape[2], 'Invalid dimensions of scores and geometry' + assert scores.shape[3] == geometry.shape[3], 'Invalid dimensions of scores and geometry' + height = scores.shape[2] + width = scores.shape[3] + for y in range(0, height): + scoresData = scores[0][0][y] + x0_data = geometry[0][0][y] + x1_data = geometry[0][1][y] + x2_data = geometry[0][2][y] + x3_data = geometry[0][3][y] + anglesData = geometry[0][4][y] + for x in range(0, width): + score = scoresData[x] + if score < scoreThresh: + continue + offsetX = x * 4.0 + offsetY = y * 4.0 + angle = anglesData[x] + cosA = math.cos(angle) + sinA = math.sin(angle) + h = x0_data[x] + x2_data[x] + w = x1_data[x] + x3_data[x] + offset = [offsetX + cosA * x1_data[x] + sinA * x2_data[x], offsetY - sinA * x1_data[x] + cosA * x2_data[x]] + p1 = (-sinA * h + offset[0], -cosA * h + offset[1]) + p3 = (-cosA * w + offset[0], sinA * w + offset[1]) + center = (0.5 * (p1[0] + p3[0]), 0.5 * (p1[1] + p3[1])) + detections.append((center, (w, h), -1 * angle * 180.0 / math.pi)) + confidences.append(float(score)) + return [detections, confidences] + +def main(): + confThreshold = args.thr + nmsThreshold = args.nms + inpWidth = args.width + inpHeight = args.height + modelDetector = args.model + modelRecognition = args.ocr + detector = cv.dnn.readNet(modelDetector) + recognizer = cv.dnn.readNet(modelRecognition) + kWinName = 'EAST: An Efficient and Accurate Scene Text Detector' + cv.namedWindow(kWinName, cv.WINDOW_NORMAL) + outNames = [] + outNames.append('feature_fusion/Conv_7/Sigmoid') + outNames.append('feature_fusion/concat_3') + cap = cv.VideoCapture(args.input if args.input else 0) + tickmeter = cv.TickMeter() + while cv.waitKey(1) < 0: + (hasFrame, frame) = cap.read() + if not hasFrame: + cv.waitKey() + break + height_ = frame.shape[0] + width_ = frame.shape[1] + rW = width_ / float(inpWidth) + rH = height_ / float(inpHeight) + blob = cv.dnn.blobFromImage(frame, 1.0, (inpWidth, inpHeight), (123.68, 116.78, 103.94), True, False) + 
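# Note: blobFromImage resizes the frame to the network input size, swaps BGR to RGB (the True flag) and subtracts the per-channel means (123.68, 116.78, 103.94); rW and rH computed above rescale the detected boxes from network resolution back to the original frame. +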
detector.setInput(blob) + tickmeter.start() + outs = detector.forward(outNames) + tickmeter.stop() + scores = outs[0] + geometry = outs[1] + [boxes, confidences] = decodeBoundingBoxes(scores, geometry, confThreshold) + indices = cv.dnn.NMSBoxesRotated(boxes, confidences, confThreshold, nmsThreshold) + for i in indices: + vertices = cv.boxPoints(boxes[i]) + for j in range(4): + vertices[j][0] *= rW + vertices[j][1] *= rH + if modelRecognition: + cropped = fourPointsTransform(frame, vertices) + cropped = cv.cvtColor(cropped, cv.COLOR_BGR2GRAY) + blob = cv.dnn.blobFromImage(cropped, size=(100, 32), mean=127.5, scalefactor=1 / 127.5) + recognizer.setInput(blob) + tickmeter.start() + result = recognizer.forward() + tickmeter.stop() + wordRecognized = decodeText(result) + cv.putText(frame, wordRecognized, (int(vertices[1][0]), int(vertices[1][1])), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0)) + for j in range(4): + p1 = (int(vertices[j][0]), int(vertices[j][1])) + p2 = (int(vertices[(j + 1) % 4][0]), int(vertices[(j + 1) % 4][1])) + cv.line(frame, p1, p2, (0, 255, 0), 1) + label = 'Inference time: %.2f ms' % tickmeter.getTimeMilli() + cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) + cv.imshow(kWinName, frame) + tickmeter.reset() +if __name__ == '__main__': + main() + +# File: opencv-master/samples/dnn/tf_text_graph_common.py +def tokenize(s): + tokens = [] + token = '' + isString = False + isComment = False + for symbol in s: + isComment = isComment and symbol != '\n' or (not isString and symbol == '#') + if isComment: + continue + if symbol == ' ' or symbol == '\t' or symbol == '\r' or (symbol == "'") or (symbol == '\n') or (symbol == ':') or (symbol == '"') or (symbol == ';') or (symbol == ','): + if (symbol == '"' or symbol == "'") and isString: + tokens.append(token) + token = '' + elif isString: + token += symbol + elif token: + tokens.append(token) + token = '' + isString = (symbol == '"' or symbol == "'") ^ isString + elif symbol == '{' or symbol == '}' or symbol == '[' or (symbol == ']'): + if token: + tokens.append(token) + token = '' + tokens.append(symbol) + else: + token += symbol + if token: + tokens.append(token) + return tokens + +def parseMessage(tokens, idx): + msg = {} + assert tokens[idx] == '{' + isArray = False + while True: + if not isArray: + idx += 1 + if idx < len(tokens): + fieldName = tokens[idx] + else: + return None + if fieldName == '}': + break + idx += 1 + fieldValue = tokens[idx] + if fieldValue == '{': + (embeddedMsg, idx) = parseMessage(tokens, idx) + if fieldName in msg: + msg[fieldName].append(embeddedMsg) + else: + msg[fieldName] = [embeddedMsg] + elif fieldValue == '[': + isArray = True + elif fieldValue == ']': + isArray = False + elif fieldName in msg: + msg[fieldName].append(fieldValue) + else: + msg[fieldName] = [fieldValue] + return (msg, idx) + +def readTextMessage(filePath): + if not filePath: + return {} + with open(filePath, 'rt') as f: + content = f.read() + tokens = tokenize('{' + content + '}') + msg = parseMessage(tokens, 0) + return msg[0] if msg else {} + +def listToTensor(values): + if all([isinstance(v, float) for v in values]): + dtype = 'DT_FLOAT' + field = 'float_val' + elif all([isinstance(v, int) for v in values]): + dtype = 'DT_INT32' + field = 'int_val' + else: + raise Exception('Wrong values types') + msg = {'tensor': {'dtype': dtype, 'tensor_shape': {'dim': {'size': len(values)}}}} + msg['tensor'][field] = values + return msg + +def addConstNode(name, values, graph_def): + node = NodeDef() + node.name = 
name + node.op = 'Const' + node.addAttr('value', values) + graph_def.node.extend([node]) + +def addSlice(inp, out, begins, sizes, graph_def): + beginsNode = NodeDef() + beginsNode.name = out + '/begins' + beginsNode.op = 'Const' + beginsNode.addAttr('value', begins) + graph_def.node.extend([beginsNode]) + sizesNode = NodeDef() + sizesNode.name = out + '/sizes' + sizesNode.op = 'Const' + sizesNode.addAttr('value', sizes) + graph_def.node.extend([sizesNode]) + sliced = NodeDef() + sliced.name = out + sliced.op = 'Slice' + sliced.input.append(inp) + sliced.input.append(beginsNode.name) + sliced.input.append(sizesNode.name) + graph_def.node.extend([sliced]) + +def addReshape(inp, out, shape, graph_def): + shapeNode = NodeDef() + shapeNode.name = out + '/shape' + shapeNode.op = 'Const' + shapeNode.addAttr('value', shape) + graph_def.node.extend([shapeNode]) + reshape = NodeDef() + reshape.name = out + reshape.op = 'Reshape' + reshape.input.append(inp) + reshape.input.append(shapeNode.name) + graph_def.node.extend([reshape]) + +def addSoftMax(inp, out, graph_def): + softmax = NodeDef() + softmax.name = out + softmax.op = 'Softmax' + softmax.addAttr('axis', -1) + softmax.input.append(inp) + graph_def.node.extend([softmax]) + +def addFlatten(inp, out, graph_def): + flatten = NodeDef() + flatten.name = out + flatten.op = 'Flatten' + flatten.input.append(inp) + graph_def.node.extend([flatten]) + +class NodeDef: + + def __init__(self): + self.input = [] + self.name = '' + self.op = '' + self.attr = {} + + def addAttr(self, key, value): + assert not key in self.attr + if isinstance(value, bool): + self.attr[key] = {'b': value} + elif isinstance(value, int): + self.attr[key] = {'i': value} + elif isinstance(value, float): + self.attr[key] = {'f': value} + elif isinstance(value, str): + self.attr[key] = {'s': value} + elif isinstance(value, list): + self.attr[key] = listToTensor(value) + else: + raise Exception('Unknown type of attribute ' + key) + + def Clear(self): + self.input = [] + self.name = '' + self.op = '' + self.attr = {} + +class GraphDef: + + def __init__(self): + self.node = [] + + def save(self, filePath): + with open(filePath, 'wt') as f: + + def printAttr(d, indent): + indent = ' ' * indent + for (key, value) in sorted(d.items(), key=lambda x: x[0].lower()): + value = value if isinstance(value, list) else [value] + for v in value: + if isinstance(v, dict): + f.write(indent + key + ' {\n') + printAttr(v, len(indent) + 2) + f.write(indent + '}\n') + else: + isString = False + if isinstance(v, str) and (not v.startswith('DT_')): + try: + float(v) + except: + isString = True + if isinstance(v, bool): + printed = 'true' if v else 'false' + elif v == 'true' or v == 'false': + printed = 'true' if v == 'true' else 'false' + elif isString: + printed = '"%s"' % v + else: + printed = str(v) + f.write(indent + key + ': ' + printed + '\n') + for node in self.node: + f.write('node {\n') + f.write(' name: "%s"\n' % node.name) + f.write(' op: "%s"\n' % node.op) + for inp in node.input: + f.write(' input: "%s"\n' % inp) + for (key, value) in sorted(node.attr.items(), key=lambda x: x[0].lower()): + f.write(' attr {\n') + f.write(' key: "%s"\n' % key) + f.write(' value {\n') + printAttr(value, 6) + f.write(' }\n') + f.write(' }\n') + f.write('}\n') + +def parseTextGraph(filePath): + msg = readTextMessage(filePath) + graph = GraphDef() + for node in msg['node']: + graphNode = NodeDef() + graphNode.name = node['name'][0] + graphNode.op = node['op'][0] + graphNode.input = node['input'] if 'input' in node 
else [] + if 'attr' in node: + for attr in node['attr']: + graphNode.attr[attr['key'][0]] = attr['value'][0] + graph.node.append(graphNode) + return graph + +def removeIdentity(graph_def): + identities = {} + # iterate over a copy of the node list: nodes are removed from graph_def.node inside the loop + for node in list(graph_def.node): + if node.op == 'Identity' or node.op == 'IdentityN': + inp = node.input[0] + if inp in identities: + identities[node.name] = identities[inp] + else: + identities[node.name] = inp + graph_def.node.remove(node) + for node in graph_def.node: + for i in range(len(node.input)): + if node.input[i] in identities: + node.input[i] = identities[node.input[i]] + +def removeUnusedNodesAndAttrs(to_remove, graph_def): + unusedAttrs = ['T', 'Tshape', 'N', 'Tidx', 'Tdim', 'use_cudnn_on_gpu', 'Index', 'Tperm', 'is_training', 'Tpaddings'] + removedNodes = [] + for i in reversed(range(len(graph_def.node))): + op = graph_def.node[i].op + name = graph_def.node[i].name + if to_remove(name, op): + if op != 'Const': + removedNodes.append(name) + del graph_def.node[i] + else: + for attr in unusedAttrs: + if attr in graph_def.node[i].attr: + del graph_def.node[i].attr[attr] + for node in graph_def.node: + for i in reversed(range(len(node.input))): + if node.input[i] in removedNodes: + del node.input[i] + +def writeTextGraph(modelPath, outputPath, outNodes): + try: + import cv2 as cv + cv.dnn.writeTextGraph(modelPath, outputPath) + except: + import tensorflow as tf + from tensorflow.tools.graph_transforms import TransformGraph + with tf.gfile.FastGFile(modelPath, 'rb') as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + graph_def = TransformGraph(graph_def, ['image_tensor'], outNodes, ['sort_by_execution_order']) + for node in graph_def.node: + if node.op == 'Const': + if 'value' in node.attr and node.attr['value'].tensor.tensor_content: + node.attr['value'].tensor.tensor_content = b'' + tf.train.write_graph(graph_def, '', outputPath, as_text=True) + +# File: opencv-master/samples/dnn/tf_text_graph_efficientdet.py +import argparse +import re +from math import sqrt +from tf_text_graph_common import * + +class AnchorGenerator: + + def __init__(self, min_level, aspect_ratios, num_scales, anchor_scale): + self.min_level = min_level + self.aspect_ratios = aspect_ratios + self.anchor_scale = anchor_scale + self.scales = [2 ** (float(s) / num_scales) for s in range(num_scales)] + + def get(self, layer_id): + widths = [] + heights = [] + for s in self.scales: + for a in self.aspect_ratios: + base_anchor_size = 2 ** (self.min_level + layer_id) * self.anchor_scale + heights.append(base_anchor_size * s * a[1]) + widths.append(base_anchor_size * s * a[0]) + return (widths, heights) + +def createGraph(modelPath, outputPath, min_level, aspect_ratios, num_scales, anchor_scale, num_classes, image_width, image_height): + print('Min level: %d' % min_level) + print('Anchor scale: %f' % anchor_scale) + print('Num scales: %d' % num_scales) + print('Aspect ratios: %s' % str(aspect_ratios)) + print('Number of classes: %d' % num_classes) + print('Input image size: %dx%d' % (image_width, image_height)) + _inpNames = ['image_arrays'] + outNames = ['detections'] + writeTextGraph(modelPath, outputPath, outNames) + graph_def = parseTextGraph(outputPath) + + def getUnconnectedNodes(): + unconnected = [] + for node in graph_def.node: + if node.op == 'Const': + continue + unconnected.append(node.name) + for inp in node.input: + if inp in unconnected: + unconnected.remove(inp) + return unconnected + nodesToKeep = ['truediv'] + removeIdentity(graph_def) + scopesToKeep = ('image_arrays', 
'efficientnet', 'resample_p6', 'resample_p7', 'fpn_cells', 'class_net', 'box_net', 'Reshape', 'concat') + addConstNode('scale_w', [2.0], graph_def) + addConstNode('scale_h', [2.0], graph_def) + nodesToKeep += ['scale_w', 'scale_h'] + for node in graph_def.node: + if re.match('efficientnet-(.*)/blocks_\\d+/se/mul_1', node.name): + (node.input[0], node.input[1]) = (node.input[1], node.input[0]) + if re.match('fpn_cells/cell_\\d+/fnode\\d+/resample(.*)/nearest_upsampling/Reshape_1$', node.name): + node.op = 'ResizeNearestNeighbor' + node.input[1] = 'scale_w' + node.input.append('scale_h') + for inpNode in graph_def.node: + if inpNode.name == node.name[:node.name.rfind('_')]: + node.input[0] = inpNode.input[0] + if re.match('box_net/box-predict(_\\d)*/separable_conv2d$', node.name): + node.addAttr('loc_pred_transposed', True) + if node.op == 'RealDiv': + for inpNode in graph_def.node: + if inpNode.name != node.input[1] or not 'value' in inpNode.attr: + continue + tensor = inpNode.attr['value']['tensor'][0] + if not 'float_val' in tensor: + continue + scale = float(inpNode.attr['value']['tensor'][0]['float_val'][0]) + addConstNode(inpNode.name + '/inv', [1.0 / scale], graph_def) + nodesToKeep.append(inpNode.name + '/inv') + node.input[1] = inpNode.name + '/inv' + node.op = 'Mul' + break + + def to_remove(name, op): + if name in nodesToKeep: + return False + return op == 'Const' or not name.startswith(scopesToKeep) + removeUnusedNodesAndAttrs(to_remove, graph_def) + assert graph_def.node[1].name == 'truediv' and graph_def.node[1].op == 'RealDiv' + graph_def.node[1].input.insert(0, 'image_arrays') + graph_def.node[2].input.insert(0, 'truediv') + priors_generator = AnchorGenerator(min_level, aspect_ratios, num_scales, anchor_scale) + priorBoxes = [] + for i in range(5): + inpName = '' + for node in graph_def.node: + if node.name == 'Reshape_%d' % (i * 2 + 1): + inpName = node.input[0] + break + priorBox = NodeDef() + priorBox.name = 'PriorBox_%d' % i + priorBox.op = 'PriorBox' + priorBox.input.append(inpName) + priorBox.input.append(graph_def.node[0].name) + priorBox.addAttr('flip', False) + priorBox.addAttr('clip', False) + (widths, heights) = priors_generator.get(i) + priorBox.addAttr('width', widths) + priorBox.addAttr('height', heights) + priorBox.addAttr('variance', [1.0, 1.0, 1.0, 1.0]) + graph_def.node.extend([priorBox]) + priorBoxes.append(priorBox.name) + addConstNode('concat/axis_flatten', [-1], graph_def) + + def addConcatNode(name, inputs, axisNodeName): + concat = NodeDef() + concat.name = name + concat.op = 'ConcatV2' + for inp in inputs: + concat.input.append(inp) + concat.input.append(axisNodeName) + graph_def.node.extend([concat]) + addConcatNode('PriorBox/concat', priorBoxes, 'concat/axis_flatten') + sigmoid = NodeDef() + sigmoid.name = 'concat/sigmoid' + sigmoid.op = 'Sigmoid' + sigmoid.input.append('concat') + graph_def.node.extend([sigmoid]) + addFlatten(sigmoid.name, sigmoid.name + '/Flatten', graph_def) + addFlatten('concat_1', 'concat_1/Flatten', graph_def) + detectionOut = NodeDef() + detectionOut.name = 'detection_out' + detectionOut.op = 'DetectionOutput' + detectionOut.input.append('concat_1/Flatten') + detectionOut.input.append(sigmoid.name + '/Flatten') + detectionOut.input.append('PriorBox/concat') + detectionOut.addAttr('num_classes', num_classes) + detectionOut.addAttr('share_location', True) + detectionOut.addAttr('background_label_id', num_classes + 1) + detectionOut.addAttr('nms_threshold', 0.6) + detectionOut.addAttr('confidence_threshold', 0.2) + 
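# Note: DetectionOutput consumes the flattened box regressions ('concat_1/Flatten'), the sigmoid class scores and the concatenated PriorBox anchors built above, then applies NMS at IoU 0.6 to candidates scoring above 0.2; top_k and keep_top_k, set next, cap the number of surviving detections. +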
detectionOut.addAttr('top_k', 100) + detectionOut.addAttr('keep_top_k', 100) + detectionOut.addAttr('code_type', 'CENTER_SIZE') + graph_def.node.extend([detectionOut]) + graph_def.node[0].attr['shape'] = {'shape': {'dim': [{'size': -1}, {'size': image_height}, {'size': image_width}, {'size': 3}]}} + while True: + unconnectedNodes = getUnconnectedNodes() + unconnectedNodes.remove(detectionOut.name) + if not unconnectedNodes: + break + for name in unconnectedNodes: + for i in range(len(graph_def.node)): + if graph_def.node[i].name == name: + del graph_def.node[i] + break + graph_def.save(outputPath) +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Run this script to get a text graph of EfficientDet model from TensorFlow Object Detection API. Then pass it with .pb file to cv::dnn::readNetFromTensorflow function.') + parser.add_argument('--input', required=True, help='Path to frozen TensorFlow graph.') + parser.add_argument('--output', required=True, help='Path to output text graph.') + parser.add_argument('--min_level', default=3, type=int, help='Parameter from training config') + parser.add_argument('--num_scales', default=3, type=int, help='Parameter from training config') + parser.add_argument('--anchor_scale', default=4.0, type=float, help='Parameter from training config') + parser.add_argument('--aspect_ratios', default=[1.0, 1.0, 1.4, 0.7, 0.7, 1.4], nargs='+', type=float, help='Parameter from training config') + parser.add_argument('--num_classes', default=90, type=int, help='Number of classes to detect') + parser.add_argument('--width', default=512, type=int, help='Network input width') + parser.add_argument('--height', default=512, type=int, help='Network input height') + args = parser.parse_args() + ar = args.aspect_ratios + assert len(ar) % 2 == 0 + ar = list(zip(ar[::2], ar[1::2])) + createGraph(args.input, args.output, args.min_level, ar, args.num_scales, args.anchor_scale, args.num_classes, args.width, args.height) + +# File: opencv-master/samples/dnn/tf_text_graph_faster_rcnn.py +import argparse +import numpy as np +from tf_text_graph_common import * + +def createFasterRCNNGraph(modelPath, configPath, outputPath): + scopesToKeep = ('FirstStageFeatureExtractor', 'Conv', 'FirstStageBoxPredictor/BoxEncodingPredictor', 'FirstStageBoxPredictor/ClassPredictor', 'CropAndResize', 'MaxPool2D', 'SecondStageFeatureExtractor', 'SecondStageBoxPredictor', 'Preprocessor/sub', 'Preprocessor/mul', 'image_tensor') + scopesToIgnore = ('FirstStageFeatureExtractor/Assert', 'FirstStageFeatureExtractor/Shape', 'FirstStageFeatureExtractor/strided_slice', 'FirstStageFeatureExtractor/GreaterEqual', 'FirstStageFeatureExtractor/LogicalAnd') + config = readTextMessage(configPath) + config = config['model'][0]['faster_rcnn'][0] + num_classes = int(config['num_classes'][0]) + grid_anchor_generator = config['first_stage_anchor_generator'][0]['grid_anchor_generator'][0] + scales = [float(s) for s in grid_anchor_generator['scales']] + aspect_ratios = [float(ar) for ar in grid_anchor_generator['aspect_ratios']] + width_stride = float(grid_anchor_generator['width_stride'][0]) + height_stride = float(grid_anchor_generator['height_stride'][0]) + feature_extractor = config['feature_extractor'][0] + if 'type' in feature_extractor and feature_extractor['type'][0] == 'faster_rcnn_nas': + features_stride = 16.0 + else: + features_stride = float(feature_extractor['first_stage_features_stride'][0]) + first_stage_nms_iou_threshold = float(config['first_stage_nms_iou_threshold'][0]) + 
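# Note: the values above are read directly from the TF Object Detection API *.config protobuf parsed by readTextMessage(); first_stage_max_proposals, read next, caps how many RPN proposals reach the second stage (it becomes keep_top_k of the first DetectionOutput below). +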
first_stage_max_proposals = int(config['first_stage_max_proposals'][0]) + print('Number of classes: %d' % num_classes) + print('Scales: %s' % str(scales)) + print('Aspect ratios: %s' % str(aspect_ratios)) + print('Width stride: %f' % width_stride) + print('Height stride: %f' % height_stride) + print('Features stride: %f' % features_stride) + writeTextGraph(modelPath, outputPath, ['num_detections', 'detection_scores', 'detection_boxes', 'detection_classes']) + graph_def = parseTextGraph(outputPath) + removeIdentity(graph_def) + nodesToKeep = [] + + def to_remove(name, op): + if name in nodesToKeep: + return False + return op == 'Const' or name.startswith(scopesToIgnore) or (not name.startswith(scopesToKeep)) or (name.startswith('CropAndResize') and op != 'CropAndResize') + nodesMap = {node.name: node for node in graph_def.node} + for node in reversed(graph_def.node): + if node.op == 'BatchToSpaceND': + del node.input[2] + conv = nodesMap[node.input[0]] + spaceToBatchND = nodesMap[conv.input[0]] + stridedSlice = nodesMap[spaceToBatchND.input[2]] + assert stridedSlice.op == 'StridedSlice' + pack = nodesMap[stridedSlice.input[0]] + assert pack.op == 'Pack' + padNodeH = nodesMap[nodesMap[pack.input[0]].input[0]] + padNodeW = nodesMap[nodesMap[pack.input[1]].input[0]] + padH = int(padNodeH.attr['value']['tensor'][0]['int_val'][0]) + padW = int(padNodeW.attr['value']['tensor'][0]['int_val'][0]) + paddingsNode = NodeDef() + paddingsNode.name = conv.name + '/paddings' + paddingsNode.op = 'Const' + paddingsNode.addAttr('value', [padH, padH, padW, padW]) + graph_def.node.insert(graph_def.node.index(spaceToBatchND), paddingsNode) + nodesToKeep.append(paddingsNode.name) + spaceToBatchND.input[2] = paddingsNode.name + removeUnusedNodesAndAttrs(to_remove, graph_def) + assert graph_def.node[0].op == 'Placeholder' + graph_def.node[1].input.insert(0, graph_def.node[0].name) + topNodes = [] + while True: + node = graph_def.node.pop() + topNodes.append(node) + if node.op == 'CropAndResize': + break + addReshape('FirstStageBoxPredictor/ClassPredictor/BiasAdd', 'FirstStageBoxPredictor/ClassPredictor/reshape_1', [0, -1, 2], graph_def) + addSoftMax('FirstStageBoxPredictor/ClassPredictor/reshape_1', 'FirstStageBoxPredictor/ClassPredictor/softmax', graph_def) + addFlatten('FirstStageBoxPredictor/ClassPredictor/softmax', 'FirstStageBoxPredictor/ClassPredictor/softmax/flatten', graph_def) + addFlatten('FirstStageBoxPredictor/BoxEncodingPredictor/BiasAdd', 'FirstStageBoxPredictor/BoxEncodingPredictor/flatten', graph_def) + proposals = NodeDef() + proposals.name = 'proposals' + proposals.op = 'PriorBox' + proposals.input.append('FirstStageBoxPredictor/BoxEncodingPredictor/BiasAdd') + proposals.input.append(graph_def.node[0].name) + proposals.addAttr('flip', False) + proposals.addAttr('clip', True) + proposals.addAttr('step', features_stride) + proposals.addAttr('offset', 0.0) + proposals.addAttr('variance', [0.1, 0.1, 0.2, 0.2]) + widths = [] + heights = [] + for a in aspect_ratios: + for s in scales: + ar = np.sqrt(a) + heights.append(height_stride ** 2 * s / ar) + widths.append(width_stride ** 2 * s * ar) + proposals.addAttr('width', widths) + proposals.addAttr('height', heights) + graph_def.node.extend([proposals]) + detectionOut = NodeDef() + detectionOut.name = 'detection_out' + detectionOut.op = 'DetectionOutput' + detectionOut.input.append('FirstStageBoxPredictor/BoxEncodingPredictor/flatten') + detectionOut.input.append('FirstStageBoxPredictor/ClassPredictor/softmax/flatten') + 
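# Note: DetectionOutput expects its inputs in the order (box deltas, class scores, prior boxes); the 'proposals' PriorBox node appended next supplies the RPN anchors, so this layer reproduces the proposal-generation stage of Faster-RCNN. +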
detectionOut.input.append('proposals') + detectionOut.addAttr('num_classes', 2) + detectionOut.addAttr('share_location', True) + detectionOut.addAttr('background_label_id', 0) + detectionOut.addAttr('nms_threshold', first_stage_nms_iou_threshold) + detectionOut.addAttr('top_k', 6000) + detectionOut.addAttr('code_type', 'CENTER_SIZE') + detectionOut.addAttr('keep_top_k', first_stage_max_proposals) + detectionOut.addAttr('clip', False) + graph_def.node.extend([detectionOut]) + addConstNode('clip_by_value/lower', [0.0], graph_def) + addConstNode('clip_by_value/upper', [1.0], graph_def) + clipByValueNode = NodeDef() + clipByValueNode.name = 'detection_out/clip_by_value' + clipByValueNode.op = 'ClipByValue' + clipByValueNode.input.append('detection_out') + clipByValueNode.input.append('clip_by_value/lower') + clipByValueNode.input.append('clip_by_value/upper') + graph_def.node.extend([clipByValueNode]) + for node in reversed(topNodes): + graph_def.node.extend([node]) + addSoftMax('SecondStageBoxPredictor/Reshape_1', 'SecondStageBoxPredictor/Reshape_1/softmax', graph_def) + addSlice('SecondStageBoxPredictor/Reshape_1/softmax', 'SecondStageBoxPredictor/Reshape_1/slice', [0, 0, 1], [-1, -1, -1], graph_def) + addReshape('SecondStageBoxPredictor/Reshape_1/slice', 'SecondStageBoxPredictor/Reshape_1/Reshape', [1, -1], graph_def) + cropAndResizeNodeName = '' + for i in reversed(range(len(graph_def.node))): + if graph_def.node[i].op == 'CropAndResize': + graph_def.node[i].input.insert(1, 'detection_out/clip_by_value') + cropAndResizeNodeName = graph_def.node[i].name + if graph_def.node[i].name == 'SecondStageBoxPredictor/Reshape': + addConstNode('SecondStageBoxPredictor/Reshape/shape2', [1, -1, 4], graph_def) + graph_def.node[i].input.pop() + graph_def.node[i].input.append('SecondStageBoxPredictor/Reshape/shape2') + if graph_def.node[i].name in ['SecondStageBoxPredictor/Flatten/flatten/Shape', 'SecondStageBoxPredictor/Flatten/flatten/strided_slice', 'SecondStageBoxPredictor/Flatten/flatten/Reshape/shape', 'SecondStageBoxPredictor/Flatten_1/flatten/Shape', 'SecondStageBoxPredictor/Flatten_1/flatten/strided_slice', 'SecondStageBoxPredictor/Flatten_1/flatten/Reshape/shape']: + del graph_def.node[i] + for node in graph_def.node: + if node.name == 'SecondStageBoxPredictor/Flatten/flatten/Reshape' or node.name == 'SecondStageBoxPredictor/Flatten_1/flatten/Reshape': + node.op = 'Flatten' + node.input.pop() + if node.name in ['FirstStageBoxPredictor/BoxEncodingPredictor/Conv2D', 'SecondStageBoxPredictor/BoxEncodingPredictor/MatMul']: + node.addAttr('loc_pred_transposed', True) + if node.name.startswith('MaxPool2D'): + assert node.op == 'MaxPool' + assert cropAndResizeNodeName + node.input = [cropAndResizeNodeName] + addSlice('detection_out/clip_by_value', 'detection_out/slice', [0, 0, 0, 3], [-1, -1, -1, 4], graph_def) + variance = NodeDef() + variance.name = 'proposals/variance' + variance.op = 'Const' + variance.addAttr('value', [0.1, 0.1, 0.2, 0.2]) + graph_def.node.extend([variance]) + varianceEncoder = NodeDef() + varianceEncoder.name = 'variance_encoded' + varianceEncoder.op = 'Mul' + varianceEncoder.input.append('SecondStageBoxPredictor/Reshape') + varianceEncoder.input.append(variance.name) + varianceEncoder.addAttr('axis', 2) + graph_def.node.extend([varianceEncoder]) + addReshape('detection_out/slice', 'detection_out/slice/reshape', [1, 1, -1], graph_def) + addFlatten('variance_encoded', 'variance_encoded/flatten', graph_def) + detectionOut = NodeDef() + detectionOut.name = 'detection_out_final' + 
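# Note: this second DetectionOutput decodes the final per-class boxes; variance_encoded_in_target is set below because the second-stage deltas have already been scaled by the variances through the 'variance_encoded' Mul node above. +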
+    detectionOut.op = 'DetectionOutput'
+    detectionOut.input.append('variance_encoded/flatten')
+    detectionOut.input.append('SecondStageBoxPredictor/Reshape_1/Reshape')
+    detectionOut.input.append('detection_out/slice/reshape')
+    detectionOut.addAttr('num_classes', num_classes)
+    detectionOut.addAttr('share_location', False)
+    detectionOut.addAttr('background_label_id', num_classes + 1)
+    detectionOut.addAttr('nms_threshold', 0.6)
+    detectionOut.addAttr('code_type', 'CENTER_SIZE')
+    detectionOut.addAttr('keep_top_k', 100)
+    detectionOut.addAttr('clip', True)
+    detectionOut.addAttr('variance_encoded_in_target', True)
+    graph_def.node.extend([detectionOut])
+
+    def getUnconnectedNodes():
+        unconnected = [node.name for node in graph_def.node]
+        for node in graph_def.node:
+            for inp in node.input:
+                if inp in unconnected:
+                    unconnected.remove(inp)
+        return unconnected
+    while True:
+        unconnectedNodes = getUnconnectedNodes()
+        unconnectedNodes.remove(detectionOut.name)
+        if not unconnectedNodes:
+            break
+        for name in unconnectedNodes:
+            for i in range(len(graph_def.node)):
+                if graph_def.node[i].name == name:
+                    del graph_def.node[i]
+                    break
+    graph_def.save(outputPath)
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Run this script to get a text graph of Faster-RCNN model from TensorFlow Object Detection API. Then pass it with .pb file to cv::dnn::readNetFromTensorflow function.')
+    parser.add_argument('--input', required=True, help='Path to frozen TensorFlow graph.')
+    parser.add_argument('--output', required=True, help='Path to output text graph.')
+    parser.add_argument('--config', required=True, help='Path to a *.config file that was used for training.')
+    args = parser.parse_args()
+    createFasterRCNNGraph(args.input, args.config, args.output)
+
+# File: opencv-master/samples/dnn/tf_text_graph_mask_rcnn.py
+import argparse
+import numpy as np
+from tf_text_graph_common import *
+parser = argparse.ArgumentParser(description='Run this script to get a text graph of Mask-RCNN model from TensorFlow Object Detection API. Then pass it with .pb file to cv::dnn::readNetFromTensorflow function.')
+parser.add_argument('--input', required=True, help='Path to frozen TensorFlow graph.')
+parser.add_argument('--output', required=True, help='Path to output text graph.')
+parser.add_argument('--config', required=True, help='Path to a *.config file that was used for training.')
+args = parser.parse_args()
+scopesToKeep = ('FirstStageFeatureExtractor', 'Conv', 'FirstStageBoxPredictor/BoxEncodingPredictor', 'FirstStageBoxPredictor/ClassPredictor', 'CropAndResize', 'MaxPool2D', 'SecondStageFeatureExtractor', 'SecondStageBoxPredictor', 'Preprocessor/sub', 'Preprocessor/mul', 'image_tensor')
+scopesToIgnore = ('FirstStageFeatureExtractor/Assert', 'FirstStageFeatureExtractor/Shape', 'FirstStageFeatureExtractor/strided_slice', 'FirstStageFeatureExtractor/GreaterEqual', 'FirstStageFeatureExtractor/LogicalAnd', 'Conv/required_space_to_batch_paddings')
+config = readTextMessage(args.config)
+config = config['model'][0]['faster_rcnn'][0]
+num_classes = int(config['num_classes'][0])
+grid_anchor_generator = config['first_stage_anchor_generator'][0]['grid_anchor_generator'][0]
+scales = [float(s) for s in grid_anchor_generator['scales']]
+aspect_ratios = [float(ar) for ar in grid_anchor_generator['aspect_ratios']]
+width_stride = float(grid_anchor_generator['width_stride'][0])
+height_stride = float(grid_anchor_generator['height_stride'][0])
+features_stride = float(config['feature_extractor'][0]['first_stage_features_stride'][0])
+first_stage_nms_iou_threshold = float(config['first_stage_nms_iou_threshold'][0])
+first_stage_max_proposals = int(config['first_stage_max_proposals'][0])
+print('Number of classes: %d' % num_classes)
+print('Scales: %s' % str(scales))
+print('Aspect ratios: %s' % str(aspect_ratios))
+print('Width stride: %f' % width_stride)
+print('Height stride: %f' % height_stride)
+print('Features stride: %f' % features_stride)
+writeTextGraph(args.input, args.output, ['num_detections', 'detection_scores', 'detection_boxes', 'detection_classes', 'detection_masks'])
+graph_def = parseTextGraph(args.output)
+removeIdentity(graph_def)
+nodesToKeep = []
+
+def to_remove(name, op):
+    if name in nodesToKeep:
+        return False
+    return op == 'Const' or name.startswith(scopesToIgnore) or (not name.startswith(scopesToKeep)) or (name.startswith('CropAndResize') and op != 'CropAndResize')
+nodesMap = {node.name: node for node in graph_def.node}
+for node in reversed(graph_def.node):
+    if node.op == 'BatchToSpaceND':
+        del node.input[2]
+        conv = nodesMap[node.input[0]]
+        spaceToBatchND = nodesMap[conv.input[0]]
+        paddingsNode = NodeDef()
+        paddingsNode.name = conv.name + '/paddings'
+        paddingsNode.op = 'Const'
+        paddingsNode.addAttr('value', [2, 2, 2, 2])
+        graph_def.node.insert(graph_def.node.index(spaceToBatchND), paddingsNode)
+        nodesToKeep.append(paddingsNode.name)
+        spaceToBatchND.input[2] = paddingsNode.name
+removeUnusedNodesAndAttrs(to_remove, graph_def)
+assert graph_def.node[0].op == 'Placeholder'
+graph_def.node[1].input.insert(0, graph_def.node[0].name)
+topNodes = []
+numCropAndResize = 0
+while True:
+    node = graph_def.node.pop()
+    topNodes.append(node)
+    if node.op == 'CropAndResize':
+        numCropAndResize += 1
+        if numCropAndResize == 2:
+            break
+addReshape('FirstStageBoxPredictor/ClassPredictor/BiasAdd', 'FirstStageBoxPredictor/ClassPredictor/reshape_1', [0, -1, 2], graph_def)
+addSoftMax('FirstStageBoxPredictor/ClassPredictor/reshape_1', 'FirstStageBoxPredictor/ClassPredictor/softmax', graph_def)
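+# A minimal illustrative sketch, not part of the original sample: the PriorBox
+# 'proposals' node constructed below encodes one RPN anchor per (aspect_ratio,
+# scale) pair, with width = width_stride**2 * s * sqrt(a) and
+# height = height_stride**2 * s / sqrt(a). The helper name and its default
+# values are assumptions for demonstration only.
+def _demo_rpn_anchor_sizes(width_stride=16.0, height_stride=16.0, scales=(0.25, 0.5, 1.0, 2.0), aspect_ratios=(0.5, 1.0, 2.0)):
+    sizes = []
+    for a in aspect_ratios:
+        for s in scales:
+            ar = np.sqrt(a)
+            sizes.append((width_stride ** 2 * s * ar, height_stride ** 2 * s / ar))
+    return sizes  # e.g. a=1.0, s=0.25 yields a 64x64 px anchor for 16 px strides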
+addFlatten('FirstStageBoxPredictor/ClassPredictor/softmax', 'FirstStageBoxPredictor/ClassPredictor/softmax/flatten', graph_def) +addFlatten('FirstStageBoxPredictor/BoxEncodingPredictor/BiasAdd', 'FirstStageBoxPredictor/BoxEncodingPredictor/flatten', graph_def) +proposals = NodeDef() +proposals.name = 'proposals' +proposals.op = 'PriorBox' +proposals.input.append('FirstStageBoxPredictor/BoxEncodingPredictor/BiasAdd') +proposals.input.append(graph_def.node[0].name) +proposals.addAttr('flip', False) +proposals.addAttr('clip', True) +proposals.addAttr('step', features_stride) +proposals.addAttr('offset', 0.0) +proposals.addAttr('variance', [0.1, 0.1, 0.2, 0.2]) +widths = [] +heights = [] +for a in aspect_ratios: + for s in scales: + ar = np.sqrt(a) + heights.append(height_stride ** 2 * s / ar) + widths.append(width_stride ** 2 * s * ar) +proposals.addAttr('width', widths) +proposals.addAttr('height', heights) +graph_def.node.extend([proposals]) +detectionOut = NodeDef() +detectionOut.name = 'detection_out' +detectionOut.op = 'DetectionOutput' +detectionOut.input.append('FirstStageBoxPredictor/BoxEncodingPredictor/flatten') +detectionOut.input.append('FirstStageBoxPredictor/ClassPredictor/softmax/flatten') +detectionOut.input.append('proposals') +detectionOut.addAttr('num_classes', 2) +detectionOut.addAttr('share_location', True) +detectionOut.addAttr('background_label_id', 0) +detectionOut.addAttr('nms_threshold', first_stage_nms_iou_threshold) +detectionOut.addAttr('top_k', 6000) +detectionOut.addAttr('code_type', 'CENTER_SIZE') +detectionOut.addAttr('keep_top_k', first_stage_max_proposals) +detectionOut.addAttr('clip', True) +graph_def.node.extend([detectionOut]) +cropAndResizeNodesNames = [] +for node in reversed(topNodes): + if node.op != 'CropAndResize': + graph_def.node.extend([node]) + topNodes.pop() + else: + cropAndResizeNodesNames.append(node.name) + if numCropAndResize == 1: + break + else: + graph_def.node.extend([node]) + topNodes.pop() + numCropAndResize -= 1 +addSoftMax('SecondStageBoxPredictor/Reshape_1', 'SecondStageBoxPredictor/Reshape_1/softmax', graph_def) +addSlice('SecondStageBoxPredictor/Reshape_1/softmax', 'SecondStageBoxPredictor/Reshape_1/slice', [0, 0, 1], [-1, -1, -1], graph_def) +addReshape('SecondStageBoxPredictor/Reshape_1/slice', 'SecondStageBoxPredictor/Reshape_1/Reshape', [1, -1], graph_def) +for i in reversed(range(len(graph_def.node))): + if graph_def.node[i].op == 'CropAndResize': + graph_def.node[i].input.insert(1, 'detection_out') + if graph_def.node[i].name == 'SecondStageBoxPredictor/Reshape': + addConstNode('SecondStageBoxPredictor/Reshape/shape2', [1, -1, 4], graph_def) + graph_def.node[i].input.pop() + graph_def.node[i].input.append('SecondStageBoxPredictor/Reshape/shape2') + if graph_def.node[i].name in ['SecondStageBoxPredictor/Flatten/flatten/Shape', 'SecondStageBoxPredictor/Flatten/flatten/strided_slice', 'SecondStageBoxPredictor/Flatten/flatten/Reshape/shape', 'SecondStageBoxPredictor/Flatten_1/flatten/Shape', 'SecondStageBoxPredictor/Flatten_1/flatten/strided_slice', 'SecondStageBoxPredictor/Flatten_1/flatten/Reshape/shape']: + del graph_def.node[i] +for node in graph_def.node: + if node.name == 'SecondStageBoxPredictor/Flatten/flatten/Reshape' or node.name == 'SecondStageBoxPredictor/Flatten_1/flatten/Reshape': + node.op = 'Flatten' + node.input.pop() + if node.name in ['FirstStageBoxPredictor/BoxEncodingPredictor/Conv2D', 'SecondStageBoxPredictor/BoxEncodingPredictor/MatMul']: + node.addAttr('loc_pred_transposed', True) + if 
node.name.startswith('MaxPool2D'): + assert node.op == 'MaxPool' + assert len(cropAndResizeNodesNames) == 2 + node.input = [cropAndResizeNodesNames[0]] + del cropAndResizeNodesNames[0] +addSlice('detection_out', 'detection_out/slice', [0, 0, 0, 3], [-1, -1, -1, 4], graph_def) +variance = NodeDef() +variance.name = 'proposals/variance' +variance.op = 'Const' +variance.addAttr('value', [0.1, 0.1, 0.2, 0.2]) +graph_def.node.extend([variance]) +varianceEncoder = NodeDef() +varianceEncoder.name = 'variance_encoded' +varianceEncoder.op = 'Mul' +varianceEncoder.input.append('SecondStageBoxPredictor/Reshape') +varianceEncoder.input.append(variance.name) +varianceEncoder.addAttr('axis', 2) +graph_def.node.extend([varianceEncoder]) +addReshape('detection_out/slice', 'detection_out/slice/reshape', [1, 1, -1], graph_def) +addFlatten('variance_encoded', 'variance_encoded/flatten', graph_def) +detectionOut = NodeDef() +detectionOut.name = 'detection_out_final' +detectionOut.op = 'DetectionOutput' +detectionOut.input.append('variance_encoded/flatten') +detectionOut.input.append('SecondStageBoxPredictor/Reshape_1/Reshape') +detectionOut.input.append('detection_out/slice/reshape') +detectionOut.addAttr('num_classes', num_classes) +detectionOut.addAttr('share_location', False) +detectionOut.addAttr('background_label_id', num_classes + 1) +detectionOut.addAttr('nms_threshold', 0.6) +detectionOut.addAttr('code_type', 'CENTER_SIZE') +detectionOut.addAttr('keep_top_k', 100) +detectionOut.addAttr('clip', True) +detectionOut.addAttr('variance_encoded_in_target', True) +detectionOut.addAttr('confidence_threshold', 0.3) +detectionOut.addAttr('group_by_classes', False) +graph_def.node.extend([detectionOut]) +for node in reversed(topNodes): + graph_def.node.extend([node]) + if node.name.startswith('MaxPool2D'): + assert node.op == 'MaxPool' + assert len(cropAndResizeNodesNames) == 1 + node.input = [cropAndResizeNodesNames[0]] +for i in reversed(range(len(graph_def.node))): + if graph_def.node[i].op == 'CropAndResize': + graph_def.node[i].input.insert(1, 'detection_out_final') + break +graph_def.node[-1].name = 'detection_masks' +graph_def.node[-1].op = 'Sigmoid' +graph_def.node[-1].input.pop() + +def getUnconnectedNodes(): + unconnected = [node.name for node in graph_def.node] + for node in graph_def.node: + for inp in node.input: + if inp in unconnected: + unconnected.remove(inp) + return unconnected +while True: + unconnectedNodes = getUnconnectedNodes() + unconnectedNodes.remove(graph_def.node[-1].name) + if not unconnectedNodes: + break + for name in unconnectedNodes: + for i in range(len(graph_def.node)): + if graph_def.node[i].name == name: + del graph_def.node[i] + break +graph_def.save(args.output) + +# File: opencv-master/samples/dnn/tf_text_graph_ssd.py +import argparse +import re +from math import sqrt +from tf_text_graph_common import * + +class SSDAnchorGenerator: + + def __init__(self, min_scale, max_scale, num_layers, aspect_ratios, reduce_boxes_in_lowest_layer, image_width, image_height): + self.min_scale = min_scale + self.aspect_ratios = aspect_ratios + self.reduce_boxes_in_lowest_layer = reduce_boxes_in_lowest_layer + self.image_width = image_width + self.image_height = image_height + self.scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1) for i in range(num_layers)] + [1.0] + + def get(self, layer_id): + if layer_id == 0 and self.reduce_boxes_in_lowest_layer: + widths = [0.1, self.min_scale * sqrt(2.0), self.min_scale * sqrt(0.5)] + heights = [0.1, self.min_scale / sqrt(2.0), 
self.min_scale / sqrt(0.5)] + else: + widths = [self.scales[layer_id] * sqrt(ar) for ar in self.aspect_ratios] + heights = [self.scales[layer_id] / sqrt(ar) for ar in self.aspect_ratios] + widths += [sqrt(self.scales[layer_id] * self.scales[layer_id + 1])] + heights += [sqrt(self.scales[layer_id] * self.scales[layer_id + 1])] + min_size = min(self.image_width, self.image_height) + widths = [w * min_size for w in widths] + heights = [h * min_size for h in heights] + return (widths, heights) + +class MultiscaleAnchorGenerator: + + def __init__(self, min_level, aspect_ratios, scales_per_octave, anchor_scale): + self.min_level = min_level + self.aspect_ratios = aspect_ratios + self.anchor_scale = anchor_scale + self.scales = [2 ** (float(s) / scales_per_octave) for s in range(scales_per_octave)] + + def get(self, layer_id): + widths = [] + heights = [] + for a in self.aspect_ratios: + for s in self.scales: + base_anchor_size = 2 ** (self.min_level + layer_id) * self.anchor_scale + ar = sqrt(a) + heights.append(base_anchor_size * s / ar) + widths.append(base_anchor_size * s * ar) + return (widths, heights) + +def createSSDGraph(modelPath, configPath, outputPath): + keepOps = ['Conv2D', 'BiasAdd', 'Add', 'AddV2', 'Relu', 'Relu6', 'Placeholder', 'FusedBatchNorm', 'DepthwiseConv2dNative', 'ConcatV2', 'Mul', 'MaxPool', 'AvgPool', 'Identity', 'Sub', 'ResizeNearestNeighbor', 'Pad', 'FusedBatchNormV3', 'Mean'] + prefixesToRemove = ('MultipleGridAnchorGenerator/', 'Concatenate/', 'Postprocessor/', 'Preprocessor/map') + config = readTextMessage(configPath) + config = config['model'][0]['ssd'][0] + num_classes = int(config['num_classes'][0]) + fixed_shape_resizer = config['image_resizer'][0]['fixed_shape_resizer'][0] + image_width = int(fixed_shape_resizer['width'][0]) + image_height = int(fixed_shape_resizer['height'][0]) + box_predictor = 'convolutional' if 'convolutional_box_predictor' in config['box_predictor'][0] else 'weight_shared_convolutional' + anchor_generator = config['anchor_generator'][0] + if 'ssd_anchor_generator' in anchor_generator: + ssd_anchor_generator = anchor_generator['ssd_anchor_generator'][0] + min_scale = float(ssd_anchor_generator['min_scale'][0]) + max_scale = float(ssd_anchor_generator['max_scale'][0]) + num_layers = int(ssd_anchor_generator['num_layers'][0]) + aspect_ratios = [float(ar) for ar in ssd_anchor_generator['aspect_ratios']] + reduce_boxes_in_lowest_layer = True + if 'reduce_boxes_in_lowest_layer' in ssd_anchor_generator: + reduce_boxes_in_lowest_layer = ssd_anchor_generator['reduce_boxes_in_lowest_layer'][0] == 'true' + priors_generator = SSDAnchorGenerator(min_scale, max_scale, num_layers, aspect_ratios, reduce_boxes_in_lowest_layer, image_width, image_height) + print('Scale: [%f-%f]' % (min_scale, max_scale)) + print('Aspect ratios: %s' % str(aspect_ratios)) + print('Reduce boxes in the lowest layer: %s' % str(reduce_boxes_in_lowest_layer)) + elif 'multiscale_anchor_generator' in anchor_generator: + multiscale_anchor_generator = anchor_generator['multiscale_anchor_generator'][0] + min_level = int(multiscale_anchor_generator['min_level'][0]) + max_level = int(multiscale_anchor_generator['max_level'][0]) + anchor_scale = float(multiscale_anchor_generator['anchor_scale'][0]) + aspect_ratios = [float(ar) for ar in multiscale_anchor_generator['aspect_ratios']] + scales_per_octave = int(multiscale_anchor_generator['scales_per_octave'][0]) + num_layers = max_level - min_level + 1 + priors_generator = MultiscaleAnchorGenerator(min_level, aspect_ratios, 
scales_per_octave, anchor_scale) + print('Levels: [%d-%d]' % (min_level, max_level)) + print('Anchor scale: %f' % anchor_scale) + print('Scales per octave: %d' % scales_per_octave) + print('Aspect ratios: %s' % str(aspect_ratios)) + else: + print('Unknown anchor_generator') + exit(0) + print('Number of classes: %d' % num_classes) + print('Number of layers: %d' % num_layers) + print('box predictor: %s' % box_predictor) + print('Input image size: %dx%d' % (image_width, image_height)) + outNames = ['num_detections', 'detection_scores', 'detection_boxes', 'detection_classes'] + writeTextGraph(modelPath, outputPath, outNames) + graph_def = parseTextGraph(outputPath) + + def getUnconnectedNodes(): + unconnected = [] + for node in graph_def.node: + unconnected.append(node.name) + for inp in node.input: + if inp in unconnected: + unconnected.remove(inp) + return unconnected + + def fuse_nodes(nodesToKeep): + nodesMap = {node.name: node for node in graph_def.node} + subgraphBatchNorm = ['Add', ['Mul', 'input', ['Mul', ['Rsqrt', ['Add', 'moving_variance', 'add_y']], 'gamma']], ['Sub', 'beta', ['Mul', 'moving_mean', 'Mul_0']]] + subgraphBatchNormV2 = ['AddV2', ['Mul', 'input', ['Mul', ['Rsqrt', ['AddV2', 'moving_variance', 'add_y']], 'gamma']], ['Sub', 'beta', ['Mul', 'moving_mean', 'Mul_0']]] + subgraphResizeNN = ['Reshape', ['Mul', ['Reshape', 'input', ['Pack', 'shape_1', 'shape_2', 'shape_3', 'shape_4', 'shape_5']], 'ones'], ['Pack', ['StridedSlice', ['Shape', 'input'], 'stack', 'stack_1', 'stack_2'], 'out_height', 'out_width', 'out_channels']] + + def checkSubgraph(node, targetNode, inputs, fusedNodes): + op = targetNode[0] + if node.op == op and len(node.input) >= len(targetNode) - 1: + fusedNodes.append(node) + for (i, inpOp) in enumerate(targetNode[1:]): + if isinstance(inpOp, list): + if not node.input[i] in nodesMap or not checkSubgraph(nodesMap[node.input[i]], inpOp, inputs, fusedNodes): + return False + else: + inputs[inpOp] = node.input[i] + return True + else: + return False + nodesToRemove = [] + for node in graph_def.node: + inputs = {} + fusedNodes = [] + if checkSubgraph(node, subgraphBatchNorm, inputs, fusedNodes) or checkSubgraph(node, subgraphBatchNormV2, inputs, fusedNodes): + name = node.name + node.Clear() + node.name = name + node.op = 'FusedBatchNorm' + node.input.append(inputs['input']) + node.input.append(inputs['gamma']) + node.input.append(inputs['beta']) + node.input.append(inputs['moving_mean']) + node.input.append(inputs['moving_variance']) + node.addAttr('epsilon', 0.001) + nodesToRemove += fusedNodes[1:] + inputs = {} + fusedNodes = [] + if checkSubgraph(node, subgraphResizeNN, inputs, fusedNodes): + name = node.name + node.Clear() + node.name = name + node.op = 'ResizeNearestNeighbor' + node.input.append(inputs['input']) + node.input.append(name + '/output_shape') + out_height_node = nodesMap[inputs['out_height']] + out_width_node = nodesMap[inputs['out_width']] + out_height = int(out_height_node.attr['value']['tensor'][0]['int_val'][0]) + out_width = int(out_width_node.attr['value']['tensor'][0]['int_val'][0]) + shapeNode = NodeDef() + shapeNode.name = name + '/output_shape' + shapeNode.op = 'Const' + shapeNode.addAttr('value', [out_height, out_width]) + graph_def.node.insert(graph_def.node.index(node), shapeNode) + nodesToKeep.append(shapeNode.name) + nodesToRemove += fusedNodes[1:] + for node in nodesToRemove: + graph_def.node.remove(node) + nodesToKeep = [] + fuse_nodes(nodesToKeep) + removeIdentity(graph_def) + + def to_remove(name, op): + return not name in 
nodesToKeep and (op == 'Const' or not op in keepOps or name.startswith(prefixesToRemove)) + removeUnusedNodesAndAttrs(to_remove, graph_def) + assert graph_def.node[0].op == 'Placeholder' + try: + input_shape = graph_def.node[0].attr['shape']['shape'][0]['dim'] + input_shape[1]['size'] = image_height + input_shape[2]['size'] = image_width + except: + print('Input shapes are undefined') + weights = graph_def.node[1].input[-1] + for i in range(len(graph_def.node[1].input)): + graph_def.node[1].input.pop() + graph_def.node[1].input.append(graph_def.node[0].name) + graph_def.node[1].input.append(weights) + preproc_id = 'Preprocessor/' + if graph_def.node[2].name.startswith(preproc_id) and graph_def.node[2].input[0].startswith(preproc_id): + if not any((preproc_id in inp for inp in graph_def.node[3].input)): + graph_def.node[3].input.insert(0, graph_def.node[2].name) + + def addConcatNode(name, inputs, axisNodeName): + concat = NodeDef() + concat.name = name + concat.op = 'ConcatV2' + for inp in inputs: + concat.input.append(inp) + concat.input.append(axisNodeName) + graph_def.node.extend([concat]) + addConstNode('concat/axis_flatten', [-1], graph_def) + addConstNode('PriorBox/concat/axis', [-2], graph_def) + for label in ['ClassPredictor', 'BoxEncodingPredictor' if box_predictor == 'convolutional' else 'BoxPredictor']: + concatInputs = [] + for i in range(num_layers): + flatten = NodeDef() + if box_predictor == 'convolutional': + inpName = 'BoxPredictor_%d/%s/BiasAdd' % (i, label) + elif i == 0: + inpName = 'WeightSharedConvolutionalBoxPredictor/%s/BiasAdd' % label + else: + inpName = 'WeightSharedConvolutionalBoxPredictor_%d/%s/BiasAdd' % (i, label) + flatten.input.append(inpName) + flatten.name = inpName + '/Flatten' + flatten.op = 'Flatten' + concatInputs.append(flatten.name) + graph_def.node.extend([flatten]) + addConcatNode('%s/concat' % label, concatInputs, 'concat/axis_flatten') + num_matched_layers = 0 + for node in graph_def.node: + if re.match('BoxPredictor_\\d/BoxEncodingPredictor/convolution', node.name) or re.match('BoxPredictor_\\d/BoxEncodingPredictor/Conv2D', node.name) or re.match('WeightSharedConvolutionalBoxPredictor(_\\d)*/BoxPredictor/Conv2D', node.name): + node.addAttr('loc_pred_transposed', True) + num_matched_layers += 1 + assert num_matched_layers == num_layers + priorBoxes = [] + boxCoder = config['box_coder'][0] + fasterRcnnBoxCoder = boxCoder['faster_rcnn_box_coder'][0] + boxCoderVariance = [1.0 / float(fasterRcnnBoxCoder['x_scale'][0]), 1.0 / float(fasterRcnnBoxCoder['y_scale'][0]), 1.0 / float(fasterRcnnBoxCoder['width_scale'][0]), 1.0 / float(fasterRcnnBoxCoder['height_scale'][0])] + for i in range(num_layers): + priorBox = NodeDef() + priorBox.name = 'PriorBox_%d' % i + priorBox.op = 'PriorBox' + if box_predictor == 'convolutional': + priorBox.input.append('BoxPredictor_%d/BoxEncodingPredictor/BiasAdd' % i) + elif i == 0: + priorBox.input.append('WeightSharedConvolutionalBoxPredictor/BoxPredictor/Conv2D') + else: + priorBox.input.append('WeightSharedConvolutionalBoxPredictor_%d/BoxPredictor/BiasAdd' % i) + priorBox.input.append(graph_def.node[0].name) + priorBox.addAttr('flip', False) + priorBox.addAttr('clip', False) + (widths, heights) = priors_generator.get(i) + priorBox.addAttr('width', widths) + priorBox.addAttr('height', heights) + priorBox.addAttr('variance', boxCoderVariance) + graph_def.node.extend([priorBox]) + priorBoxes.append(priorBox.name) + addConcatNode('PriorBox/concat', priorBoxes, 'concat/axis_flatten') + addReshape('ClassPredictor/concat', 
'ClassPredictor/concat3d', [0, -1, num_classes + 1], graph_def) + sigmoid = NodeDef() + sigmoid.name = 'ClassPredictor/concat/sigmoid' + sigmoid.op = 'Sigmoid' + sigmoid.input.append('ClassPredictor/concat3d') + graph_def.node.extend([sigmoid]) + addFlatten(sigmoid.name, sigmoid.name + '/Flatten', graph_def) + detectionOut = NodeDef() + detectionOut.name = 'detection_out' + detectionOut.op = 'DetectionOutput' + if box_predictor == 'convolutional': + detectionOut.input.append('BoxEncodingPredictor/concat') + else: + detectionOut.input.append('BoxPredictor/concat') + detectionOut.input.append(sigmoid.name + '/Flatten') + detectionOut.input.append('PriorBox/concat') + detectionOut.addAttr('num_classes', num_classes + 1) + detectionOut.addAttr('share_location', True) + detectionOut.addAttr('background_label_id', 0) + postProcessing = config['post_processing'][0] + batchNMS = postProcessing['batch_non_max_suppression'][0] + if 'iou_threshold' in batchNMS: + detectionOut.addAttr('nms_threshold', float(batchNMS['iou_threshold'][0])) + else: + detectionOut.addAttr('nms_threshold', 0.6) + if 'score_threshold' in batchNMS: + detectionOut.addAttr('confidence_threshold', float(batchNMS['score_threshold'][0])) + else: + detectionOut.addAttr('confidence_threshold', 0.01) + if 'max_detections_per_class' in batchNMS: + detectionOut.addAttr('top_k', int(batchNMS['max_detections_per_class'][0])) + else: + detectionOut.addAttr('top_k', 100) + if 'max_total_detections' in batchNMS: + detectionOut.addAttr('keep_top_k', int(batchNMS['max_total_detections'][0])) + else: + detectionOut.addAttr('keep_top_k', 100) + detectionOut.addAttr('code_type', 'CENTER_SIZE') + graph_def.node.extend([detectionOut]) + while True: + unconnectedNodes = getUnconnectedNodes() + unconnectedNodes.remove(detectionOut.name) + if not unconnectedNodes: + break + for name in unconnectedNodes: + for i in range(len(graph_def.node)): + if graph_def.node[i].name == name: + del graph_def.node[i] + break + graph_def.save(outputPath) +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Run this script to get a text graph of SSD model from TensorFlow Object Detection API. 
Then pass it with .pb file to cv::dnn::readNetFromTensorflow function.')
+    parser.add_argument('--input', required=True, help='Path to frozen TensorFlow graph.')
+    parser.add_argument('--output', required=True, help='Path to output text graph.')
+    parser.add_argument('--config', required=True, help='Path to a *.config file that was used for training.')
+    args = parser.parse_args()
+    createSSDGraph(args.input, args.config, args.output)
+
+# File: opencv-master/samples/dnn/virtual_try_on.py
+""""""
+import argparse
+import os.path
+import numpy as np
+import cv2 as cv
+from numpy import linalg
+from common import findFile
+from human_parsing import parse_human
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
+parser = argparse.ArgumentParser(description='Use this script to run virtual try-on using CP-VTON', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument('--input_image', '-i', required=True, help='Path to image with person.')
+parser.add_argument('--input_cloth', '-c', required=True, help='Path to target cloth image.')
+parser.add_argument('--gmm_model', '-gmm', default='cp_vton_gmm.onnx', help='Path to Geometric Matching Module .onnx model.')
+parser.add_argument('--tom_model', '-tom', default='cp_vton_tom.onnx', help='Path to Try-On Module .onnx model.')
+parser.add_argument('--segmentation_model', default='lip_jppnet_384.pb', help='Path to cloth segmentation .pb model.')
+parser.add_argument('--openpose_proto', default='openpose_pose_coco.prototxt', help='Path to OpenPose .prototxt model that was trained on the COCO dataset.')
+parser.add_argument('--openpose_model', default='openpose_pose_coco.caffemodel', help='Path to OpenPose .caffemodel model that was trained on the COCO dataset.')
+parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: %d: automatically (by default), %d: Halide language (http://halide-lang.org/), %d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), %d: OpenCV implementation, %d: VKCOM, %d: CUDA" % backends)
+parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: %d: CPU target (by default), %d: OpenCL, %d: OpenCL fp16 (half-float precision), %d: NCS2 VPU, %d: HDDL VPU, %d: Vulkan, %d: CUDA, %d: CUDA fp16 (half-float precision)' % targets)
+(args, _) = parser.parse_known_args()
+
+def get_pose_map(image, proto_path, model_path, backend, target, height=256, width=192):
+    radius = 5
+    inp = cv.dnn.blobFromImage(image, 1.0 / 255, (width, height))
+    net = cv.dnn.readNet(proto_path, model_path)
+    net.setPreferableBackend(backend)
+    net.setPreferableTarget(target)
+    net.setInput(inp)
+    out = net.forward()
+    threshold = 0.1
+    (_, out_c, out_h, out_w) = out.shape
+    pose_map = np.zeros((height, width, out_c - 1))
+    for i in range(0, out.shape[1] - 1):
+        heatMap = out[0, i, :, :]
+        keypoint = np.full((height, width), -1)
+        (_, conf, _, point) = cv.minMaxLoc(heatMap)
+        x = width * point[0] // out_w
+        y = height * point[1] // out_h
+        if conf > threshold and x > 0 and (y > 0):
+            keypoint[y - radius:y + radius, x - radius:x +
radius] = 1 + pose_map[:, :, i] = keypoint + pose_map = pose_map.transpose(2, 0, 1) + return pose_map + +class BilinearFilter(object): + + def _precompute_coeffs(self, inSize, outSize): + filterscale = max(1.0, inSize / outSize) + ksize = int(np.ceil(filterscale)) * 2 + 1 + kk = np.zeros(shape=(outSize * ksize,), dtype=np.float32) + bounds = np.empty(shape=(outSize * 2,), dtype=np.int32) + centers = (np.arange(outSize) + 0.5) * filterscale + 0.5 + bounds[::2] = np.where(centers - filterscale < 0, 0, centers - filterscale) + bounds[1::2] = np.where(centers + filterscale > inSize, inSize, centers + filterscale) - bounds[::2] + xmins = bounds[::2] - centers + 1 + points = np.array([np.arange(row) + xmins[i] for (i, row) in enumerate(bounds[1::2])]) / filterscale + for xx in range(0, outSize): + point = points[xx] + bilinear = np.where(point < 1.0, 1.0 - abs(point), 0.0) + ww = np.sum(bilinear) + kk[xx * ksize:xx * ksize + bilinear.size] = np.where(ww == 0.0, bilinear, bilinear / ww) + return (bounds, kk, ksize) + + def _resample_horizontal(self, out, img, ksize, bounds, kk): + for yy in range(0, out.shape[0]): + for xx in range(0, out.shape[1]): + xmin = bounds[xx * 2 + 0] + xmax = bounds[xx * 2 + 1] + k = kk[xx * ksize:xx * ksize + xmax] + out[yy, xx] = np.round(np.sum(img[yy, xmin:xmin + xmax] * k)) + + def _resample_vertical(self, out, img, ksize, bounds, kk): + for yy in range(0, out.shape[0]): + ymin = bounds[yy * 2 + 0] + ymax = bounds[yy * 2 + 1] + k = kk[yy * ksize:yy * ksize + ymax] + out[yy] = np.round(np.sum(img[ymin:ymin + ymax, 0:out.shape[1]] * k[:, np.newaxis], axis=0)) + + def imaging_resample(self, img, xsize, ysize): + (height, width) = img.shape[0:2] + (bounds_horiz, kk_horiz, ksize_horiz) = self._precompute_coeffs(width, xsize) + (bounds_vert, kk_vert, ksize_vert) = self._precompute_coeffs(height, ysize) + out_hor = np.empty((img.shape[0], xsize), dtype=np.uint8) + self._resample_horizontal(out_hor, img, ksize_horiz, bounds_horiz, kk_horiz) + out = np.empty((ysize, xsize), dtype=np.uint8) + self._resample_vertical(out, out_hor, ksize_vert, bounds_vert, kk_vert) + return out + +class CpVton(object): + + def __init__(self, gmm_model, tom_model, backend, target): + super(CpVton, self).__init__() + self.gmm_net = cv.dnn.readNet(gmm_model) + self.tom_net = cv.dnn.readNet(tom_model) + self.gmm_net.setPreferableBackend(backend) + self.gmm_net.setPreferableTarget(target) + self.tom_net.setPreferableBackend(backend) + self.tom_net.setPreferableTarget(target) + + def prepare_agnostic(self, segm_image, input_image, pose_map, height=256, width=192): + palette = {'Background': (0, 0, 0), 'Hat': (128, 0, 0), 'Hair': (255, 0, 0), 'Glove': (0, 85, 0), 'Sunglasses': (170, 0, 51), 'UpperClothes': (255, 85, 0), 'Dress': (0, 0, 85), 'Coat': (0, 119, 221), 'Socks': (85, 85, 0), 'Pants': (0, 85, 85), 'Jumpsuits': (85, 51, 0), 'Scarf': (52, 86, 128), 'Skirt': (0, 128, 0), 'Face': (0, 0, 255), 'Left-arm': (51, 170, 221), 'Right-arm': (0, 255, 255), 'Left-leg': (85, 255, 170), 'Right-leg': (170, 255, 85), 'Left-shoe': (255, 255, 0), 'Right-shoe': (255, 170, 0)} + color2label = {val: key for (key, val) in palette.items()} + head_labels = ['Hat', 'Hair', 'Sunglasses', 'Face', 'Pants', 'Skirt'] + segm_image = cv.cvtColor(segm_image, cv.COLOR_BGR2RGB) + phead = np.zeros((1, height, width), dtype=np.float32) + pose_shape = np.zeros((height, width), dtype=np.uint8) + for r in range(height): + for c in range(width): + pixel = tuple(segm_image[r, c]) + if tuple(pixel) in color2label: + if 
color2label[pixel] in head_labels: + phead[0, r, c] = 1 + if color2label[pixel] != 'Background': + pose_shape[r, c] = 255 + input_image = cv.dnn.blobFromImage(input_image, 1.0 / 127.5, (width, height), mean=(127.5, 127.5, 127.5), swapRB=True) + input_image = input_image.squeeze(0) + img_head = input_image * phead - (1 - phead) + downsample = BilinearFilter() + down = downsample.imaging_resample(pose_shape, width // 16, height // 16) + res_shape = cv.resize(down, (width, height), cv.INTER_LINEAR) + res_shape = cv.dnn.blobFromImage(res_shape, 1.0 / 127.5, mean=(127.5, 127.5, 127.5), swapRB=True) + res_shape = res_shape.squeeze(0) + agnostic = np.concatenate((res_shape, img_head, pose_map), axis=0) + agnostic = np.expand_dims(agnostic, axis=0) + return agnostic.astype(np.float32) + + def get_warped_cloth(self, cloth_img, agnostic, height=256, width=192): + cloth = cv.dnn.blobFromImage(cloth_img, 1.0 / 127.5, (width, height), mean=(127.5, 127.5, 127.5), swapRB=True) + self.gmm_net.setInput(agnostic, 'input.1') + self.gmm_net.setInput(cloth, 'input.18') + theta = self.gmm_net.forward() + grid = self._generate_grid(theta) + warped_cloth = self._bilinear_sampler(cloth, grid).astype(np.float32) + return warped_cloth + + def get_tryon(self, agnostic, warp_cloth): + inp = np.concatenate([agnostic, warp_cloth], axis=1) + self.tom_net.setInput(inp) + out = self.tom_net.forward() + (p_rendered, m_composite) = np.split(out, [3], axis=1) + p_rendered = np.tanh(p_rendered) + m_composite = 1 / (1 + np.exp(-m_composite)) + p_tryon = warp_cloth * m_composite + p_rendered * (1 - m_composite) + rgb_p_tryon = cv.cvtColor(p_tryon.squeeze(0).transpose(1, 2, 0), cv.COLOR_BGR2RGB) + rgb_p_tryon = (rgb_p_tryon + 1) / 2 + return rgb_p_tryon + + def _compute_L_inverse(self, X, Y): + N = X.shape[0] + Xmat = np.tile(X, (1, N)) + Ymat = np.tile(Y, (1, N)) + P_dist_squared = np.power(Xmat - Xmat.transpose(1, 0), 2) + np.power(Ymat - Ymat.transpose(1, 0), 2) + P_dist_squared[P_dist_squared == 0] = 1 + K = np.multiply(P_dist_squared, np.log(P_dist_squared)) + O = np.ones([N, 1], dtype=np.float32) + Z = np.zeros([3, 3], dtype=np.float32) + P = np.concatenate([O, X, Y], axis=1) + first = np.concatenate((K, P), axis=1) + second = np.concatenate((P.transpose(1, 0), Z), axis=1) + L = np.concatenate((first, second), axis=0) + Li = linalg.inv(L) + return Li + + def _prepare_to_transform(self, out_h=256, out_w=192, grid_size=5): + (grid_X, grid_Y) = np.meshgrid(np.linspace(-1, 1, out_w), np.linspace(-1, 1, out_h)) + grid_X = np.expand_dims(np.expand_dims(grid_X, axis=0), axis=3) + grid_Y = np.expand_dims(np.expand_dims(grid_Y, axis=0), axis=3) + axis_coords = np.linspace(-1, 1, grid_size) + N = grid_size ** 2 + (P_Y, P_X) = np.meshgrid(axis_coords, axis_coords) + P_X = np.reshape(P_X, (-1, 1)) + P_Y = np.reshape(P_Y, (-1, 1)) + P_X = np.expand_dims(np.expand_dims(np.expand_dims(P_X, axis=2), axis=3), axis=4).transpose(4, 1, 2, 3, 0) + P_Y = np.expand_dims(np.expand_dims(np.expand_dims(P_Y, axis=2), axis=3), axis=4).transpose(4, 1, 2, 3, 0) + return (grid_X, grid_Y, N, P_X, P_Y) + + def _expand_torch(self, X, shape): + if len(X.shape) != len(shape): + return X.flatten().reshape(shape) + else: + axis = [1 if src == dst else dst for (src, dst) in zip(X.shape, shape)] + return np.tile(X, axis) + + def _apply_transformation(self, theta, points, N, P_X, P_Y): + if len(theta.shape) == 2: + theta = np.expand_dims(np.expand_dims(theta, axis=2), axis=3) + batch_size = theta.shape[0] + P_X_base = np.copy(P_X) + P_Y_base = np.copy(P_Y) + Li = 
self._compute_L_inverse(np.reshape(P_X, (N, -1)), np.reshape(P_Y, (N, -1))) + Li = np.expand_dims(Li, axis=0) + Q_X = np.squeeze(theta[:, :N, :, :], axis=3) + Q_Y = np.squeeze(theta[:, N:, :, :], axis=3) + Q_X += self._expand_torch(P_X_base, Q_X.shape) + Q_Y += self._expand_torch(P_Y_base, Q_Y.shape) + points_b = points.shape[0] + points_h = points.shape[1] + points_w = points.shape[2] + P_X = self._expand_torch(P_X, (1, points_h, points_w, 1, N)) + P_Y = self._expand_torch(P_Y, (1, points_h, points_w, 1, N)) + W_X = self._expand_torch(Li[:, :N, :N], (batch_size, N, N)) @ Q_X + W_Y = self._expand_torch(Li[:, :N, :N], (batch_size, N, N)) @ Q_Y + W_X = np.expand_dims(np.expand_dims(W_X, axis=3), axis=4).transpose(0, 4, 2, 3, 1) + W_X = np.repeat(W_X, points_h, axis=1) + W_X = np.repeat(W_X, points_w, axis=2) + W_Y = np.expand_dims(np.expand_dims(W_Y, axis=3), axis=4).transpose(0, 4, 2, 3, 1) + W_Y = np.repeat(W_Y, points_h, axis=1) + W_Y = np.repeat(W_Y, points_w, axis=2) + A_X = self._expand_torch(Li[:, N:, :N], (batch_size, 3, N)) @ Q_X + A_Y = self._expand_torch(Li[:, N:, :N], (batch_size, 3, N)) @ Q_Y + A_X = np.expand_dims(np.expand_dims(A_X, axis=3), axis=4).transpose(0, 4, 2, 3, 1) + A_X = np.repeat(A_X, points_h, axis=1) + A_X = np.repeat(A_X, points_w, axis=2) + A_Y = np.expand_dims(np.expand_dims(A_Y, axis=3), axis=4).transpose(0, 4, 2, 3, 1) + A_Y = np.repeat(A_Y, points_h, axis=1) + A_Y = np.repeat(A_Y, points_w, axis=2) + points_X_for_summation = np.expand_dims(np.expand_dims(points[:, :, :, 0], axis=3), axis=4) + points_X_for_summation = self._expand_torch(points_X_for_summation, points[:, :, :, 0].shape + (1, N)) + points_Y_for_summation = np.expand_dims(np.expand_dims(points[:, :, :, 1], axis=3), axis=4) + points_Y_for_summation = self._expand_torch(points_Y_for_summation, points[:, :, :, 0].shape + (1, N)) + if points_b == 1: + delta_X = points_X_for_summation - P_X + delta_Y = points_Y_for_summation - P_Y + else: + delta_X = points_X_for_summation - self._expand_torch(P_X, points_X_for_summation.shape) + delta_Y = points_Y_for_summation - self._expand_torch(P_Y, points_Y_for_summation.shape) + dist_squared = np.power(delta_X, 2) + np.power(delta_Y, 2) + dist_squared[dist_squared == 0] = 1 + U = np.multiply(dist_squared, np.log(dist_squared)) + points_X_batch = np.expand_dims(points[:, :, :, 0], axis=3) + points_Y_batch = np.expand_dims(points[:, :, :, 1], axis=3) + if points_b == 1: + points_X_batch = self._expand_torch(points_X_batch, (batch_size,) + points_X_batch.shape[1:]) + points_Y_batch = self._expand_torch(points_Y_batch, (batch_size,) + points_Y_batch.shape[1:]) + points_X_prime = A_X[:, :, :, :, 0] + np.multiply(A_X[:, :, :, :, 1], points_X_batch) + np.multiply(A_X[:, :, :, :, 2], points_Y_batch) + np.sum(np.multiply(W_X, self._expand_torch(U, W_X.shape)), 4) + points_Y_prime = A_Y[:, :, :, :, 0] + np.multiply(A_Y[:, :, :, :, 1], points_X_batch) + np.multiply(A_Y[:, :, :, :, 2], points_Y_batch) + np.sum(np.multiply(W_Y, self._expand_torch(U, W_Y.shape)), 4) + return np.concatenate((points_X_prime, points_Y_prime), 3) + + def _generate_grid(self, theta): + (grid_X, grid_Y, N, P_X, P_Y) = self._prepare_to_transform() + warped_grid = self._apply_transformation(theta, np.concatenate((grid_X, grid_Y), axis=3), N, P_X, P_Y) + return warped_grid + + def _bilinear_sampler(self, img, grid): + (x, y) = (grid[:, :, :, 0], grid[:, :, :, 1]) + H = img.shape[2] + W = img.shape[3] + max_y = H - 1 + max_x = W - 1 + x = 0.5 * (x + 1.0) * (max_x - 1) + y = 0.5 * (y + 1.0) * (max_y 
- 1)
+        x0 = np.floor(x).astype(int)
+        x1 = x0 + 1
+        y0 = np.floor(y).astype(int)
+        y1 = y0 + 1
+        wa = (x1 - x) * (y1 - y)
+        wb = (x1 - x) * (y - y0)
+        wc = (x - x0) * (y1 - y)
+        wd = (x - x0) * (y - y0)
+        x0 = np.clip(x0, 0, max_x)
+        x1 = np.clip(x1, 0, max_x)
+        y0 = np.clip(y0, 0, max_y)
+        y1 = np.clip(y1, 0, max_y)
+        img = img.reshape(-1, H, W)
+        Ia = img[:, y0, x0].swapaxes(0, 1)
+        Ib = img[:, y1, x0].swapaxes(0, 1)
+        Ic = img[:, y0, x1].swapaxes(0, 1)
+        Id = img[:, y1, x1].swapaxes(0, 1)
+        wa = np.expand_dims(wa, axis=0)
+        wb = np.expand_dims(wb, axis=0)
+        wc = np.expand_dims(wc, axis=0)
+        wd = np.expand_dims(wd, axis=0)
+        out = wa * Ia + wb * Ib + wc * Ic + wd * Id
+        return out
+
+class CorrelationLayer(object):
+
+    def __init__(self, params, blobs):
+        super(CorrelationLayer, self).__init__()
+
+    def getMemoryShapes(self, inputs):
+        featureAShape = inputs[0]
+        (b, _, h, w) = featureAShape
+        return [[b, h * w, h, w]]
+
+    def forward(self, inputs):
+        (feature_A, feature_B) = inputs
+        (b, c, h, w) = feature_A.shape
+        feature_A = feature_A.transpose(0, 1, 3, 2)
+        feature_A = np.reshape(feature_A, (b, c, h * w))
+        feature_B = np.reshape(feature_B, (b, c, h * w))
+        feature_B = feature_B.transpose(0, 2, 1)
+        feature_mul = feature_B @ feature_A
+        feature_mul = np.reshape(feature_mul, (b, h, w, h * w))
+        feature_mul = feature_mul.transpose(0, 1, 3, 2)
+        correlation_tensor = feature_mul.transpose(0, 2, 1, 3)
+        correlation_tensor = np.ascontiguousarray(correlation_tensor)
+        return [correlation_tensor]
+if __name__ == '__main__':
+    if not os.path.isfile(args.gmm_model):
+        raise OSError('GMM model does not exist')
+    if not os.path.isfile(args.tom_model):
+        raise OSError('TOM model does not exist')
+    if not os.path.isfile(args.segmentation_model):
+        raise OSError('Segmentation model does not exist')
+    if not os.path.isfile(findFile(args.openpose_proto)):
+        raise OSError('OpenPose proto does not exist')
+    if not os.path.isfile(findFile(args.openpose_model)):
+        raise OSError('OpenPose model does not exist')
+    person_img = cv.imread(args.input_image)
+    ratio = 256 / 192
+    (inp_h, inp_w, _) = person_img.shape
+    current_ratio = inp_h / inp_w
+    if current_ratio > ratio:
+        center_h = inp_h // 2
+        out_h = inp_w * ratio
+        start = int(center_h - out_h // 2)
+        end = int(center_h + out_h // 2)
+        person_img = person_img[start:end, ...]
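+    # Descriptive note (not in the original sample): this if/else center-crops
+    # the person image to the 256:192 height-to-width ratio that CP-VTON
+    # expects; the branch above trims height when the image is too tall, the
+    # branch below trims width when it is too wide.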
+ else: + center_w = inp_w // 2 + out_w = inp_h / ratio + start = int(center_w - out_w // 2) + end = int(center_w + out_w // 2) + person_img = person_img[:, start:end, :] + cloth_img = cv.imread(args.input_cloth) + pose = get_pose_map(person_img, findFile(args.openpose_proto), findFile(args.openpose_model), args.backend, args.target) + segm_image = parse_human(person_img, args.segmentation_model) + segm_image = cv.resize(segm_image, (192, 256), cv.INTER_LINEAR) + cv.dnn_registerLayer('Correlation', CorrelationLayer) + model = CpVton(args.gmm_model, args.tom_model, args.backend, args.target) + agnostic = model.prepare_agnostic(segm_image, person_img, pose) + warped_cloth = model.get_warped_cloth(cloth_img, agnostic) + output = model.get_tryon(agnostic, warped_cloth) + cv.dnn_unregisterLayer('Correlation') + winName = 'Virtual Try-On' + cv.namedWindow(winName, cv.WINDOW_AUTOSIZE) + cv.imshow(winName, output) + cv.waitKey() + +# File: opencv-master/samples/gdb/mat_pretty_printer.py +import gdb +import numpy as np +from enum import Enum +np.set_printoptions(suppress=True) + +def conv(obj, t): + return gdb.parse_and_eval(f'({t})({obj})') + +def booli(obj): + return conv(str(obj).lower(), 'bool') + +def stri(obj): + s = f'"{obj}"' + return conv(s.translate(s.maketrans('\n', ' ')), 'char*') + +class MagicValues(Enum): + MAGIC_VAL = 1124007936 + AUTO_STEP = 0 + CONTINUOUS_FLAG = 1 << 14 + SUBMATRIX_FLAG = 1 << 15 + +class MagicMasks(Enum): + MAGIC_MASK = 4294901760 + TYPE_MASK = 4095 + DEPTH_MASK = 7 + +class Depth(Enum): + CV_8U = 0 + CV_8S = 1 + CV_16U = 2 + CV_16S = 3 + CV_32S = 4 + CV_32F = 5 + CV_64F = 6 + CV_16F = 7 + +def create_enum(n): + + def make_type(depth, cn): + return depth.value + (cn - 1 << 3) + defs = [(f'{depth.name}C{i}', make_type(depth, i)) for depth in Depth for i in range(1, n + 1)] + return Enum('Type', defs) +Type = create_enum(512) + +class Flags: + + def depth(self): + return Depth(self.flags & MagicMasks.DEPTH_MASK.value) + + def dtype(self): + depth = self.depth() + ret = None + if depth == Depth.CV_8U: + ret = (np.uint8, 'uint8_t') + elif depth == Depth.CV_8S: + ret = (np.int8, 'int8_t') + elif depth == Depth.CV_16U: + ret = (np.uint16, 'uint16_t') + elif depth == Depth.CV_16S: + ret = (np.int16, 'int16_t') + elif depth == Depth.CV_32S: + ret = (np.int32, 'int32_t') + elif depth == Depth.CV_32F: + ret = (np.float32, 'float') + elif depth == Depth.CV_64F: + ret = (np.float64, 'double') + elif depth == Depth.CV_16F: + ret = (np.float16, 'float16') + return ret + + def type(self): + return Type(self.flags & MagicMasks.TYPE_MASK.value) + + def channels(self): + return ((self.flags & 511 << 3) >> 3) + 1 + + def is_continuous(self): + return self.flags & MagicValues.CONTINUOUS_FLAG.value != 0 + + def is_submatrix(self): + return self.flags & MagicValues.SUBMATRIX_FLAG.value != 0 + + def __init__(self, flags): + self.flags = flags + + def __iter__(self): + return iter({'type': stri(self.type().name), 'is_continuous': booli(self.is_continuous()), 'is_submatrix': booli(self.is_submatrix())}.items()) + +class Size: + + def __init__(self, ptr): + self.ptr = ptr + + def dims(self): + return int((self.ptr - 1).dereference()) + + def to_numpy(self): + return np.array([int(self.ptr[i]) for i in range(self.dims())], dtype=np.int64) + + def __iter__(self): + return iter({'size': stri(self.to_numpy())}.items()) + +class Mat: + + def __init__(self, m, size, flags): + (dtype, ctype) = flags.dtype() + elsize = np.dtype(dtype).itemsize + shape = size.to_numpy() + steps = 
np.asarray([int(m['step']['p'][i]) for i in range(len(shape))], dtype=np.int64) + ptr = m['data'] + if int(ptr) == 0 or np.prod(shape * steps) == 0: + self.mat = np.array([]) + self.view = self.mat + return + if flags.channels() != 1: + shape = np.append(shape, flags.channels()) + steps = np.append(steps, elsize) + length = 1 + np.sum((shape - 1) * steps) // elsize + if dtype != np.float16: + ctype = gdb.lookup_type(ctype) + ptr = ptr.cast(ctype.array(length - 1).pointer()).dereference() + self.mat = np.array([ptr[i] for i in range(length)], dtype=dtype) + else: + u16 = gdb.lookup_type('uint16_t') + ptr = ptr.cast(u16.array(length - 1).pointer()).dereference() + self.mat = np.array([ptr[i] for i in range(length)], dtype=np.uint16) + self.mat = self.mat.view(np.float16) + self.view = np.lib.stride_tricks.as_strided(self.mat, shape=shape, strides=steps) + + def __iter__(self): + return iter({'data': stri(self.view)}.items()) + +class MatPrinter: + + def __init__(self, mat): + self.mat = mat + + def views(self): + m = self.mat + flags = Flags(int(m['flags'])) + size = Size(m['size']['p']) + data = Mat(m, size, flags) + for x in [flags, size, data]: + for (k, v) in x: + yield ('view_' + k, v) + + def real(self): + m = self.mat + for field in m.type.fields(): + k = field.name + v = m[k] + yield (k, v) + + def children(self): + yield from self.views() + yield from self.real() + +def get_type(val): + vtype = val.type + if vtype.code == gdb.TYPE_CODE_REF: + vtype = vtype.target() + vtype = vtype.unqualified().strip_typedefs() + typename = vtype.tag + return typename + +def mat_printer(val): + typename = get_type(val) + if typename is None: + return None + if str(typename) == 'cv::Mat': + return MatPrinter(val) +gdb.pretty_printers.append(mat_printer) + +# File: opencv-master/samples/python/_coverage.py +"""""" +from __future__ import print_function +from glob import glob +import cv2 as cv +import re +if __name__ == '__main__': + cv2_callable = set(['cv.' 
+ name for name in dir(cv) if callable(getattr(cv, name))])
+    found = set()
+    for fn in glob('*.py'):
+        print(' --- ', fn)
+        code = open(fn).read()
+        found |= set(re.findall('cv2?\\.\\w+', code))
+    cv2_used = found & cv2_callable
+    cv2_unused = cv2_callable - cv2_used
+    with open('unused_api.txt', 'w') as f:
+        f.write('\n'.join(sorted(cv2_unused)))
+    r = 1.0 * len(cv2_used) / len(cv2_callable)
+    print('\ncv api coverage: %d / %d (%.1f%%)' % (len(cv2_used), len(cv2_callable), r * 100))
+
+# File: opencv-master/samples/python/_doc.py
+""""""
+from __future__ import print_function
+from glob import glob
+if __name__ == '__main__':
+    print('--- undocumented files:')
+    for fn in glob('*.py'):
+        loc = {}
+        try:
+            try:
+                execfile(fn, loc)
+            except NameError:
+                exec(open(fn).read(), loc)
+        except Exception:
+            pass
+        if '__doc__' not in loc:
+            print(fn)
+
+# File: opencv-master/samples/python/aruco_detect_board_charuco.py
+""""""
+import argparse
+import numpy as np
+import cv2 as cv
+import sys
+
+def read_camera_parameters(filename):
+    fs = cv.FileStorage(cv.samples.findFile(filename, False), cv.FileStorage_READ)
+    if fs.isOpened():
+        cam_matrix = fs.getNode('camera_matrix').mat()
+        dist_coefficients = fs.getNode('distortion_coefficients').mat()
+        return (True, cam_matrix, dist_coefficients)
+    return (False, [], [])
+
+def main():
+    parser = argparse.ArgumentParser(description='detect markers and corners of a ChArUco board, estimate the pose of the ChArUco board', add_help=False)
+    parser.add_argument('-H', '--help', help='show help', action='store_true', dest='show_help')
+    parser.add_argument('-v', '--video', help='Input from video or image file, if omitted, input comes from camera', default='', action='store', dest='v')
+    parser.add_argument('-i', '--image', help='Input from image file', default='', action='store', dest='img_path')
+    parser.add_argument('-w', help='Number of squares in X direction', default='3', action='store', dest='w', type=int)
+    parser.add_argument('-h', help='Number of squares in Y direction', default='3', action='store', dest='h', type=int)
+    parser.add_argument('-sl', help='Square side length', default='1.', action='store', dest='sl', type=float)
+    parser.add_argument('-ml', help='Marker side length', default='0.5', action='store', dest='ml', type=float)
+    parser.add_argument('-d', help='dictionary: {DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2, DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12, DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL=16}', default='0', action='store', dest='d', type=int)
+    parser.add_argument('-ci', help="Camera id if input doesn't come from video (-v)", default='0', action='store', dest='ci', type=int)
+    parser.add_argument('-c', help='Input file with calibrated camera parameters', default='', action='store', dest='cam_param')
+    args = parser.parse_args()
+    show_help = args.show_help
+    if show_help:
+        parser.print_help()
+        sys.exit()
+    width = args.w
+    height = args.h
+    square_len = args.sl
+    marker_len = args.ml
+    dict_id = args.d
+    video = args.v
+    camera_id = args.ci
+    img_path = args.img_path
+    cam_param = args.cam_param
+    cam_matrix = []
+    dist_coefficients = []
+    if cam_param != '':
+        (_, cam_matrix, dist_coefficients) = read_camera_parameters(cam_param)
+    aruco_dict = cv.aruco.getPredefinedDictionary(dict_id)
+    board_size = (width, height)
+    board = cv.aruco.CharucoBoard(board_size, square_len, marker_len, aruco_dict)
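+    # Hedged aside, not part of the original sample: the board created above
+    # can also render a printable raster of itself via generateImage, which is
+    # available in the same OpenCV versions that provide CharucoDetector. The
+    # output size and the 'board_image' name are illustrative assumptions.
+    board_image = board.generateImage((600, 600))  # grayscale board raster, unused below
+    # cv.imwrite('charuco_board.png', board_image)  # uncomment to save it for printing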
+    charuco_detector = cv.aruco.CharucoDetector(board)
+    image = None
+    input_video = None
+    wait_time = 10
+    if video != '':
+        input_video = cv.VideoCapture(cv.samples.findFileOrKeep(video, False))
+        image = input_video.retrieve()[1] if input_video.grab() else None
+    elif img_path == '':
+        input_video = cv.VideoCapture(camera_id)
+        image = input_video.retrieve()[1] if input_video.grab() else None
+    elif img_path != '':
+        wait_time = 0
+        image = cv.imread(cv.samples.findFile(img_path, False))
+    if image is None:
+        print('Error: unable to open video/image source')
+        sys.exit(0)
+    while image is not None:
+        image_copy = np.copy(image)
+        (charuco_corners, charuco_ids, marker_corners, marker_ids) = charuco_detector.detectBoard(image)
+        if marker_ids is not None and len(marker_ids) > 0:
+            cv.aruco.drawDetectedMarkers(image_copy, marker_corners)
+        if charuco_ids is not None and len(charuco_ids) > 0:
+            cv.aruco.drawDetectedCornersCharuco(image_copy, charuco_corners, charuco_ids)
+            if len(cam_matrix) > 0 and len(charuco_ids) >= 4:
+                try:
+                    (obj_points, img_points) = board.matchImagePoints(charuco_corners, charuco_ids)
+                    (flag, rvec, tvec) = cv.solvePnP(obj_points, img_points, cam_matrix, dist_coefficients)
+                    if flag:
+                        cv.drawFrameAxes(image_copy, cam_matrix, dist_coefficients, rvec, tvec, 0.2)
+                except cv.error as error_inst:
+                    print('solvePnP recognized the calibration pattern as non-planar. Processing a non-planar pattern requires at least 6 points. A planar pattern may be mistaken for a non-planar one if the pattern is deformed or if incorrect camera parameters are used.')
+                    print(error_inst.err)
+        cv.imshow('out', image_copy)
+        key = cv.waitKey(wait_time)
+        if key == 27:
+            break
+        image = input_video.retrieve()[1] if input_video is not None and input_video.grab() else None
+if __name__ == '__main__':
+    main()
+
+# File: opencv-master/samples/python/asift.py
+""""""
+from __future__ import print_function
+import numpy as np
+import cv2 as cv
+import itertools as it
+from multiprocessing.pool import ThreadPool
+from common import Timer
+from find_obj import init_feature, filter_matches, explore_match
+
+def affine_skew(tilt, phi, img, mask=None):
+    (h, w) = img.shape[:2]
+    if mask is None:
+        mask = np.zeros((h, w), np.uint8)
+        mask[:] = 255
+    A = np.float32([[1, 0, 0], [0, 1, 0]])
+    if phi != 0.0:
+        phi = np.deg2rad(phi)
+        (s, c) = (np.sin(phi), np.cos(phi))
+        A = np.float32([[c, -s], [s, c]])
+        corners = [[0, 0], [w, 0], [w, h], [0, h]]
+        tcorners = np.int32(np.dot(corners, A.T))
+        (x, y, w, h) = cv.boundingRect(tcorners.reshape(1, -1, 2))
+        A = np.hstack([A, [[-x], [-y]]])
+        img = cv.warpAffine(img, A, (w, h), flags=cv.INTER_LINEAR, borderMode=cv.BORDER_REPLICATE)
+    if tilt != 1.0:
+        s = 0.8 * np.sqrt(tilt * tilt - 1)
+        img = cv.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
+        img = cv.resize(img, (0, 0), fx=1.0 / tilt, fy=1.0, interpolation=cv.INTER_NEAREST)
+        A[0] /= tilt
+    if phi != 0.0 or tilt != 1.0:
+        (h, w) = img.shape[:2]
+        mask = cv.warpAffine(mask, A, (w, h), flags=cv.INTER_NEAREST)
+    Ai = cv.invertAffineTransform(A)
+    return (img, mask, Ai)
+
+def affine_detect(detector, img, mask=None, pool=None):
+    params = [(1.0, 0.0)]
+    for t in 2 ** (0.5 * np.arange(1, 6)):
+        for phi in np.arange(0, 180, 72.0 / t):
+            params.append((t, phi))
+
+    def f(p):
+        (t, phi) = p
+        (timg, tmask, Ai) = affine_skew(t, phi, img)
+        (keypoints, descrs) = detector.detectAndCompute(timg, tmask)
+        for kp in keypoints:
+            (x, y) = kp.pt
+            kp.pt = tuple(np.dot(Ai, (x, y, 1)))
+        if descrs is None:
+            descrs = []
+        return (keypoints,
descrs) + (keypoints, descrs) = ([], []) + if pool is None: + ires = it.imap(f, params) + else: + ires = pool.imap(f, params) + for (i, (k, d)) in enumerate(ires): + print('affine sampling: %d / %d\r' % (i + 1, len(params)), end='') + keypoints.extend(k) + descrs.extend(d) + print() + return (keypoints, np.array(descrs)) + +def main(): + import sys, getopt + (opts, args) = getopt.getopt(sys.argv[1:], '', ['feature=']) + opts = dict(opts) + feature_name = opts.get('--feature', 'brisk-flann') + try: + (fn1, fn2) = args + except: + fn1 = 'aero1.jpg' + fn2 = 'aero3.jpg' + img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE) + img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE) + (detector, matcher) = init_feature(feature_name) + if img1 is None: + print('Failed to load fn1:', fn1) + sys.exit(1) + if img2 is None: + print('Failed to load fn2:', fn2) + sys.exit(1) + if detector is None: + print('unknown feature:', feature_name) + sys.exit(1) + print('using', feature_name) + pool = ThreadPool(processes=cv.getNumberOfCPUs()) + (kp1, desc1) = affine_detect(detector, img1, pool=pool) + (kp2, desc2) = affine_detect(detector, img2, pool=pool) + print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))) + + def match_and_draw(win): + with Timer('matching'): + raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2) + (p1, p2, kp_pairs) = filter_matches(kp1, kp2, raw_matches) + if len(p1) >= 4: + (H, status) = cv.findHomography(p1, p2, cv.RANSAC, 5.0) + print('%d / %d inliers/matched' % (np.sum(status), len(status))) + kp_pairs = [kpp for (kpp, flag) in zip(kp_pairs, status) if flag] + else: + (H, status) = (None, None) + print('%d matches found, not enough for homography estimation' % len(p1)) + explore_match(win, img1, img2, kp_pairs, None, H) + match_and_draw('affine find_obj') + cv.waitKey() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/audio_spectrogram.py +import numpy as np +import cv2 as cv +import math +import argparse + +class AudioDrawing: + + def __init__(self, args): + self.inputType = args.inputType + self.draw = args.draw + self.graph = args.graph + self.audio = cv.samples.findFile(args.audio) + self.audioStream = args.audioStream + self.windowType = args.windowType + self.windLen = args.windLen + self.overlap = args.overlap + self.enableGrid = args.enableGrid + self.rows = args.rows + self.cols = args.cols + self.xmarkup = args.xmarkup + self.ymarkup = args.ymarkup + self.zmarkup = args.zmarkup + self.microTime = args.microTime + self.frameSizeTime = args.frameSizeTime + self.updateTime = args.updateTime + self.waitTime = args.waitTime + if self.initAndCheckArgs(args) is False: + exit() + + def Draw(self): + if self.draw == 'static': + if self.inputType == 'file': + (samplingRate, inputAudio) = self.readAudioFile(self.audio) + elif self.inputType == 'microphone': + (samplingRate, inputAudio) = self.readAudioMicrophone() + duration = len(inputAudio) // samplingRate + remainder = len(inputAudio) % samplingRate + if remainder != 0: + sizeToFullSec = samplingRate - remainder + zeroArr = np.zeros(sizeToFullSec) + inputAudio = np.concatenate((inputAudio, zeroArr), axis=0) + duration += 1 + print('Update duration of audio to full second with ', sizeToFullSec, ' zero samples') + print('New number of samples ', len(inputAudio)) + if duration <= self.xmarkup: + self.xmarkup = duration + 1 + if self.graph == 'ampl': + imgAmplitude = self.drawAmplitude(inputAudio) + 
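+                # drawAmplitude renders the raw waveform; drawAmplitudeScale on
+                # the next line frames it with time/amplitude axes and the
+                # optional grid before the result is displayed.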
imgAmplitude = self.drawAmplitudeScale(imgAmplitude, inputAudio, samplingRate) + cv.imshow('Display window', imgAmplitude) + cv.waitKey(0) + elif self.graph == 'spec': + stft = self.STFT(inputAudio) + imgSpec = self.drawSpectrogram(stft) + imgSpec = self.drawSpectrogramColorbar(imgSpec, inputAudio, samplingRate, stft) + cv.imshow('Display window', imgSpec) + cv.waitKey(0) + elif self.graph == 'ampl_and_spec': + imgAmplitude = self.drawAmplitude(inputAudio) + imgAmplitude = self.drawAmplitudeScale(imgAmplitude, inputAudio, samplingRate) + stft = self.STFT(inputAudio) + imgSpec = self.drawSpectrogram(stft) + imgSpec = self.drawSpectrogramColorbar(imgSpec, inputAudio, samplingRate, stft) + imgTotal = self.concatenateImages(imgAmplitude, imgSpec) + cv.imshow('Display window', imgTotal) + cv.waitKey(0) + elif self.draw == 'dynamic': + if self.inputType == 'file': + self.dynamicFile(self.audio) + elif self.inputType == 'microphone': + self.dynamicMicrophone() + + def readAudioFile(self, file): + cap = cv.VideoCapture(file) + params = [cv.CAP_PROP_AUDIO_STREAM, self.audioStream, cv.CAP_PROP_VIDEO_STREAM, -1, cv.CAP_PROP_AUDIO_DATA_DEPTH, cv.CV_16S] + params = np.asarray(params) + cap.open(file, cv.CAP_ANY, params) + if cap.isOpened() == False: + print("Error : Can't read audio file: '", self.audio, "' with audioStream = ", self.audioStream) + print('Error: problems with audio reading, check input arguments') + exit() + audioBaseIndex = int(cap.get(cv.CAP_PROP_AUDIO_BASE_INDEX)) + numberOfChannels = int(cap.get(cv.CAP_PROP_AUDIO_TOTAL_CHANNELS)) + print('CAP_PROP_AUDIO_DATA_DEPTH: ', str(int(cap.get(cv.CAP_PROP_AUDIO_DATA_DEPTH)))) + print('CAP_PROP_AUDIO_SAMPLES_PER_SECOND: ', cap.get(cv.CAP_PROP_AUDIO_SAMPLES_PER_SECOND)) + print('CAP_PROP_AUDIO_TOTAL_CHANNELS: ', numberOfChannels) + print('CAP_PROP_AUDIO_TOTAL_STREAMS: ', cap.get(cv.CAP_PROP_AUDIO_TOTAL_STREAMS)) + frame = [] + frame = np.asarray(frame) + inputAudio = [] + while 1: + if cap.grab(): + frame = [] + frame = np.asarray(frame) + frame = cap.retrieve(frame, audioBaseIndex) + for i in range(len(frame[1][0])): + inputAudio.append(frame[1][0][i]) + else: + break + inputAudio = np.asarray(inputAudio) + print('Number of samples: ', len(inputAudio)) + samplingRate = int(cap.get(cv.CAP_PROP_AUDIO_SAMPLES_PER_SECOND)) + return (samplingRate, inputAudio) + + def readAudioMicrophone(self): + cap = cv.VideoCapture() + params = [cv.CAP_PROP_AUDIO_STREAM, 0, cv.CAP_PROP_VIDEO_STREAM, -1] + params = np.asarray(params) + cap.open(0, cv.CAP_ANY, params) + if cap.isOpened() == False: + print("Error: Can't open microphone") + print('Error: problems with audio reading, check input arguments') + exit() + audioBaseIndex = int(cap.get(cv.CAP_PROP_AUDIO_BASE_INDEX)) + numberOfChannels = int(cap.get(cv.CAP_PROP_AUDIO_TOTAL_CHANNELS)) + print('CAP_PROP_AUDIO_DATA_DEPTH: ', str(int(cap.get(cv.CAP_PROP_AUDIO_DATA_DEPTH)))) + print('CAP_PROP_AUDIO_SAMPLES_PER_SECOND: ', cap.get(cv.CAP_PROP_AUDIO_SAMPLES_PER_SECOND)) + print('CAP_PROP_AUDIO_TOTAL_CHANNELS: ', numberOfChannels) + print('CAP_PROP_AUDIO_TOTAL_STREAMS: ', cap.get(cv.CAP_PROP_AUDIO_TOTAL_STREAMS)) + cvTickFreq = cv.getTickFrequency() + sysTimeCurr = cv.getTickCount() + sysTimePrev = sysTimeCurr + frame = [] + frame = np.asarray(frame) + inputAudio = [] + while (sysTimeCurr - sysTimePrev) / cvTickFreq < self.microTime: + if cap.grab(): + frame = [] + frame = np.asarray(frame) + frame = cap.retrieve(frame, audioBaseIndex) + for i in range(len(frame[1][0])): + inputAudio.append(frame[1][0][i]) + 
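# Recording length here is measured with OpenCV's tick counter rather than time.time(): elapsed seconds are (cv.getTickCount() - start) / cv.getTickFrequency(). + # A minimal standalone sketch of the idiom (do_work is a hypothetical placeholder): + # start = cv.getTickCount() + # do_work() + # elapsed_s = (cv.getTickCount() - start) / cv.getTickFrequency() + # The capture loop refreshes sysTimeCurr (next statement) after each grabbed chunk and exits once self.microTime seconds of audio have been recorded.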
sysTimeCurr = cv.getTickCount() + else: + print('Error: Grab error') + break + inputAudio = np.asarray(inputAudio) + print('Number of samples: ', len(inputAudio)) + samplingRate = int(cap.get(cv.CAP_PROP_AUDIO_SAMPLES_PER_SECOND)) + return (samplingRate, inputAudio) + + def drawAmplitude(self, inputAudio): + color = (247, 111, 87) + thickness = 5 + frameVectorRows = 500 + middle = frameVectorRows // 2 + frameVectorCols = 40000 + if len(inputAudio) < frameVectorCols: + frameVectorCols = len(inputAudio) + img = np.zeros((frameVectorRows, frameVectorCols, 3), np.uint8) + img += 255 + audio = np.array(0) + audio = cv.resize(inputAudio, (1, frameVectorCols), interpolation=cv.INTER_LINEAR) + reshapeAudio = np.reshape(audio, -1) + (minCv, maxCv, _, _) = cv.minMaxLoc(reshapeAudio) + maxElem = int(max(abs(minCv), abs(maxCv))) + if maxElem == 0: + maxElem = 1 + for i in range(len(reshapeAudio)): + reshapeAudio[i] = middle - reshapeAudio[i] * middle // maxElem + for i in range(1, frameVectorCols, 1): + cv.line(img, (i - 1, int(reshapeAudio[i - 1])), (i, int(reshapeAudio[i])), color, thickness) + img = cv.resize(img, (900, 400), interpolation=cv.INTER_AREA) + return img + + def drawAmplitudeScale(self, inputImg, inputAudio, samplingRate, xmin=None, xmax=None): + preCol = 100 + aftCol = 100 + preLine = 40 + aftLine = 50 + frameVectorRows = inputImg.shape[0] + frameVectorCols = inputImg.shape[1] + totalRows = preLine + frameVectorRows + aftLine + totalCols = preCol + frameVectorCols + aftCol + imgTotal = np.zeros((totalRows, totalCols, 3), np.uint8) + imgTotal += 255 + imgTotal[preLine:preLine + frameVectorRows, preCol:preCol + frameVectorCols] = inputImg + if xmin is None: + xmin = 0 + if xmax is None: + xmax = len(inputAudio) / samplingRate + if xmax > self.xmarkup: + xList = np.linspace(xmin, xmax, self.xmarkup).astype(int) + else: + tmp = np.arange(xmin, xmax, 1).astype(int) + 1 + xList = np.concatenate((np.zeros(self.xmarkup - len(tmp)), tmp[:]), axis=None) + ymin = np.min(inputAudio) + ymax = np.max(inputAudio) + yList = np.linspace(ymin, ymax, self.ymarkup) + textThickness = 1 + gridThickness = 1 + gridColor = (0, 0, 0) + textColor = (0, 0, 0) + font = cv.FONT_HERSHEY_SIMPLEX + fontScale = 0.5 + cv.line(imgTotal, (preCol, totalRows - aftLine), (preCol + frameVectorCols, totalRows - aftLine), gridColor, gridThickness) + cv.line(imgTotal, (preCol, preLine), (preCol, preLine + frameVectorRows), gridColor, gridThickness) + serifSize = 10 + indentDownX = serifSize * 2 + indentDownY = serifSize // 2 + indentLeftX = serifSize + indentLeftY = 2 * preCol // 3 + numX = frameVectorCols // (self.xmarkup - 1) + for i in range(len(xList)): + a1 = preCol + i * numX + a2 = frameVectorRows + preLine + b1 = a1 + b2 = a2 + serifSize + if self.enableGrid is True: + d1 = a1 + d2 = preLine + cv.line(imgTotal, (a1, a2), (d1, d2), gridColor, gridThickness) + cv.line(imgTotal, (a1, a2), (b1, b2), gridColor, gridThickness) + cv.putText(imgTotal, str(int(xList[i])), (b1 - indentLeftX, b2 + indentDownX), font, fontScale, textColor, textThickness) + numY = frameVectorRows // (self.ymarkup - 1) + for i in range(len(yList)): + a1 = preCol + a2 = totalRows - aftLine - i * numY + b1 = preCol - serifSize + b2 = a2 + if self.enableGrid is True: + d1 = preCol + frameVectorCols + d2 = a2 + cv.line(imgTotal, (a1, a2), (d1, d2), gridColor, gridThickness) + cv.line(imgTotal, (a1, a2), (b1, b2), gridColor, gridThickness) + cv.putText(imgTotal, str(int(yList[i])), (b1 - indentLeftY, b2 + indentDownY), font, fontScale, textColor, 
textThickness) + imgTotal = cv.resize(imgTotal, (self.cols, self.rows), interpolation=cv.INTER_AREA) + return imgTotal + + def STFT(self, inputAudio): + time_step = self.windLen - self.overlap + stft = [] + if self.windowType == 'Hann': + Hann_wind = [] + for i in range(1 - self.windLen, self.windLen, 2): + Hann_wind.append(i * (0.5 + 0.5 * math.cos(math.pi * i / (self.windLen - 1)))) + Hann_wind = np.asarray(Hann_wind) + elif self.windowType == 'Hamming': + Hamming_wind = [] + for i in range(1 - self.windLen, self.windLen, 2): + Hamming_wind.append(i * (0.53836 - 0.46164 * math.cos(2 * math.pi * i / (self.windLen - 1)))) + Hamming_wind = np.asarray(Hamming_wind) + for index in np.arange(0, len(inputAudio), time_step).astype(int): + section = inputAudio[index:index + self.windLen] + zeroArray = np.zeros(self.windLen - len(section)) + section = np.concatenate((section, zeroArray), axis=None) + if self.windowType == 'Hann': + section *= Hann_wind + elif self.windowType == 'Hamming': + section *= Hamming_wind + dst = np.empty(0) + dst = cv.dft(section, dst, flags=cv.DFT_COMPLEX_OUTPUT) + reshape_dst = np.reshape(dst, -1) + complexArr = np.zeros(len(dst) // 4, dtype=complex) + for i in range(len(dst) // 4): + complexArr[i] = complex(reshape_dst[2 * i], reshape_dst[2 * i + 1]) + stft.append(np.abs(complexArr)) + stft = np.array(stft).transpose() + np.log10(stft, out=stft, where=stft != 0.0) + return 10 * stft + + def drawSpectrogram(self, stft): + frameVectorRows = stft.shape[0] + frameVectorCols = stft.shape[1] + colormapImageRows = 255 + imgSpec = np.zeros((frameVectorRows, frameVectorCols, 3), np.uint8) + stftMat = np.zeros((frameVectorRows, frameVectorCols), np.float64) + cv.normalize(stft, stftMat, 1.0, 0.0, cv.NORM_INF) + for i in range(frameVectorRows): + for j in range(frameVectorCols): + imgSpec[frameVectorRows - i - 1, j] = int(stftMat[i][j] * colormapImageRows) + imgSpec = cv.applyColorMap(imgSpec, cv.COLORMAP_INFERNO) + imgSpec = cv.resize(imgSpec, (900, 400), interpolation=cv.INTER_LINEAR) + return imgSpec + + def drawSpectrogramColorbar(self, inputImg, inputAudio, samplingRate, stft, xmin=None, xmax=None): + preCol = 100 + aftCol = 100 + preLine = 40 + aftLine = 50 + colColor = 20 + ind_col = 20 + frameVectorRows = inputImg.shape[0] + frameVectorCols = inputImg.shape[1] + totalRows = preLine + frameVectorRows + aftLine + totalCols = preCol + frameVectorCols + aftCol + colColor + imgTotal = np.zeros((totalRows, totalCols, 3), np.uint8) + imgTotal += 255 + imgTotal[preLine:preLine + frameVectorRows, preCol:preCol + frameVectorCols] = inputImg + colorArrSize = 256 + imgColorBar = np.zeros((colorArrSize, colColor, 1), np.uint8) + for i in range(colorArrSize): + imgColorBar[i] += colorArrSize - 1 - i + imgColorBar = cv.applyColorMap(imgColorBar, cv.COLORMAP_INFERNO) + imgColorBar = cv.resize(imgColorBar, (colColor, frameVectorRows), interpolation=cv.INTER_AREA) + imgTotal[preLine:preLine + frameVectorRows, preCol + frameVectorCols + ind_col:preCol + frameVectorCols + ind_col + colColor] = imgColorBar + if xmin is None: + xmin = 0 + if xmax is None: + xmax = len(inputAudio) / samplingRate + if xmax > self.xmarkup: + xList = np.linspace(xmin, xmax, self.xmarkup).astype(int) + else: + tmpXList = np.arange(xmin, xmax, 1).astype(int) + 1 + xList = np.concatenate((np.zeros(self.xmarkup - len(tmpXList)), tmpXList[:]), axis=None) + ymin = 0 + ymax = int(samplingRate / 2.0) + yList = np.linspace(ymin, ymax, self.ymarkup).astype(int) + zList = np.linspace(np.min(stft), np.max(stft), 
self.zmarkup) + textThickness = 1 + textColor = (0, 0, 0) + gridThickness = 1 + gridColor = (0, 0, 0) + font = cv.FONT_HERSHEY_SIMPLEX + fontScale = 0.5 + serifSize = 10 + indentDownX = serifSize * 2 + indentDownY = serifSize // 2 + indentLeftX = serifSize + indentLeftY = 2 * preCol // 3 + cv.line(imgTotal, (preCol, totalRows - aftLine), (preCol + frameVectorCols, totalRows - aftLine), gridColor, gridThickness) + cv.line(imgTotal, (preCol, preLine), (preCol, preLine + frameVectorRows), gridColor, gridThickness) + numX = frameVectorCols // (self.xmarkup - 1) + for i in range(len(xList)): + a1 = preCol + i * numX + a2 = frameVectorRows + preLine + b1 = a1 + b2 = a2 + serifSize + cv.line(imgTotal, (a1, a2), (b1, b2), gridColor, gridThickness) + cv.putText(imgTotal, str(int(xList[i])), (b1 - indentLeftX, b2 + indentDownX), font, fontScale, textColor, textThickness) + numY = frameVectorRows // (self.ymarkup - 1) + for i in range(len(yList)): + a1 = preCol + a2 = totalRows - aftLine - i * numY + b1 = preCol - serifSize + b2 = a2 + cv.line(imgTotal, (a1, a2), (b1, b2), gridColor, gridThickness) + cv.putText(imgTotal, str(int(yList[i])), (b1 - indentLeftY, b2 + indentDownY), font, fontScale, textColor, textThickness) + numZ = frameVectorRows // (self.zmarkup - 1) + for i in range(len(zList)): + a1 = preCol + frameVectorCols + ind_col + colColor + a2 = totalRows - aftLine - i * numZ + b1 = a1 + serifSize + b2 = a2 + cv.line(imgTotal, (a1, a2), (b1, b2), gridColor, gridThickness) + cv.putText(imgTotal, str(int(zList[i])), (b1 + 10, b2 + indentDownY), font, fontScale, textColor, textThickness) + imgTotal = cv.resize(imgTotal, (self.cols, self.rows), interpolation=cv.INTER_AREA) + return imgTotal + + def concatenateImages(self, img1, img2): + totalRows = img1.shape[0] + img2.shape[0] + totalCols = max(img1.shape[1], img2.shape[1]) + imgTotal = np.zeros((totalRows, totalCols, 3), np.uint8) + imgTotal += 255 + imgTotal[:img1.shape[0], :img1.shape[1]] = img1 + imgTotal[img2.shape[0]:, :img2.shape[1]] = img2 + return imgTotal + + def dynamicFile(self, file): + cap = cv.VideoCapture(file) + params = [cv.CAP_PROP_AUDIO_STREAM, self.audioStream, cv.CAP_PROP_VIDEO_STREAM, -1, cv.CAP_PROP_AUDIO_DATA_DEPTH, cv.CV_16S] + params = np.asarray(params) + cap.open(file, cv.CAP_ANY, params) + if cap.isOpened() == False: + print("ERROR! 
Can't open file") + return + audioBaseIndex = int(cap.get(cv.CAP_PROP_AUDIO_BASE_INDEX)) + numberOfChannels = int(cap.get(cv.CAP_PROP_AUDIO_TOTAL_CHANNELS)) + samplingRate = int(cap.get(cv.CAP_PROP_AUDIO_SAMPLES_PER_SECOND)) + print('CAP_PROP_AUDIO_DATA_DEPTH: ', str(int(cap.get(cv.CAP_PROP_AUDIO_DATA_DEPTH)))) + print('CAP_PROP_AUDIO_SAMPLES_PER_SECOND: ', cap.get(cv.CAP_PROP_AUDIO_SAMPLES_PER_SECOND)) + print('CAP_PROP_AUDIO_TOTAL_CHANNELS: ', numberOfChannels) + print('CAP_PROP_AUDIO_TOTAL_STREAMS: ', cap.get(cv.CAP_PROP_AUDIO_TOTAL_STREAMS)) + step = int(self.updateTime * samplingRate) + frameSize = int(self.frameSizeTime * samplingRate) + if self.frameSizeTime <= self.xmarkup: + self.xmarkup = self.frameSizeTime + buffer = [] + section = np.zeros(frameSize, dtype=np.int16) + currentSamples = 0 + while 1: + if cap.grab(): + frame = [] + frame = np.asarray(frame) + frame = cap.retrieve(frame, audioBaseIndex) + for i in range(len(frame[1][0])): + buffer.append(frame[1][0][i]) + buffer_size = len(buffer) + if buffer_size >= step: + section = list(section) + currentSamples += step + del section[0:step] + section.extend(buffer[0:step]) + del buffer[0:step] + section = np.asarray(section) + if currentSamples < frameSize: + xmin = 0 + xmax = currentSamples / samplingRate + else: + xmin = (currentSamples - frameSize) / samplingRate + 1 + xmax = currentSamples / samplingRate + if self.graph == 'ampl': + imgAmplitude = self.drawAmplitude(section) + imgAmplitude = self.drawAmplitudeScale(imgAmplitude, section, samplingRate, xmin, xmax) + cv.imshow('Display amplitude graph', imgAmplitude) + cv.waitKey(self.waitTime) + elif self.graph == 'spec': + stft = self.STFT(section) + imgSpec = self.drawSpectrogram(stft) + imgSpec = self.drawSpectrogramColorbar(imgSpec, section, samplingRate, stft, xmin, xmax) + cv.imshow('Display spectrogram', imgSpec) + cv.waitKey(self.waitTime) + elif self.graph == 'ampl_and_spec': + imgAmplitude = self.drawAmplitude(section) + stft = self.STFT(section) + imgSpec = self.drawSpectrogram(stft) + imgAmplitude = self.drawAmplitudeScale(imgAmplitude, section, samplingRate, xmin, xmax) + imgSpec = self.drawSpectrogramColorbar(imgSpec, section, samplingRate, stft, xmin, xmax) + imgTotal = self.concatenateImages(imgAmplitude, imgSpec) + cv.imshow('Display amplitude graph and spectrogram', imgTotal) + cv.waitKey(self.waitTime) + else: + break + + def dynamicMicrophone(self): + cap = cv.VideoCapture() + params = [cv.CAP_PROP_AUDIO_STREAM, 0, cv.CAP_PROP_VIDEO_STREAM, -1] + params = np.asarray(params) + cap.open(0, cv.CAP_ANY, params) + if cap.isOpened() == False: + print("ERROR! 
Can't open microphone") + return + audioBaseIndex = int(cap.get(cv.CAP_PROP_AUDIO_BASE_INDEX)) + numberOfChannels = int(cap.get(cv.CAP_PROP_AUDIO_TOTAL_CHANNELS)) + print('CAP_PROP_AUDIO_DATA_DEPTH: ', str(int(cap.get(cv.CAP_PROP_AUDIO_DATA_DEPTH)))) + print('CAP_PROP_AUDIO_SAMPLES_PER_SECOND: ', cap.get(cv.CAP_PROP_AUDIO_SAMPLES_PER_SECOND)) + print('CAP_PROP_AUDIO_TOTAL_CHANNELS: ', numberOfChannels) + print('CAP_PROP_AUDIO_TOTAL_STREAMS: ', cap.get(cv.CAP_PROP_AUDIO_TOTAL_STREAMS)) + frame = [] + frame = np.asarray(frame) + samplingRate = int(cap.get(cv.CAP_PROP_AUDIO_SAMPLES_PER_SECOND)) + step = int(self.updateTime * samplingRate) + frameSize = int(self.frameSizeTime * samplingRate) + self.xmarkup = self.frameSizeTime + currentSamples = 0 + buffer = [] + section = np.zeros(frameSize, dtype=np.int16) + cvTickFreq = cv.getTickFrequency() + sysTimeCurr = cv.getTickCount() + sysTimePrev = sysTimeCurr + self.waitTime = self.updateTime * 1000 + while (sysTimeCurr - sysTimePrev) / cvTickFreq < self.microTime: + if cap.grab(): + frame = [] + frame = np.asarray(frame) + frame = cap.retrieve(frame, audioBaseIndex) + for i in range(len(frame[1][0])): + buffer.append(frame[1][0][i]) + sysTimeCurr = cv.getTickCount() + buffer_size = len(buffer) + if buffer_size >= step: + section = list(section) + currentSamples += step + del section[0:step] + section.extend(buffer[0:step]) + del buffer[0:step] + section = np.asarray(section) + if currentSamples < frameSize: + xmin = 0 + xmax = currentSamples / samplingRate + else: + xmin = (currentSamples - frameSize) / samplingRate + 1 + xmax = currentSamples / samplingRate + if self.graph == 'ampl': + imgAmplitude = self.drawAmplitude(section) + imgAmplitude = self.drawAmplitudeScale(imgAmplitude, section, samplingRate, xmin, xmax) + cv.imshow('Display amplitude graph', imgAmplitude) + cv.waitKey(self.waitTime) + elif self.graph == 'spec': + stft = self.STFT(section) + imgSpec = self.drawSpectrogram(stft) + imgSpec = self.drawSpectrogramColorbar(imgSpec, section, samplingRate, stft, xmin, xmax) + cv.imshow('Display spectrogram', imgSpec) + cv.waitKey(self.waitTime) + elif self.graph == 'ampl_and_spec': + imgAmplitude = self.drawAmplitude(section) + stft = self.STFT(section) + imgSpec = self.drawSpectrogram(stft) + imgAmplitude = self.drawAmplitudeScale(imgAmplitude, section, samplingRate, xmin, xmax) + imgSpec = self.drawSpectrogramColorbar(imgSpec, section, samplingRate, stft, xmin, xmax) + imgTotal = self.concatenateImages(imgAmplitude, imgSpec) + cv.imshow('Display amplitude graph and spectrogram', imgTotal) + cv.waitKey(self.waitTime) + else: + break + + def initAndCheckArgs(self, args): + if args.inputType != 'file' and args.inputType != 'microphone': + print('Error: ', args.inputType, " input method doesn't exist") + return False + if args.draw != 'static' and args.draw != 'dynamic': + print('Error: ', args.draw, " draw type doesn't exist") + return False + if args.graph != 'ampl' and args.graph != 'spec' and (args.graph != 'ampl_and_spec'): + print('Error: ', args.graph, " type of graph doesn't exist") + return False + if args.windowType != 'Rect' and args.windowType != 'Hann' and (args.windowType != 'Hamming'): + print('Error: ', args.windowType, " type of window doesn't exist") + return False + if args.windLen <= 0: + print('Error: windLen = ', args.windLen, ' - incorrect value. Must be > 0') + return False + if args.overlap <= 0: + print('Error: overlap = ', args.overlap, ' - incorrect value. 
Must be > 0') + return False + if args.rows <= 0: + print('Error: rows = ', args.rows, ' - incorrect value. Must be > 0') + return False + if args.cols <= 0: + print('Error: cols = ', args.cols, ' - incorrect value. Must be > 0') + return False + if args.xmarkup < 2: + print('Error: xmarkup = ', args.xmarkup, ' - incorrect value. Must be >= 2') + return False + if args.ymarkup < 2: + print('Error: ymarkup = ', args.ymarkup, ' - incorrect value. Must be >= 2') + return False + if args.zmarkup < 2: + print('Error: zmarkup = ', args.zmarkup, ' - incorrect value. Must be >= 2') + return False + if args.microTime <= 0: + print('Error: microTime = ', args.microTime, ' - incorrect value. Must be > 0') + return False + if args.frameSizeTime <= 0: + print('Error: frameSizeTime = ', args.frameSizeTime, ' - incorrect value. Must be > 0') + return False + if args.updateTime <= 0: + print('Error: updateTime = ', args.updateTime, ' - incorrect value. Must be > 0') + return False + if args.waitTime < 0: + print('Error: waitTime = ', args.waitTime, ' - incorrect value. Must be >= 0') + return False + return True +if __name__ == '__main__': + parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description='this sample draws a volume graph and/or spectrogram of audio/video files and microphone\nDefault usage: ./Spectrogram.exe') + parser.add_argument('-i', '--inputType', dest='inputType', type=str, default='file', help='file or microphone') + parser.add_argument('-d', '--draw', dest='draw', type=str, default='static', help='type of drawing: static - for plotting graph(s) across the entire input audio; dynamic - for plotting graph(s) in a time-updating window') + parser.add_argument('-g', '--graph', dest='graph', type=str, default='ampl_and_spec', help='type of graph: amplitude graph or/and spectrogram. Please use tags below : ampl - draw the amplitude graph; spec - draw the spectrogram; ampl_and_spec - draw the amplitude graph and spectrogram on one image under each other') + parser.add_argument('-a', '--audio', dest='audio', type=str, default='Megamind.avi', help='name and path to file') + parser.add_argument('-s', '--audioStream', dest='audioStream', type=int, default=1, help=' CAP_PROP_AUDIO_STREAM value') + parser.add_argument('-t', '--windowType', dest='windowType', type=str, default='Rect', help='type of window for STFT. 
Please use tags below : Rect/Hann/Hamming') + parser.add_argument('-l', '--windLen', dest='windLen', type=int, default=256, help='size of window for STFT') + parser.add_argument('-o', '--overlap', dest='overlap', type=int, default=128, help='overlap of windows for STFT') + parser.add_argument('-gd', '--grid', dest='enableGrid', type=bool, default=False, help='grid on amplitude graph(on/off)') + parser.add_argument('-r', '--rows', dest='rows', type=int, default=400, help='rows of output image') + parser.add_argument('-c', '--cols', dest='cols', type=int, default=900, help='cols of output image') + parser.add_argument('-x', '--xmarkup', dest='xmarkup', type=int, default=5, help='number of x axis divisions (time axis)') + parser.add_argument('-y', '--ymarkup', dest='ymarkup', type=int, default=5, help='number of y axis divisions (frequency or/and amplitude axis)') + parser.add_argument('-z', '--zmarkup', dest='zmarkup', type=int, default=5, help='number of z axis divisions (colorbar)') + parser.add_argument('-m', '--microTime', dest='microTime', type=int, default=20, help='time of recording audio with microphone in seconds') + parser.add_argument('-f', '--frameSizeTime', dest='frameSizeTime', type=int, default=5, help='size of sliding window in seconds') + parser.add_argument('-u', '--updateTime', dest='updateTime', type=int, default=1, help='update time of sliding window in seconds') + parser.add_argument('-w', '--waitTime', dest='waitTime', type=int, default=10, help='parameter to cv.waitKey() for dynamic update, takes values in milliseconds') + args = parser.parse_args() + AudioDrawing(args).Draw() + +# File: opencv-master/samples/python/browse.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv +import sys + +def main(): + if len(sys.argv) > 1: + fn = cv.samples.findFile(sys.argv[1]) + print('loading %s ...' % fn) + img = cv.imread(fn) + if img is None: + print('Failed to load fn:', fn) + sys.exit(1) + else: + sz = 4096 + print('generating %dx%d procedural image ...' 
% (sz, sz)) + img = np.zeros((sz, sz), np.uint8) + track = np.cumsum(np.random.rand(500000, 2) - 0.5, axis=0) + track = np.int32(track * 10 + (sz / 2, sz / 2)) + cv.polylines(img, [track], 0, 255, 1, cv.LINE_AA) + small = img + for _i in xrange(3): + small = cv.pyrDown(small) + + def onmouse(event, x, y, flags, param): + (h, _w) = img.shape[:2] + (h1, _w1) = small.shape[:2] + (x, y) = (1.0 * x * h / h1, 1.0 * y * h / h1) + zoom = cv.getRectSubPix(img, (800, 600), (x + 0.5, y + 0.5)) + cv.imshow('zoom', zoom) + cv.imshow('preview', small) + cv.setMouseCallback('preview', onmouse) + cv.waitKey() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/calibrate.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +from common import splitfn +import os + +def main(): + import sys + import getopt + from glob import glob + (args, img_names) = getopt.getopt(sys.argv[1:], 'w:h:t:', ['debug=', 'square_size=', 'marker_size=', 'aruco_dict=', 'threads=']) + args = dict(args) + args.setdefault('--debug', './output/') + args.setdefault('-w', 4) + args.setdefault('-h', 6) + args.setdefault('-t', 'chessboard') + args.setdefault('--square_size', 10) + args.setdefault('--marker_size', 5) + args.setdefault('--aruco_dict', 'DICT_4X4_50') + args.setdefault('--threads', 4) + if not img_names: + img_mask = '../data/left??.jpg' + img_names = glob(img_mask) + debug_dir = args.get('--debug') + if debug_dir and (not os.path.isdir(debug_dir)): + os.mkdir(debug_dir) + height = int(args.get('-h')) + width = int(args.get('-w')) + pattern_type = str(args.get('-t')) + square_size = float(args.get('--square_size')) + marker_size = float(args.get('--marker_size')) + aruco_dict_name = str(args.get('--aruco_dict')) + pattern_size = (width, height) + if pattern_type == 'chessboard': + pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32) + pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2) + pattern_points *= square_size + obj_points = [] + img_points = [] + (h, w) = cv.imread(img_names[0], cv.IMREAD_GRAYSCALE).shape[:2] + aruco_dicts = {'DICT_4X4_50': cv.aruco.DICT_4X4_50, 'DICT_4X4_100': cv.aruco.DICT_4X4_100, 'DICT_4X4_250': cv.aruco.DICT_4X4_250, 'DICT_4X4_1000': cv.aruco.DICT_4X4_1000, 'DICT_5X5_50': cv.aruco.DICT_5X5_50, 'DICT_5X5_100': cv.aruco.DICT_5X5_100, 'DICT_5X5_250': cv.aruco.DICT_5X5_250, 'DICT_5X5_1000': cv.aruco.DICT_5X5_1000, 'DICT_6X6_50': cv.aruco.DICT_6X6_50, 'DICT_6X6_100': cv.aruco.DICT_6X6_100, 'DICT_6X6_250': cv.aruco.DICT_6X6_250, 'DICT_6X6_1000': cv.aruco.DICT_6X6_1000, 'DICT_7X7_50': cv.aruco.DICT_7X7_50, 'DICT_7X7_100': cv.aruco.DICT_7X7_100, 'DICT_7X7_250': cv.aruco.DICT_7X7_250, 'DICT_7X7_1000': cv.aruco.DICT_7X7_1000, 'DICT_ARUCO_ORIGINAL': cv.aruco.DICT_ARUCO_ORIGINAL, 'DICT_APRILTAG_16h5': cv.aruco.DICT_APRILTAG_16h5, 'DICT_APRILTAG_25h9': cv.aruco.DICT_APRILTAG_25h9, 'DICT_APRILTAG_36h10': cv.aruco.DICT_APRILTAG_36h10, 'DICT_APRILTAG_36h11': cv.aruco.DICT_APRILTAG_36h11} + if aruco_dict_name not in set(aruco_dicts.keys()): + print('unknown aruco dictionary name') + return None + aruco_dict = cv.aruco.getPredefinedDictionary(aruco_dicts[aruco_dict_name]) + board = cv.aruco.CharucoBoard(pattern_size, square_size, marker_size, aruco_dict) + charuco_detector = cv.aruco.CharucoDetector(board) + + def processImage(fn): + print('processing %s... 
' % fn) + img = cv.imread(fn, cv.IMREAD_GRAYSCALE) + if img is None: + print('Failed to load', fn) + return None + assert w == img.shape[1] and h == img.shape[0], 'size: %d x %d ... ' % (img.shape[1], img.shape[0]) + found = False + corners = 0 + if pattern_type == 'chessboard': + (found, corners) = cv.findChessboardCorners(img, pattern_size) + if found: + term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1) + cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term) + frame_img_points = corners.reshape(-1, 2) + frame_obj_points = pattern_points + elif pattern_type == 'charucoboard': + (corners, charucoIds, _, _) = charuco_detector.detectBoard(img) + if len(corners) > 0: + (frame_obj_points, frame_img_points) = board.matchImagePoints(corners, charucoIds) + found = True + else: + found = False + else: + print('unknown pattern type', pattern_type) + return None + if debug_dir: + vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR) + if pattern_type == 'chessboard': + cv.drawChessboardCorners(vis, pattern_size, corners, found) + elif pattern_type == 'charucoboard': + cv.aruco.drawDetectedCornersCharuco(vis, corners, charucoIds=charucoIds) + (_path, name, _ext) = splitfn(fn) + outfile = os.path.join(debug_dir, name + '_board.png') + cv.imwrite(outfile, vis) + if not found: + print('pattern not found') + return None + print(' %s... OK' % fn) + return (frame_img_points, frame_obj_points) + threads_num = int(args.get('--threads')) + if threads_num <= 1: + chessboards = [processImage(fn) for fn in img_names] + else: + print('Run with %d threads...' % threads_num) + from multiprocessing.dummy import Pool as ThreadPool + pool = ThreadPool(threads_num) + chessboards = pool.map(processImage, img_names) + chessboards = [x for x in chessboards if x is not None] + for (corners, pattern_points) in chessboards: + img_points.append(corners) + obj_points.append(pattern_points) + (rms, camera_matrix, dist_coefs, _rvecs, _tvecs) = cv.calibrateCamera(obj_points, img_points, (w, h), None, None) + print('\nRMS:', rms) + print('camera matrix:\n', camera_matrix) + print('distortion coefficients: ', dist_coefs.ravel()) + print('') + for fn in img_names if debug_dir else []: + (_path, name, _ext) = splitfn(fn) + img_found = os.path.join(debug_dir, name + '_board.png') + outfile = os.path.join(debug_dir, name + '_undistorted.png') + img = cv.imread(img_found) + if img is None: + continue + (h, w) = img.shape[:2] + (newcameramtx, roi) = cv.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h)) + dst = cv.undistort(img, camera_matrix, dist_coefs, None, newcameramtx) + (x, y, w, h) = roi + dst = dst[y:y + h, x:x + w] + print('Undistorted image written to: %s' % outfile) + cv.imwrite(outfile, dst) + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/camera_calibration_show_extrinsics.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +from numpy import linspace + +def inverse_homogeneoux_matrix(M): + R = M[0:3, 0:3] + T = M[0:3, 3] + M_inv = np.identity(4) + M_inv[0:3, 0:3] = R.T + M_inv[0:3, 3] = -R.T.dot(T) + return M_inv + +def transform_to_matplotlib_frame(cMo, X, inverse=False): + M = np.identity(4) + M[1, 1] = 0 + M[1, 2] = 1 + M[2, 1] = -1 + M[2, 2] = 0 + if inverse: + return M.dot(inverse_homogeneoux_matrix(cMo).dot(X)) + else: + return M.dot(cMo.dot(X)) + +def create_camera_model(camera_matrix, width, height, scale_focal, draw_frame_axis=False): + fx = camera_matrix[0, 0] + fy = 
camera_matrix[1, 1] + focal = 2 / (fx + fy) + f_scale = scale_focal * focal + X_img_plane = np.ones((4, 5)) + X_img_plane[0:3, 0] = [-width, height, f_scale] + X_img_plane[0:3, 1] = [width, height, f_scale] + X_img_plane[0:3, 2] = [width, -height, f_scale] + X_img_plane[0:3, 3] = [-width, -height, f_scale] + X_img_plane[0:3, 4] = [-width, height, f_scale] + X_triangle = np.ones((4, 3)) + X_triangle[0:3, 0] = [-width, -height, f_scale] + X_triangle[0:3, 1] = [0, -2 * height, f_scale] + X_triangle[0:3, 2] = [width, -height, f_scale] + X_center1 = np.ones((4, 2)) + X_center1[0:3, 0] = [0, 0, 0] + X_center1[0:3, 1] = [-width, height, f_scale] + X_center2 = np.ones((4, 2)) + X_center2[0:3, 0] = [0, 0, 0] + X_center2[0:3, 1] = [width, height, f_scale] + X_center3 = np.ones((4, 2)) + X_center3[0:3, 0] = [0, 0, 0] + X_center3[0:3, 1] = [width, -height, f_scale] + X_center4 = np.ones((4, 2)) + X_center4[0:3, 0] = [0, 0, 0] + X_center4[0:3, 1] = [-width, -height, f_scale] + X_frame1 = np.ones((4, 2)) + X_frame1[0:3, 0] = [0, 0, 0] + X_frame1[0:3, 1] = [f_scale / 2, 0, 0] + X_frame2 = np.ones((4, 2)) + X_frame2[0:3, 0] = [0, 0, 0] + X_frame2[0:3, 1] = [0, f_scale / 2, 0] + X_frame3 = np.ones((4, 2)) + X_frame3[0:3, 0] = [0, 0, 0] + X_frame3[0:3, 1] = [0, 0, f_scale / 2] + if draw_frame_axis: + return [X_img_plane, X_triangle, X_center1, X_center2, X_center3, X_center4, X_frame1, X_frame2, X_frame3] + else: + return [X_img_plane, X_triangle, X_center1, X_center2, X_center3, X_center4] + +def create_board_model(extrinsics, board_width, board_height, square_size, draw_frame_axis=False): + width = board_width * square_size + height = board_height * square_size + X_board = np.ones((4, 5)) + X_board[0:3, 0] = [0, 0, 0] + X_board[0:3, 1] = [width, 0, 0] + X_board[0:3, 2] = [width, height, 0] + X_board[0:3, 3] = [0, height, 0] + X_board[0:3, 4] = [0, 0, 0] + X_frame1 = np.ones((4, 2)) + X_frame1[0:3, 0] = [0, 0, 0] + X_frame1[0:3, 1] = [height / 2, 0, 0] + X_frame2 = np.ones((4, 2)) + X_frame2[0:3, 0] = [0, 0, 0] + X_frame2[0:3, 1] = [0, height / 2, 0] + X_frame3 = np.ones((4, 2)) + X_frame3[0:3, 0] = [0, 0, 0] + X_frame3[0:3, 1] = [0, 0, height / 2] + if draw_frame_axis: + return [X_board, X_frame1, X_frame2, X_frame3] + else: + return [X_board] + +def draw_camera_boards(ax, camera_matrix, cam_width, cam_height, scale_focal, extrinsics, board_width, board_height, square_size, patternCentric): + from matplotlib import cm + min_values = np.zeros((3, 1)) + min_values = np.inf + max_values = np.zeros((3, 1)) + max_values = -np.inf + if patternCentric: + X_moving = create_camera_model(camera_matrix, cam_width, cam_height, scale_focal) + X_static = create_board_model(extrinsics, board_width, board_height, square_size) + else: + X_static = create_camera_model(camera_matrix, cam_width, cam_height, scale_focal, True) + X_moving = create_board_model(extrinsics, board_width, board_height, square_size) + cm_subsection = linspace(0.0, 1.0, extrinsics.shape[0]) + colors = [cm.jet(x) for x in cm_subsection] + for i in range(len(X_static)): + X = np.zeros(X_static[i].shape) + for j in range(X_static[i].shape[1]): + X[:, j] = transform_to_matplotlib_frame(np.eye(4), X_static[i][:, j]) + ax.plot3D(X[0, :], X[1, :], X[2, :], color='r') + min_values = np.minimum(min_values, X[0:3, :].min(1)) + max_values = np.maximum(max_values, X[0:3, :].max(1)) + for idx in range(extrinsics.shape[0]): + (R, _) = cv.Rodrigues(extrinsics[idx, 0:3]) + cMo = np.eye(4, 4) + cMo[0:3, 0:3] = R + cMo[0:3, 3] = extrinsics[idx, 3:6] + for i in 
range(len(X_moving)): + X = np.zeros(X_moving[i].shape) + for j in range(X_moving[i].shape[1]): + X[0:4, j] = transform_to_matplotlib_frame(cMo, X_moving[i][0:4, j], patternCentric) + ax.plot3D(X[0, :], X[1, :], X[2, :], color=colors[idx]) + min_values = np.minimum(min_values, X[0:3, :].min(1)) + max_values = np.maximum(max_values, X[0:3, :].max(1)) + return (min_values, max_values) + +def main(): + import argparse + parser = argparse.ArgumentParser(description='Plot camera calibration extrinsics.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--calibration', type=str, default='left_intrinsics.yml', help='YAML camera calibration file.') + parser.add_argument('--cam_width', type=float, default=0.064 / 2, help='Width/2 of the displayed camera.') + parser.add_argument('--cam_height', type=float, default=0.048 / 2, help='Height/2 of the displayed camera.') + parser.add_argument('--scale_focal', type=float, default=40, help='Value to scale the focal length.') + parser.add_argument('--patternCentric', action='store_true', help='The calibration board is static and the camera is moving.') + args = parser.parse_args() + fs = cv.FileStorage(cv.samples.findFile(args.calibration), cv.FILE_STORAGE_READ) + board_width = int(fs.getNode('board_width').real()) + board_height = int(fs.getNode('board_height').real()) + square_size = fs.getNode('square_size').real() + camera_matrix = fs.getNode('camera_matrix').mat() + extrinsics = fs.getNode('extrinsic_parameters').mat() + import matplotlib.pyplot as plt + from mpl_toolkits.mplot3d import Axes3D + fig = plt.figure() + ax = fig.add_subplot(projection='3d')  # fig.gca(projection='3d') was removed in Matplotlib 3.6 + ax.set_aspect('auto') + cam_width = args.cam_width + cam_height = args.cam_height + scale_focal = args.scale_focal + (min_values, max_values) = draw_camera_boards(ax, camera_matrix, cam_width, cam_height, scale_focal, extrinsics, board_width, board_height, square_size, args.patternCentric) + X_min = min_values[0] + X_max = max_values[0] + Y_min = min_values[1] + Y_max = max_values[1] + Z_min = min_values[2] + Z_max = max_values[2] + max_range = np.array([X_max - X_min, Y_max - Y_min, Z_max - Z_min]).max() / 2.0 + mid_x = (X_max + X_min) * 0.5 + mid_y = (Y_max + Y_min) * 0.5 + mid_z = (Z_max + Z_min) * 0.5 + ax.set_xlim(mid_x - max_range, mid_x + max_range) + ax.set_ylim(mid_y - max_range, mid_y + max_range) + ax.set_zlim(mid_z - max_range, mid_z + max_range) + ax.set_xlabel('x') + ax.set_ylabel('z') + ax.set_zlabel('-y') + ax.set_title('Extrinsic Parameters Visualization') + plt.show() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/camshift.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv +import video +from video import presets + +class App(object): + + def __init__(self, video_src): + self.cam = video.create_capture(video_src, presets['cube']) + (_ret, self.frame) = self.cam.read() + cv.namedWindow('camshift') + cv.setMouseCallback('camshift', self.onmouse) + self.selection = None + self.drag_start = None + self.show_backproj = False + self.track_window = None + + def onmouse(self, event, x, y, flags, param): + if event == cv.EVENT_LBUTTONDOWN: + self.drag_start = (x, y) + self.track_window = None + if self.drag_start: + xmin = min(x, self.drag_start[0]) + ymin = min(y, self.drag_start[1]) + xmax = max(x, self.drag_start[0]) + ymax = max(y, self.drag_start[1]) + self.selection = 
(xmin, ymin, xmax, ymax) + if event == cv.EVENT_LBUTTONUP: + self.drag_start = None + self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin) + + def show_hist(self): + bin_count = self.hist.shape[0] + bin_w = 24 + img = np.zeros((256, bin_count * bin_w, 3), np.uint8) + for i in xrange(bin_count): + h = int(self.hist[i]) + cv.rectangle(img, (i * bin_w + 2, 255), ((i + 1) * bin_w - 2, 255 - h), (int(180.0 * i / bin_count), 255, 255), -1) + img = cv.cvtColor(img, cv.COLOR_HSV2BGR) + cv.imshow('hist', img) + + def run(self): + while True: + (_ret, self.frame) = self.cam.read() + vis = self.frame.copy() + hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV) + mask = cv.inRange(hsv, np.array((0.0, 60.0, 32.0)), np.array((180.0, 255.0, 255.0))) + if self.selection: + (x0, y0, x1, y1) = self.selection + hsv_roi = hsv[y0:y1, x0:x1] + mask_roi = mask[y0:y1, x0:x1] + hist = cv.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180]) + cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX) + self.hist = hist.reshape(-1) + self.show_hist() + vis_roi = vis[y0:y1, x0:x1] + cv.bitwise_not(vis_roi, vis_roi) + vis[mask == 0] = 0 + if self.track_window and self.track_window[2] > 0 and (self.track_window[3] > 0): + self.selection = None + prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1) + prob &= mask + term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1) + (track_box, self.track_window) = cv.CamShift(prob, self.track_window, term_crit) + if self.show_backproj: + vis[:] = prob[..., np.newaxis] + try: + cv.ellipse(vis, track_box, (0, 0, 255), 2) + except: + print(track_box) + cv.imshow('camshift', vis) + ch = cv.waitKey(5) + if ch == 27: + break + if ch == ord('b'): + self.show_backproj = not self.show_backproj + cv.destroyAllWindows() +if __name__ == '__main__': + print(__doc__) + import sys + try: + video_src = sys.argv[1] + except: + video_src = 0 + App(video_src).run() + +# File: opencv-master/samples/python/coherence.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv + +def coherence_filter(img, sigma=11, str_sigma=11, blend=0.5, iter_n=4): + (h, w) = img.shape[:2] + for i in xrange(iter_n): + print(i) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + eigen = cv.cornerEigenValsAndVecs(gray, str_sigma, 3) + eigen = eigen.reshape(h, w, 3, 2) + (x, y) = (eigen[:, :, 1, 0], eigen[:, :, 1, 1]) + gxx = cv.Sobel(gray, cv.CV_32F, 2, 0, ksize=sigma) + gxy = cv.Sobel(gray, cv.CV_32F, 1, 1, ksize=sigma) + gyy = cv.Sobel(gray, cv.CV_32F, 0, 2, ksize=sigma) + gvv = x * x * gxx + 2 * x * y * gxy + y * y * gyy + m = gvv < 0 + ero = cv.erode(img, None) + dil = cv.dilate(img, None) + img1 = ero + img1[m] = dil[m] + img = np.uint8(img * (1.0 - blend) + img1 * blend) + print('done') + return img + +def main(): + import sys + try: + fn = sys.argv[1] + except: + fn = 'baboon.jpg' + src = cv.imread(cv.samples.findFile(fn)) + + def nothing(*argv): + pass + + def update(): + sigma = cv.getTrackbarPos('sigma', 'control') * 2 + 1 + str_sigma = cv.getTrackbarPos('str_sigma', 'control') * 2 + 1 + blend = cv.getTrackbarPos('blend', 'control') / 10.0 + print('sigma: %d str_sigma: %d blend_coef: %f' % (sigma, str_sigma, blend)) + dst = coherence_filter(src, sigma=sigma, str_sigma=str_sigma, blend=blend) + cv.imshow('dst', dst) + cv.namedWindow('control', 0) + cv.createTrackbar('sigma', 'control', 9, 15, nothing) + cv.createTrackbar('blend', 'control', 7, 10, nothing) + cv.createTrackbar('str_sigma', 'control', 9, 15, 
nothing) + print('Press SPACE to update the image\n') + cv.imshow('src', src) + update() + while True: + ch = cv.waitKey() + if ch == ord(' '): + update() + if ch == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/color_histogram.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import sys +import video + +class App: + + def set_scale(self, val): + self.hist_scale = val + + def run(self): + hsv_map = np.zeros((180, 256, 3), np.uint8) + (h, s) = np.indices(hsv_map.shape[:2]) + hsv_map[:, :, 0] = h + hsv_map[:, :, 1] = s + hsv_map[:, :, 2] = 255 + hsv_map = cv.cvtColor(hsv_map, cv.COLOR_HSV2BGR) + cv.imshow('hsv_map', hsv_map) + cv.namedWindow('hist', 0) + self.hist_scale = 10 + cv.createTrackbar('scale', 'hist', self.hist_scale, 32, self.set_scale) + try: + fn = sys.argv[1] + except: + fn = 0 + cam = video.create_capture(fn, fallback='synth:bg=baboon.jpg:class=chess:noise=0.05') + while True: + (_flag, frame) = cam.read() + cv.imshow('camera', frame) + small = cv.pyrDown(frame) + hsv = cv.cvtColor(small, cv.COLOR_BGR2HSV) + dark = hsv[..., 2] < 32 + hsv[dark] = 0 + h = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256]) + h = np.clip(h * 0.005 * self.hist_scale, 0, 1) + vis = hsv_map * h[:, :, np.newaxis] / 255.0 + cv.imshow('hist', vis) + ch = cv.waitKey(1) + if ch == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + App().run() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/common.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + from functools import reduce +import numpy as np +import cv2 as cv +import os +import itertools as it +from contextlib import contextmanager +image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm'] + +class Bunch(object): + + def __init__(self, **kw): + self.__dict__.update(kw) + + def __str__(self): + return str(self.__dict__) + +def splitfn(fn): + (path, fn) = os.path.split(fn) + (name, ext) = os.path.splitext(fn) + return (path, name, ext) + +def anorm2(a): + return (a * a).sum(-1) + +def anorm(a): + return np.sqrt(anorm2(a)) + +def homotrans(H, x, y): + xs = H[0, 0] * x + H[0, 1] * y + H[0, 2] + ys = H[1, 0] * x + H[1, 1] * y + H[1, 2] + s = H[2, 0] * x + H[2, 1] * y + H[2, 2] + return (xs / s, ys / s) + +def to_rect(a): + a = np.ravel(a) + if len(a) == 2: + a = (0, 0, a[0], a[1]) + return np.array(a, np.float64).reshape(2, 2) + +def rect2rect_mtx(src, dst): + (src, dst) = (to_rect(src), to_rect(dst)) + (cx, cy) = (dst[1] - dst[0]) / (src[1] - src[0]) + (tx, ty) = dst[0] - src[0] * (cx, cy) + M = np.float64([[cx, 0, tx], [0, cy, ty], [0, 0, 1]]) + return M + +def lookat(eye, target, up=(0, 0, 1)): + fwd = np.asarray(target, np.float64) - eye + fwd /= anorm(fwd) + right = np.cross(fwd, up) + right /= anorm(right) + down = np.cross(fwd, right) + R = np.float64([right, down, fwd]) + tvec = -np.dot(R, eye) + return (R, tvec) + +def mtx2rvec(R): + (w, u, vt) = cv.SVDecomp(R - np.eye(3)) + p = vt[0] + u[:, 0] * w[0] + c = np.dot(vt[0], p) + s = np.dot(vt[1], p) + axis = np.cross(vt[0], vt[1]) + return axis * np.arctan2(s, c) + +def draw_str(dst, target, s): + (x, y) = target + cv.putText(dst, s, (x + 1, y + 1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=2, lineType=cv.LINE_AA) + cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA) + +class 
Sketcher: + + def __init__(self, windowname, dests, colors_func): + self.prev_pt = None + self.windowname = windowname + self.dests = dests + self.colors_func = colors_func + self.dirty = False + self.show() + cv.setMouseCallback(self.windowname, self.on_mouse) + + def show(self): + cv.imshow(self.windowname, self.dests[0]) + + def on_mouse(self, event, x, y, flags, param): + pt = (x, y) + if event == cv.EVENT_LBUTTONDOWN: + self.prev_pt = pt + elif event == cv.EVENT_LBUTTONUP: + self.prev_pt = None + if self.prev_pt and flags & cv.EVENT_FLAG_LBUTTON: + for (dst, color) in zip(self.dests, self.colors_func()): + cv.line(dst, self.prev_pt, pt, color, 5) + self.dirty = True + self.prev_pt = pt + self.show() +_jet_data = {'red': ((0.0, 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89, 1, 1), (1, 0.5, 0.5)), 'green': ((0.0, 0, 0), (0.125, 0, 0), (0.375, 1, 1), (0.64, 1, 1), (0.91, 0, 0), (1, 0, 0)), 'blue': ((0.0, 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65, 0, 0), (1, 0, 0))} +cmap_data = {'jet': _jet_data} + +def make_cmap(name, n=256): + data = cmap_data[name] + xs = np.linspace(0.0, 1.0, n) + channels = [] + eps = 1e-06 + for ch_name in ['blue', 'green', 'red']: + ch_data = data[ch_name] + (xp, yp) = ([], []) + for (x, y1, y2) in ch_data: + xp += [x, x + eps] + yp += [y1, y2] + ch = np.interp(xs, xp, yp) + channels.append(ch) + return np.uint8(np.array(channels).T * 255) + +def nothing(*arg, **kw): + pass + +def clock(): + return cv.getTickCount() / cv.getTickFrequency() + +@contextmanager +def Timer(msg): + print(msg, '...') + start = clock() + try: + yield + finally: + print('%.2f ms' % ((clock() - start) * 1000)) + +class StatValue: + + def __init__(self, smooth_coef=0.5): + self.value = None + self.smooth_coef = smooth_coef + + def update(self, v): + if self.value is None: + self.value = v + else: + c = self.smooth_coef + self.value = c * self.value + (1.0 - c) * v + +class RectSelector: + + def __init__(self, win, callback): + self.win = win + self.callback = callback + cv.setMouseCallback(win, self.onmouse) + self.drag_start = None + self.drag_rect = None + + def onmouse(self, event, x, y, flags, param): + (x, y) = np.int16([x, y]) + if event == cv.EVENT_LBUTTONDOWN: + self.drag_start = (x, y) + return + if self.drag_start: + if flags & cv.EVENT_FLAG_LBUTTON: + (xo, yo) = self.drag_start + (x0, y0) = np.minimum([xo, yo], [x, y]) + (x1, y1) = np.maximum([xo, yo], [x, y]) + self.drag_rect = None + if x1 - x0 > 0 and y1 - y0 > 0: + self.drag_rect = (x0, y0, x1, y1) + else: + rect = self.drag_rect + self.drag_start = None + self.drag_rect = None + if rect: + self.callback(rect) + + def draw(self, vis): + if not self.drag_rect: + return False + (x0, y0, x1, y1) = self.drag_rect + cv.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2) + return True + + @property + def dragging(self): + return self.drag_rect is not None + +def grouper(n, iterable, fillvalue=None): + args = [iter(iterable)] * n + if PY3: + output = it.zip_longest(*args, fillvalue=fillvalue) + else: + output = it.izip_longest(*args, fillvalue=fillvalue) + return output + +def mosaic(w, imgs): + imgs = iter(imgs) + if PY3: + img0 = next(imgs) + else: + img0 = imgs.next() + pad = np.zeros_like(img0) + imgs = it.chain([img0], imgs) + rows = grouper(w, imgs, pad) + return np.vstack(list(map(np.hstack, rows))) + +def getsize(img): + (h, w) = img.shape[:2] + return (w, h) + +def mdot(*args): + return reduce(np.dot, args) + +def draw_keypoints(vis, keypoints, color=(0, 255, 255)): + for kp in keypoints: + (x, y) = kp.pt + cv.circle(vis, 
(int(x), int(y)), 2, color) + +# File: opencv-master/samples/python/contours.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv + +def make_image(): + img = np.zeros((500, 500), np.uint8) + (black, white) = (0, 255) + for i in xrange(6): + dx = int(i % 2 * 250 - 30) + dy = int(i / 2.0 * 150) + if i == 0: + for j in xrange(11): + angle = (j + 5) * np.pi / 21 + (c, s) = (np.cos(angle), np.sin(angle)) + (x1, y1) = np.int32([dx + 100 + j * 10 - 80 * c, dy + 100 - 90 * s]) + (x2, y2) = np.int32([dx + 100 + j * 10 - 30 * c, dy + 100 - 30 * s]) + cv.line(img, (x1, y1), (x2, y2), white) + cv.ellipse(img, (dx + 150, dy + 100), (100, 70), 0, 0, 360, white, -1) + cv.ellipse(img, (dx + 115, dy + 70), (30, 20), 0, 0, 360, black, -1) + cv.ellipse(img, (dx + 185, dy + 70), (30, 20), 0, 0, 360, black, -1) + cv.ellipse(img, (dx + 115, dy + 70), (15, 15), 0, 0, 360, white, -1) + cv.ellipse(img, (dx + 185, dy + 70), (15, 15), 0, 0, 360, white, -1) + cv.ellipse(img, (dx + 115, dy + 70), (5, 5), 0, 0, 360, black, -1) + cv.ellipse(img, (dx + 185, dy + 70), (5, 5), 0, 0, 360, black, -1) + cv.ellipse(img, (dx + 150, dy + 100), (10, 5), 0, 0, 360, black, -1) + cv.ellipse(img, (dx + 150, dy + 150), (40, 10), 0, 0, 360, black, -1) + cv.ellipse(img, (dx + 27, dy + 100), (20, 35), 0, 0, 360, white, -1) + cv.ellipse(img, (dx + 273, dy + 100), (20, 35), 0, 0, 360, white, -1) + return img + +def main(): + img = make_image() + (h, w) = img.shape[:2] + (contours0, hierarchy) = cv.findContours(img.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) + contours = [cv.approxPolyDP(cnt, 3, True) for cnt in contours0] + + def update(levels): + vis = np.zeros((h, w, 3), np.uint8) + levels = levels - 3 + cv.drawContours(vis, contours, (-1, 2)[levels <= 0], (128, 255, 255), 3, cv.LINE_AA, hierarchy, abs(levels)) + cv.imshow('contours', vis) + update(3) + cv.createTrackbar('levels+3', 'contours', 3, 7, update) + cv.imshow('image', img) + cv.waitKey() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/deconvolution.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +from common import nothing + +def blur_edge(img, d=31): + (h, w) = img.shape[:2] + img_pad = cv.copyMakeBorder(img, d, d, d, d, cv.BORDER_WRAP) + img_blur = cv.GaussianBlur(img_pad, (2 * d + 1, 2 * d + 1), -1)[d:-d, d:-d] + (y, x) = np.indices((h, w)) + dist = np.dstack([x, w - x - 1, y, h - y - 1]).min(-1) + w = np.minimum(np.float32(dist) / d, 1.0) + return img * w + img_blur * (1 - w) + +def motion_kernel(angle, d, sz=65): + kern = np.ones((1, d), np.float32) + (c, s) = (np.cos(angle), np.sin(angle)) + A = np.float32([[c, -s, 0], [s, c, 0]]) + sz2 = sz // 2 + A[:, 2] = (sz2, sz2) - np.dot(A[:, :2], ((d - 1) * 0.5, 0)) + kern = cv.warpAffine(kern, A, (sz, sz), flags=cv.INTER_CUBIC) + return kern + +def defocus_kernel(d, sz=65): + kern = np.zeros((sz, sz), np.uint8) + cv.circle(kern, (sz, sz), d, 255, -1, cv.LINE_AA, shift=1) + kern = np.float32(kern) / 255.0 + return kern + +def main(): + import sys, getopt + (opts, args) = getopt.getopt(sys.argv[1:], '', ['circle', 'angle=', 'd=', 'snr=']) + opts = dict(opts) + try: + fn = args[0] + except: + fn = 'licenseplate_motion.jpg' + win = 'deconvolution' + img = cv.imread(cv.samples.findFile(fn), cv.IMREAD_GRAYSCALE) + if img is None: + print('Failed to load file:', fn) + sys.exit(1) + img = np.float32(img) / 
255.0 + cv.imshow('input', img) + img = blur_edge(img) + IMG = cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT) + defocus = '--circle' in opts + + def update(_): + ang = np.deg2rad(cv.getTrackbarPos('angle', win)) + d = cv.getTrackbarPos('d', win) + noise = 10 ** (-0.1 * cv.getTrackbarPos('SNR (db)', win)) + if defocus: + psf = defocus_kernel(d) + else: + psf = motion_kernel(ang, d) + cv.imshow('psf', psf) + psf /= psf.sum() + psf_pad = np.zeros_like(img) + (kh, kw) = psf.shape + psf_pad[:kh, :kw] = psf + PSF = cv.dft(psf_pad, flags=cv.DFT_COMPLEX_OUTPUT, nonzeroRows=kh) + PSF2 = (PSF ** 2).sum(-1) + iPSF = PSF / (PSF2 + noise)[..., np.newaxis] + RES = cv.mulSpectrums(IMG, iPSF, 0) + res = cv.idft(RES, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT) + res = np.roll(res, -kh // 2, 0) + res = np.roll(res, -kw // 2, 1) + cv.imshow(win, res) + cv.namedWindow(win) + cv.namedWindow('psf', 0) + cv.createTrackbar('angle', win, int(opts.get('--angle', 135)), 180, update) + cv.createTrackbar('d', win, int(opts.get('--d', 22)), 50, update) + cv.createTrackbar('SNR (db)', win, int(opts.get('--snr', 25)), 50, update) + update(None) + while True: + ch = cv.waitKey() + if ch == 27: + break + if ch == ord(' '): + defocus = not defocus + update(None) + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/demo.py +"""""" +from __future__ import print_function +import sys +from common import splitfn +import webbrowser +from glob import glob +from subprocess import Popen +try: + import tkinter as tk + from tkinter.scrolledtext import ScrolledText +except ImportError: + import Tkinter as tk + from ScrolledText import ScrolledText +exclude_list = ['demo', 'common'] + +class LinkManager: + + def __init__(self, text, url_callback=None): + self.text = text + self.text.tag_config('link', foreground='blue', underline=1) + self.text.tag_bind('link', '<Enter>', self._enter) + self.text.tag_bind('link', '<Leave>', self._leave) + self.text.tag_bind('link', '<Button-1>', self._click) + self.url_callback = url_callback + self.reset() + + def reset(self): + self.links = {} + + def add(self, action): + tag = 'link-%d' % len(self.links) + self.links[tag] = action + return ('link', tag) + + def _enter(self, event): + self.text.config(cursor='hand2') + + def _leave(self, event): + self.text.config(cursor='') + + def _click(self, event): + for tag in self.text.tag_names(tk.CURRENT): + if tag.startswith('link-'): + proc = self.links[tag] + if callable(proc): + proc() + elif self.url_callback: + self.url_callback(proc) + +class App: + + def __init__(self): + root = tk.Tk() + root.title('OpenCV Demo') + self.win = win = tk.PanedWindow(root, orient=tk.HORIZONTAL, sashrelief=tk.RAISED, sashwidth=4) + self.win.pack(fill=tk.BOTH, expand=1) + left = tk.Frame(win) + right = tk.Frame(win) + win.add(left) + win.add(right) + scrollbar = tk.Scrollbar(left, orient=tk.VERTICAL) + self.demos_lb = demos_lb = tk.Listbox(left, yscrollcommand=scrollbar.set) + scrollbar.config(command=demos_lb.yview) + scrollbar.pack(side=tk.RIGHT, fill=tk.Y) + demos_lb.pack(side=tk.LEFT, fill=tk.BOTH, expand=1) + self.samples = {} + for fn in glob('*.py'): + name = splitfn(fn)[1] + if fn[0] != '_' and name not in exclude_list: + self.samples[name] = fn + for name in sorted(self.samples): + demos_lb.insert(tk.END, name) + demos_lb.bind('<<ListboxSelect>>', self.on_demo_select) + self.cmd_entry = cmd_entry = tk.Entry(right) + cmd_entry.bind('<Return>', self.on_run) + run_btn = tk.Button(right, command=self.on_run, text='Run', width=8) + self.text = 
text = ScrolledText(right, font=('arial', 12, 'normal'), width=30, wrap='word') + self.linker = _linker = LinkManager(text, self.on_link) + self.text.tag_config('header1', font=('arial', 14, 'bold')) + self.text.tag_config('header2', font=('arial', 12, 'bold')) + text.config(state='disabled') + text.pack(fill='both', expand=1, side=tk.BOTTOM) + cmd_entry.pack(fill='x', side='left', expand=1) + run_btn.pack() + + def on_link(self, url): + print(url) + webbrowser.open(url) + + def on_demo_select(self, evt): + name = self.demos_lb.get(self.demos_lb.curselection()[0]) + fn = self.samples[name] + descr = '' + try: + if sys.version_info[0] > 2: + module_globals = {} + module_locals = {} + with open(fn, 'r') as f: + module_code = f.read() + exec(compile(module_code, fn, 'exec'), module_globals, module_locals) + descr = module_locals.get('__doc__', 'no-description') + else: + module_globals = {} + execfile(fn, module_globals) + descr = module_globals.get('__doc__', 'no-description') + except Exception as e: + descr = str(e) + self.linker.reset() + self.text.config(state='normal') + self.text.delete(1.0, tk.END) + self.format_text(descr) + self.text.config(state='disabled') + self.cmd_entry.delete(0, tk.END) + self.cmd_entry.insert(0, fn) + + def format_text(self, s): + text = self.text + lines = s.splitlines() + for (i, s) in enumerate(lines): + s = s.rstrip() + if i == 0 and (not s): + continue + if s and s == '=' * len(s): + text.tag_add('header1', 'end-2l', 'end-1l') + elif s and s == '-' * len(s): + text.tag_add('header2', 'end-2l', 'end-1l') + else: + text.insert('end', s + '\n') + + def add_link(start, end, url): + for tag in self.linker.add(url): + text.tag_add(tag, start, end) + self.match_text('http://\\S+', add_link) + + def match_text(self, pattern, tag_proc, regexp=True): + text = self.text + text.mark_set('matchPos', '1.0') + count = tk.IntVar() + while True: + match_index = text.search(pattern, 'matchPos', count=count, regexp=regexp, stopindex='end') + if not match_index: + break + end_index = text.index('%s+%sc' % (match_index, count.get())) + text.mark_set('matchPos', end_index) + if callable(tag_proc): + tag_proc(match_index, end_index, text.get(match_index, end_index)) + else: + text.tag_add(tag_proc, match_index, end_index) + + def on_run(self, *args): + cmd = self.cmd_entry.get() + print('running:', cmd) + Popen(sys.executable + ' ' + cmd, shell=True) + + def run(self): + tk.mainloop() +if __name__ == '__main__': + App().run() + +# File: opencv-master/samples/python/dft.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import sys + +def shift_dft(src, dst=None): + if dst is None: + dst = np.empty(src.shape, src.dtype) + elif src.shape != dst.shape: + raise ValueError('src and dst must have equal sizes') + elif src.dtype != dst.dtype: + raise TypeError('src and dst must have equal types') + if src is dst: + ret = np.empty(src.shape, src.dtype) + else: + ret = dst + (h, w) = src.shape[:2] + cx1 = cx2 = w // 2 + cy1 = cy2 = h // 2 + if w % 2 != 0: + cx2 += 1 + if h % 2 != 0: + cy2 += 1 + ret[h - cy1:, w - cx1:] = src[0:cy1, 0:cx1] + ret[0:cy2, 0:cx2] = src[h - cy2:, w - cx2:] + ret[0:cy2, w - cx2:] = src[h - cy2:, 0:cx2] + ret[h - cy1:, 0:cx1] = src[0:cy1, w - cx1:] + if src is dst: + dst[:, :] = ret + return dst + +def main(): + if len(sys.argv) > 1: + fname = sys.argv[1] + else: + fname = 'baboon.jpg' + print('usage : python dft.py <image_file>') + im = cv.imread(cv.samples.findFile(fname)) + im = cv.cvtColor(im, cv.COLOR_BGR2GRAY) + (h, w) = 
im.shape[:2] + realInput = im.astype(np.float64) + dft_M = cv.getOptimalDFTSize(w) + dft_N = cv.getOptimalDFTSize(h) + dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64) + dft_A[:h, :w, 0] = realInput + cv.dft(dft_A, dst=dft_A, nonzeroRows=h) + cv.imshow('win', im) + (image_Re, image_Im) = cv.split(dft_A) + magnitude = cv.sqrt(image_Re ** 2.0 + image_Im ** 2.0) + log_spectrum = cv.log(1.0 + magnitude) + shift_dft(log_spectrum, log_spectrum) + cv.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv.NORM_MINMAX) + cv.imshow('magnitude', log_spectrum) + cv.waitKey(0) + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/digits.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +from multiprocessing.pool import ThreadPool +from numpy.linalg import norm +from common import clock, mosaic +SZ = 20 +CLASS_N = 10 +DIGITS_FN = 'digits.png' + +def split2d(img, cell_size, flatten=True): + (h, w) = img.shape[:2] + (sx, sy) = cell_size + cells = [np.hsplit(row, w // sx) for row in np.vsplit(img, h // sy)] + cells = np.array(cells) + if flatten: + cells = cells.reshape(-1, sy, sx) + return cells + +def load_digits(fn): + fn = cv.samples.findFile(fn) + print('loading "%s" ...' % fn) + digits_img = cv.imread(fn, cv.IMREAD_GRAYSCALE) + digits = split2d(digits_img, (SZ, SZ)) + labels = np.repeat(np.arange(CLASS_N), len(digits) / CLASS_N) + return (digits, labels) + +def deskew(img): + m = cv.moments(img) + if abs(m['mu02']) < 0.01: + return img.copy() + skew = m['mu11'] / m['mu02'] + M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]]) + img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR) + return img + +class KNearest(object): + + def __init__(self, k=3): + self.k = k + self.model = cv.ml.KNearest_create() + + def train(self, samples, responses): + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + (_retval, results, _neigh_resp, _dists) = self.model.findNearest(samples, self.k) + return results.ravel() + + def load(self, fn): + self.model = cv.ml.KNearest_load(fn) + + def save(self, fn): + self.model.save(fn) + +class SVM(object): + + def __init__(self, C=1, gamma=0.5): + self.model = cv.ml.SVM_create() + self.model.setGamma(gamma) + self.model.setC(C) + self.model.setKernel(cv.ml.SVM_RBF) + self.model.setType(cv.ml.SVM_C_SVC) + + def train(self, samples, responses): + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + return self.model.predict(samples)[1].ravel() + + def load(self, fn): + self.model = cv.ml.SVM_load(fn) + + def save(self, fn): + self.model.save(fn) + +def evaluate_model(model, digits, samples, labels): + resp = model.predict(samples) + err = (labels != resp).mean() + print('error: %.2f %%' % (err * 100)) + confusion = np.zeros((10, 10), np.int32) + for (i, j) in zip(labels, resp): + confusion[i, int(j)] += 1 + print('confusion matrix:') + print(confusion) + print() + vis = [] + for (img, flag) in zip(digits, resp == labels): + img = cv.cvtColor(img, cv.COLOR_GRAY2BGR) + if not flag: + img[..., :2] = 0 + vis.append(img) + return mosaic(25, vis) + +def preprocess_simple(digits): + return np.float32(digits).reshape(-1, SZ * SZ) / 255.0 + +def preprocess_hog(digits): + samples = [] + for img in digits: + gx = cv.Sobel(img, cv.CV_32F, 1, 0) + gy = cv.Sobel(img, cv.CV_32F, 0, 1) + (mag, ang) = cv.cartToPolar(gx, gy) + bin_n = 16 + bin = np.int32(bin_n * ang / (2 * 
np.pi)) + bin_cells = (bin[:10, :10], bin[10:, :10], bin[:10, 10:], bin[10:, 10:]) + mag_cells = (mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]) + hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for (b, m) in zip(bin_cells, mag_cells)] + hist = np.hstack(hists) + eps = 1e-07 + hist /= hist.sum() + eps + hist = np.sqrt(hist) + hist /= norm(hist) + eps + samples.append(hist) + return np.float32(samples) +if __name__ == '__main__': + print(__doc__) + (digits, labels) = load_digits(DIGITS_FN) + print('preprocessing...') + rand = np.random.RandomState(321) + shuffle = rand.permutation(len(digits)) + (digits, labels) = (digits[shuffle], labels[shuffle]) + digits2 = list(map(deskew, digits)) + samples = preprocess_hog(digits2) + train_n = int(0.9 * len(samples)) + cv.imshow('test set', mosaic(25, digits[train_n:])) + (digits_train, digits_test) = np.split(digits2, [train_n]) + (samples_train, samples_test) = np.split(samples, [train_n]) + (labels_train, labels_test) = np.split(labels, [train_n]) + print('training KNearest...') + model = KNearest(k=4) + model.train(samples_train, labels_train) + vis = evaluate_model(model, digits_test, samples_test, labels_test) + cv.imshow('KNearest test', vis) + print('training SVM...') + model = SVM(C=2.67, gamma=5.383) + model.train(samples_train, labels_train) + vis = evaluate_model(model, digits_test, samples_test, labels_test) + cv.imshow('SVM test', vis) + print('saving SVM as "digits_svm.dat"...') + model.save('digits_svm.dat') + cv.waitKey(0) + cv.destroyAllWindows() + +# File: opencv-master/samples/python/digits_adjust.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv +from multiprocessing.pool import ThreadPool +from digits import * + +def cross_validate(model_class, params, samples, labels, kfold=3, pool=None): + n = len(samples) + folds = np.array_split(np.arange(n), kfold) + + def f(i): + model = model_class(**params) + test_idx = folds[i] + train_idx = list(folds) + train_idx.pop(i) + train_idx = np.hstack(train_idx) + (train_samples, train_labels) = (samples[train_idx], labels[train_idx]) + (test_samples, test_labels) = (samples[test_idx], labels[test_idx]) + model.train(train_samples, train_labels) + resp = model.predict(test_samples) + score = (resp != test_labels).mean() + print('.', end='') + return score + if pool is None: + scores = list(map(f, xrange(kfold))) + else: + scores = pool.map(f, xrange(kfold)) + return np.mean(scores) + +class App(object): + + def __init__(self): + (self._samples, self._labels) = self.preprocess() + + def preprocess(self): + (digits, labels) = load_digits(DIGITS_FN) + shuffle = np.random.permutation(len(digits)) + (digits, labels) = (digits[shuffle], labels[shuffle]) + digits2 = list(map(deskew, digits)) + samples = preprocess_hog(digits2) + return (samples, labels) + + def get_dataset(self): + return (self._samples, self._labels) + + def run_jobs(self, f, jobs): + pool = ThreadPool(processes=cv.getNumberOfCPUs()) + ires = pool.imap_unordered(f, jobs) + return ires + + def adjust_SVM(self): + Cs = np.logspace(0, 10, 15, base=2) + gammas = np.logspace(-7, 4, 15, base=2) + scores = np.zeros((len(Cs), len(gammas))) + scores[:] = np.nan + print('adjusting SVM (may take a long time) ...') + + def f(job): + (i, j) = job + (samples, labels) = self.get_dataset() + params = dict(C=Cs[i], gamma=gammas[j]) + score = cross_validate(SVM, params, samples, labels) + return (i, j, score) + ires = 
self.run_jobs(f, np.ndindex(*scores.shape)) + for (count, (i, j, score)) in enumerate(ires): + scores[i, j] = score + print('%d / %d (best error: %.2f %%, last: %.2f %%)' % (count + 1, scores.size, np.nanmin(scores) * 100, score * 100)) + print(scores) + print('writing score table to "svm_scores.npz"') + np.savez('svm_scores.npz', scores=scores, Cs=Cs, gammas=gammas) + (i, j) = np.unravel_index(scores.argmin(), scores.shape) + best_params = dict(C=Cs[i], gamma=gammas[j]) + print('best params:', best_params) + print('best error: %.2f %%' % (scores.min() * 100)) + return best_params + + def adjust_KNearest(self): + print('adjusting KNearest ...') + + def f(k): + (samples, labels) = self.get_dataset() + err = cross_validate(KNearest, dict(k=k), samples, labels) + return (k, err) + (best_err, best_k) = (np.inf, -1) + for (k, err) in self.run_jobs(f, xrange(1, 9)): + if err < best_err: + (best_err, best_k) = (err, k) + print('k = %d, error: %.2f %%' % (k, err * 100)) + best_params = dict(k=best_k) + print('best params:', best_params, 'err: %.2f' % (best_err * 100)) + return best_params +if __name__ == '__main__': + import getopt + import sys + print(__doc__) + (args, _) = getopt.getopt(sys.argv[1:], '', ['model=']) + args = dict(args) + args.setdefault('--model', 'svm') + args.setdefault('--env', '') + if args['--model'] not in ['svm', 'knearest']: + print('unknown model "%s"' % args['--model']) + sys.exit(1) + t = clock() + app = App() + if args['--model'] == 'knearest': + app.adjust_KNearest() + else: + app.adjust_SVM() + print('work time: %f s' % (clock() - t)) + +# File: opencv-master/samples/python/digits_video.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import os +import sys +import video +from common import mosaic +from digits import * + +def main(): + try: + src = sys.argv[1] + except: + src = 0 + cap = video.create_capture(src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('sudoku.png'))) + classifier_fn = 'digits_svm.dat' + if not os.path.exists(classifier_fn): + print('"%s" not found, run digits.py first' % classifier_fn) + return + model = cv.ml.SVM_load(classifier_fn) + while True: + (_ret, frame) = cap.read() + gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) + bin = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 31, 10) + bin = cv.medianBlur(bin, 3) + (contours, heirs) = cv.findContours(bin.copy(), cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE) + try: + heirs = heirs[0] + except: + heirs = [] + for (cnt, heir) in zip(contours, heirs): + (_, _, _, outer_i) = heir + if outer_i >= 0: + continue + (x, y, w, h) = cv.boundingRect(cnt) + if not (16 <= h <= 64 and w <= 1.2 * h): + continue + pad = max(h - w, 0) + (x, w) = (x - pad // 2, w + pad) + cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0)) + bin_roi = bin[y:, x:][:h, :w] + m = bin_roi != 0 + if not 0.1 < m.mean() < 0.4: + continue + '' + s = 1.5 * float(h) / SZ + m = cv.moments(bin_roi) + c1 = np.float32([m['m10'], m['m01']]) / m['m00'] + c0 = np.float32([SZ / 2, SZ / 2]) + t = c1 - s * c0 + A = np.zeros((2, 3), np.float32) + A[:, :2] = np.eye(2) * s + A[:, 2] = t + bin_norm = cv.warpAffine(bin_roi, A, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR) + bin_norm = deskew(bin_norm) + if x + w + SZ < frame.shape[1] and y + SZ < frame.shape[0]: + frame[y:, x + w:][:SZ, :SZ] = bin_norm[..., np.newaxis] + sample = preprocess_hog([bin_norm]) + digit = model.predict(sample)[1].ravel() + cv.putText(frame, '%d' % digit, (x, y), 
cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness=1) + cv.imshow('frame', frame) + cv.imshow('bin', bin) + ch = cv.waitKey(1) + if ch == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/dis_opt_flow.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import video + +def draw_flow(img, flow, step=16): + (h, w) = img.shape[:2] + (y, x) = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1).astype(int) + (fx, fy) = flow[y, x].T + lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2) + lines = np.int32(lines + 0.5) + vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR) + cv.polylines(vis, lines, 0, (0, 255, 0)) + for ((x1, y1), (_x2, _y2)) in lines: + cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1) + return vis + +def draw_hsv(flow): + (h, w) = flow.shape[:2] + (fx, fy) = (flow[:, :, 0], flow[:, :, 1]) + ang = np.arctan2(fy, fx) + np.pi + v = np.sqrt(fx * fx + fy * fy) + hsv = np.zeros((h, w, 3), np.uint8) + hsv[..., 0] = ang * (180 / np.pi / 2) + hsv[..., 1] = 255 + hsv[..., 2] = np.minimum(v * 4, 255) + bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR) + return bgr + +def warp_flow(img, flow): + (h, w) = flow.shape[:2] + flow = -flow + flow[:, :, 0] += np.arange(w) + flow[:, :, 1] += np.arange(h)[:, np.newaxis] + res = cv.remap(img, flow, None, cv.INTER_LINEAR) + return res + +def main(): + import sys + print(__doc__) + try: + fn = sys.argv[1] + except IndexError: + fn = 0 + cam = video.create_capture(fn) + (_ret, prev) = cam.read() + prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY) + show_hsv = False + show_glitch = False + use_spatial_propagation = False + use_temporal_propagation = True + cur_glitch = prev.copy() + inst = cv.DISOpticalFlow.create(cv.DISOPTICAL_FLOW_PRESET_MEDIUM) + inst.setUseSpatialPropagation(use_spatial_propagation) + flow = None + while True: + (_ret, img) = cam.read() + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + if flow is not None and use_temporal_propagation: + flow = inst.calc(prevgray, gray, warp_flow(flow, flow)) + else: + flow = inst.calc(prevgray, gray, None) + prevgray = gray + cv.imshow('flow', draw_flow(gray, flow)) + if show_hsv: + cv.imshow('flow HSV', draw_hsv(flow)) + if show_glitch: + cur_glitch = warp_flow(cur_glitch, flow) + cv.imshow('glitch', cur_glitch) + ch = 255 & cv.waitKey(5) + if ch == 27: + break + if ch == ord('1'): + show_hsv = not show_hsv + print('HSV flow visualization is', ['off', 'on'][show_hsv]) + if ch == ord('2'): + show_glitch = not show_glitch + if show_glitch: + cur_glitch = img.copy() + print('glitch is', ['off', 'on'][show_glitch]) + if ch == ord('3'): + use_spatial_propagation = not use_spatial_propagation + inst.setUseSpatialPropagation(use_spatial_propagation) + print('spatial propagation is', ['off', 'on'][use_spatial_propagation]) + if ch == ord('4'): + use_temporal_propagation = not use_temporal_propagation + print('temporal propagation is', ['off', 'on'][use_temporal_propagation]) + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/distrans.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +from common import make_cmap + +def main(): + import sys + try: + fn = sys.argv[1] + except: + fn = 'fruits.jpg' + fn = cv.samples.findFile(fn) + img = cv.imread(fn, cv.IMREAD_GRAYSCALE) + if img is None: + print('Failed to load fn:', fn) + sys.exit(1) + cm = make_cmap('jet') + need_update 
= True + voronoi = False + + def update(dummy=None): + global need_update + need_update = False + thrs = cv.getTrackbarPos('threshold', 'distrans') + mark = cv.Canny(img, thrs, 3 * thrs) + (dist, labels) = cv.distanceTransformWithLabels(~mark, cv.DIST_L2, 5) + if voronoi: + vis = cm[np.uint8(labels)] + else: + vis = cm[np.uint8(dist * 2)] + vis[mark != 0] = 255 + cv.imshow('distrans', vis) + + def invalidate(dummy=None): + global need_update + need_update = True + cv.namedWindow('distrans') + cv.createTrackbar('threshold', 'distrans', 60, 255, invalidate) + update() + while True: + ch = cv.waitKey(50) + if ch == 27: + break + if ch == ord('v'): + voronoi = not voronoi + print('showing', ['distance', 'voronoi'][voronoi]) + update() + if need_update: + update() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/drawing.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv + +def lines(): + for i in range(NUMBER * 2): + (pt1, pt2) = ([], []) + pt1.append(np.random.randint(x1, x2)) + pt1.append(np.random.randint(y1, y2)) + pt2.append(np.random.randint(x1, x2)) + pt2.append(np.random.randint(y1, y2)) + color = '%06x' % np.random.randint(0, 16777215) + color = tuple((int(color[i:i + 2], 16) for i in (0, 2, 4))) + arrowed = np.random.randint(0, 6) + if arrowed < 3: + cv.line(image, tuple(pt1), tuple(pt2), color, np.random.randint(1, 10), lineType) + else: + cv.arrowedLine(image, tuple(pt1), tuple(pt2), color, np.random.randint(1, 10), lineType) + cv.imshow(wndname, image) + if cv.waitKey(DELAY) >= 0: + return + +def rectangle(): + for i in range(NUMBER * 2): + (pt1, pt2) = ([], []) + pt1.append(np.random.randint(x1, x2)) + pt1.append(np.random.randint(y1, y2)) + pt2.append(np.random.randint(x1, x2)) + pt2.append(np.random.randint(y1, y2)) + color = '%06x' % np.random.randint(0, 16777215) + color = tuple((int(color[i:i + 2], 16) for i in (0, 2, 4))) + thickness = np.random.randint(-3, 10) + marker = np.random.randint(0, 10) + marker_size = np.random.randint(30, 80) + if marker > 5: + cv.rectangle(image, tuple(pt1), tuple(pt2), color, max(thickness, -1), lineType) + else: + cv.drawMarker(image, tuple(pt1), color, marker, marker_size) + cv.imshow(wndname, image) + if cv.waitKey(DELAY) >= 0: + return + +def ellipse(): + for i in range(NUMBER * 2): + center = [] + center.append(np.random.randint(x1, x2)) + center.append(np.random.randint(x1, x2)) + axes = [] + axes.append(np.random.randint(0, 200)) + axes.append(np.random.randint(0, 200)) + angle = np.random.randint(0, 180) + color = '%06x' % np.random.randint(0, 16777215) + color = tuple((int(color[i:i + 2], 16) for i in (0, 2, 4))) + thickness = np.random.randint(-1, 9) + cv.ellipse(image, tuple(center), tuple(axes), angle, angle - 100, angle + 200, color, thickness, lineType) + cv.imshow(wndname, image) + if cv.waitKey(DELAY) >= 0: + return + +def polygonal(): + for i in range(NUMBER): + pt = [(0, 0)] * 6 + pt = np.resize(pt, (2, 3, 2)) + pt[0][0][0] = np.random.randint(x1, x2) + pt[0][0][1] = np.random.randint(y1, y2) + pt[0][1][0] = np.random.randint(x1, x2) + pt[0][1][1] = np.random.randint(y1, y2) + pt[0][2][0] = np.random.randint(x1, x2) + pt[0][2][1] = np.random.randint(y1, y2) + pt[1][0][0] = np.random.randint(x1, x2) + pt[1][0][1] = np.random.randint(y1, y2) + pt[1][1][0] = np.random.randint(x1, x2) + pt[1][1][1] = np.random.randint(y1, y2) + pt[1][2][0] = np.random.randint(x1, x2) + pt[1][2][1] = np.random.randint(y1, y2) + 
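+# Aside: the '%06x' round-trip used throughout drawing.py is only one way to pick
+# a random BGR color. A minimal equivalent sketch (the helper name random_color is
+# ours, not the sample's):
+def random_color(rng=np.random):
+    # draw three bytes directly instead of formatting and re-parsing a hex string
+    return tuple(int(c) for c in rng.randint(0, 256, 3))
+# e.g. cv.line(image, tuple(pt1), tuple(pt2), random_color(), 2, lineType) behaves
+# like the hex-string version, up to which RNG draws are consumed.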
color = '%06x' % np.random.randint(0, 16777215) + color = tuple((int(color[i:i + 2], 16) for i in (0, 2, 4))) + alist = [] + for k in pt[0]: + alist.append(k) + for k in pt[1]: + alist.append(k) + ppt = np.array(alist) + cv.polylines(image, [ppt], True, color, thickness=np.random.randint(1, 10), lineType=lineType) + cv.imshow(wndname, image) + if cv.waitKey(DELAY) >= 0: + return + +def fill(): + for i in range(NUMBER): + pt = [(0, 0)] * 6 + pt = np.resize(pt, (2, 3, 2)) + pt[0][0][0] = np.random.randint(x1, x2) + pt[0][0][1] = np.random.randint(y1, y2) + pt[0][1][0] = np.random.randint(x1, x2) + pt[0][1][1] = np.random.randint(y1, y2) + pt[0][2][0] = np.random.randint(x1, x2) + pt[0][2][1] = np.random.randint(y1, y2) + pt[1][0][0] = np.random.randint(x1, x2) + pt[1][0][1] = np.random.randint(y1, y2) + pt[1][1][0] = np.random.randint(x1, x2) + pt[1][1][1] = np.random.randint(y1, y2) + pt[1][2][0] = np.random.randint(x1, x2) + pt[1][2][1] = np.random.randint(y1, y2) + color = '%06x' % np.random.randint(0, 16777215) + color = tuple((int(color[i:i + 2], 16) for i in (0, 2, 4))) + alist = [] + for k in pt[0]: + alist.append(k) + for k in pt[1]: + alist.append(k) + ppt = np.array(alist) + cv.fillPoly(image, [ppt], color, lineType) + cv.imshow(wndname, image) + if cv.waitKey(DELAY) >= 0: + return + +def circles(): + for i in range(NUMBER): + center = [] + center.append(np.random.randint(x1, x2)) + center.append(np.random.randint(x1, x2)) + color = '%06x' % np.random.randint(0, 16777215) + color = tuple((int(color[i:i + 2], 16) for i in (0, 2, 4))) + cv.circle(image, tuple(center), np.random.randint(0, 300), color, np.random.randint(-1, 9), lineType) + cv.imshow(wndname, image) + if cv.waitKey(DELAY) >= 0: + return + +def string(): + for i in range(NUMBER): + org = [] + org.append(np.random.randint(x1, x2)) + org.append(np.random.randint(x1, x2)) + color = '%06x' % np.random.randint(0, 16777215) + color = tuple((int(color[i:i + 2], 16) for i in (0, 2, 4))) + cv.putText(image, 'Testing text rendering', tuple(org), np.random.randint(0, 8), np.random.randint(0, 100) * 0.05 + 0.1, color, np.random.randint(1, 10), lineType) + cv.imshow(wndname, image) + if cv.waitKey(DELAY) >= 0: + return + +def string1(): + textsize = cv.getTextSize('OpenCV forever!', cv.FONT_HERSHEY_COMPLEX, 3, 5) + org = (int((width - textsize[0][0]) / 2), int((height - textsize[0][1]) / 2)) + for i in range(0, 255, 2): + image2 = np.array(image) - i + cv.putText(image2, 'OpenCV forever!', org, cv.FONT_HERSHEY_COMPLEX, 3, (i, i, 255), 5, lineType) + cv.imshow(wndname, image2) + if cv.waitKey(DELAY) >= 0: + return +if __name__ == '__main__': + print(__doc__) + wndname = 'Drawing Demo' + NUMBER = 100 + DELAY = 5 + (width, height) = (1000, 700) + lineType = cv.LINE_AA + (x1, x2, y1, y2) = (-width / 2, width * 3 / 2, -height / 2, height * 3 / 2) + image = np.zeros((height, width, 3), dtype=np.uint8) + cv.imshow(wndname, image) + cv.waitKey(DELAY) + lines() + rectangle() + ellipse() + polygonal() + fill() + circles() + string() + string1() + cv.waitKey(0) + cv.destroyAllWindows() + +# File: opencv-master/samples/python/edge.py +"""""" +from __future__ import print_function +import cv2 as cv +import numpy as np +import video +import sys + +def main(): + try: + fn = sys.argv[1] + except: + fn = 0 + + def nothing(*arg): + pass + cv.namedWindow('edge') + cv.createTrackbar('thrs1', 'edge', 2000, 5000, nothing) + cv.createTrackbar('thrs2', 'edge', 4000, 5000, nothing) + cap = video.create_capture(fn) + while True: + (_flag, img) = cap.read() + 
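+# Aside: the two trackbars in edge.py feed cv.Canny directly. A common heuristic
+# (not part of this sample) derives both hysteresis thresholds from the image
+# median instead; a sketch under that usual 'auto-Canny' assumption:
+def auto_canny(gray, sigma=0.33):
+    v = np.median(gray)                    # typical brightness level
+    lo = int(max(0, (1.0 - sigma) * v))    # lower hysteresis threshold
+    hi = int(min(255, (1.0 + sigma) * v))  # upper hysteresis threshold
+    return cv.Canny(gray, lo, hi)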
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + thrs1 = cv.getTrackbarPos('thrs1', 'edge') + thrs2 = cv.getTrackbarPos('thrs2', 'edge') + edge = cv.Canny(gray, thrs1, thrs2, apertureSize=5) + vis = img.copy() + vis = np.uint8(vis / 2.0) + vis[edge != 0] = (0, 255, 0) + cv.imshow('edge', vis) + ch = cv.waitKey(5) + if ch == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/essential_mat_reconstr.py +import numpy as np, cv2 as cv, matplotlib.pyplot as plt, time, sys, os +from mpl_toolkits.mplot3d import axes3d, Axes3D + +def getEpipolarError(F, pts1_, pts2_, inliers): + pts1 = np.concatenate((pts1_.T, np.ones((1, pts1_.shape[0]))))[:, inliers] + pts2 = np.concatenate((pts2_.T, np.ones((1, pts2_.shape[0]))))[:, inliers] + lines2 = np.dot(F, pts1) + lines1 = np.dot(F.T, pts2) + return np.median((np.abs(np.sum(pts1 * lines1, axis=0)) / np.sqrt(lines1[0, :] ** 2 + lines1[1, :] ** 2) + np.abs(np.sum(pts2 * lines2, axis=0)) / np.sqrt(lines2[0, :] ** 2 + lines2[1, :] ** 2)) / 2) +if __name__ == '__main__': + if len(sys.argv) < 3: + print('Path to data file and directory to image files are missing!\nData file must have format:\n--------------\n image_name_1\nimage_name_2\nk11 k12 k13\n0 k22 k23\n0 0 1\n--------------\nIf image_name_{1,2} are not in the same directory as the data file then add argument with directory to image files.\nFor example: python essential_mat_reconstr.py essential_mat_data.txt ./') + exit(1) + else: + data_file = sys.argv[1] + image_dir = sys.argv[2] + if not os.path.isfile(data_file): + print('Incorrect path to data file!') + exit(1) + with open(data_file, 'r') as f: + image1 = cv.imread(image_dir + f.readline()[:-1]) + image2 = cv.imread(image_dir + f.readline()[:-1]) + K = np.array([[float(x) for x in f.readline().split(' ')], [float(x) for x in f.readline().split(' ')], [float(x) for x in f.readline().split(' ')]]) + if image1 is None or image2 is None: + print('Incorrect directory to images!') + exit(1) + if K.shape != (3, 3): + print('Intrinsic matrix has incorrect format!') + exit(1) + print('find keypoints and compute descriptors') + detector = cv.SIFT_create(nfeatures=20000) + (keypoints1, descriptors1) = detector.detectAndCompute(cv.cvtColor(image1, cv.COLOR_BGR2GRAY), None) + (keypoints2, descriptors2) = detector.detectAndCompute(cv.cvtColor(image2, cv.COLOR_BGR2GRAY), None) + matcher = cv.FlannBasedMatcher(dict(algorithm=0, trees=5), dict(checks=32)) + print('match with FLANN, size of descriptors', descriptors1.shape, descriptors2.shape) + matches_vector = matcher.knnMatch(descriptors1, descriptors2, k=2) + print('find good keypoints') + pts1 = [] + pts2 = [] + for m in matches_vector: + if m[0].distance / m[1].distance < 0.75: + pts1.append(keypoints1[m[0].queryIdx].pt) + pts2.append(keypoints2[m[0].trainIdx].pt) + pts1 = np.array(pts1) + pts2 = np.array(pts2) + print('points size', pts1.shape[0]) + print('Essential matrix RANSAC') + start = time.time() + (E, inliers) = cv.findEssentialMat(pts1, pts2, K, cv.RANSAC, 0.999, 1.0) + print('RANSAC time', time.time() - start, 'seconds') + print('Median error to epipolar lines', getEpipolarError(np.dot(np.linalg.inv(K).T, np.dot(E, np.linalg.inv(K))), pts1, pts2, inliers.squeeze()), 'number of inliers', inliers.sum()) + print('Decompose essential matrix') + (R1, R2, t) = cv.decomposeEssentialMat(E) + P1 = np.concatenate((K, np.zeros((3, 1))), axis=1) + P2s = [np.dot(K, np.concatenate((R1, t), axis=1)), np.dot(K, 
np.concatenate((R1, -t), axis=1)), np.dot(K, np.concatenate((R2, t), axis=1)), np.dot(K, np.concatenate((R2, -t), axis=1))] + obj_pts_per_cam = [] + for (cam_idx, P2) in enumerate(P2s): + obj_pts = [] + for (i, (pt1, pt2)) in enumerate(zip(pts1, pts2)): + if not inliers[i]: + continue + obj_pt = cv.triangulatePoints(P1, P2, pt1, pt2) + obj_pt /= obj_pt[3] + if obj_pt[2] > 0: + obj_pts.append([obj_pt[0], obj_pt[1], obj_pt[2]]) + obj_pts_per_cam.append(obj_pts) + best_cam_idx = np.array([len(obj_pts_per_cam[0]), len(obj_pts_per_cam[1]), len(obj_pts_per_cam[2]), len(obj_pts_per_cam[3])]).argmax() + max_pts = len(obj_pts_per_cam[best_cam_idx]) + print('Number of object points', max_pts) + MAX_DEPTH = 6.0 + obj_pts = [] + for pt in obj_pts_per_cam[best_cam_idx]: + if pt[2] < MAX_DEPTH: + obj_pts.append(pt) + obj_pts = np.array(obj_pts).reshape(len(obj_pts), 3) + for (i, (pt1, pt2)) in enumerate(zip(pts1, pts2)): + if inliers[i]: + cv.circle(image1, (int(pt1[0]), int(pt1[1])), 7, (255, 0, 0), -1) + cv.circle(image2, (int(pt2[0]), int(pt2[1])), 7, (255, 0, 0), -1) + image1 = np.concatenate((image1, image2), axis=1) + new_img_size = 1200.0 * 800.0 + image1 = cv.resize(image1, (int(np.sqrt(image1.shape[1] * new_img_size / image1.shape[0])), int(np.sqrt(image1.shape[0] * new_img_size / image1.shape[1])))) + fig = plt.figure(figsize=(13.0, 11.0)) + ax = fig.add_subplot(111, projection='3d') + ax.set_aspect('equal') + ax.scatter(obj_pts[:, 0], obj_pts[:, 1], obj_pts[:, 2], c='r', marker='o', s=3) + ax.set_xlabel('x') + ax.set_ylabel('y') + ax.set_zlabel('depth') + ax.view_init(azim=-80, elev=110) + cv.imshow('matches', image1) + cv.imwrite('matches_E.png', image1) + plt.savefig('reconstruction_3D.png') + cv.waitKey(0) + plt.show() + +# File: opencv-master/samples/python/facedetect.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +from video import create_capture +from common import clock, draw_str + +def detect(img, cascade): + rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE) + if len(rects) == 0: + return [] + rects[:, 2:] += rects[:, :2] + return rects + +def draw_rects(img, rects, color): + for (x1, y1, x2, y2) in rects: + cv.rectangle(img, (x1, y1), (x2, y2), color, 2) + +def main(): + import sys, getopt + (args, video_src) = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade=']) + try: + video_src = video_src[0] + except: + video_src = 0 + args = dict(args) + cascade_fn = args.get('--cascade', 'haarcascades/haarcascade_frontalface_alt.xml') + nested_fn = args.get('--nested-cascade', 'haarcascades/haarcascade_eye.xml') + cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn)) + nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn)) + cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('lena.jpg'))) + while True: + (_ret, img) = cam.read() + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + gray = cv.equalizeHist(gray) + t = clock() + rects = detect(gray, cascade) + vis = img.copy() + draw_rects(vis, rects, (0, 255, 0)) + if not nested.empty(): + for (x1, y1, x2, y2) in rects: + roi = gray[y1:y2, x1:x2] + vis_roi = vis[y1:y2, x1:x2] + subrects = detect(roi.copy(), nested) + draw_rects(vis_roi, subrects, (255, 0, 0)) + dt = clock() - t + draw_str(vis, (20, 20), 'time: %.1f ms' % (dt * 1000)) + cv.imshow('facedetect', vis) + if cv.waitKey(5) == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + 
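+# A minimal single-image sketch of the cascade API used above ('group.jpg' is a
+# placeholder file name, not part of the sample):
+def detect_once(img_fn='group.jpg', cascade_fn='haarcascades/haarcascade_frontalface_alt.xml'):
+    img = cv.imread(img_fn)
+    gray = cv.equalizeHist(cv.cvtColor(img, cv.COLOR_BGR2GRAY))
+    cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
+    # detectMultiScale returns (x, y, w, h) boxes; detect() above turns them into
+    # (x1, y1, x2, y2) corners by adding the origin columns onto the size columns.
+    for (x, y, w, h) in cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30)):
+        cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
+    return img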
cv.destroyAllWindows() + +# File: opencv-master/samples/python/feature_homography.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import video +from video import presets +import common +from common import getsize, draw_keypoints +from plane_tracker import PlaneTracker + +class App: + + def __init__(self, src): + self.cap = video.create_capture(src, presets['book']) + self.frame = None + self.paused = False + self.tracker = PlaneTracker() + cv.namedWindow('plane') + self.rect_sel = common.RectSelector('plane', self.on_rect) + + def on_rect(self, rect): + self.tracker.clear() + self.tracker.add_target(self.frame, rect) + + def run(self): + while True: + playing = not self.paused and (not self.rect_sel.dragging) + if playing or self.frame is None: + (ret, frame) = self.cap.read() + if not ret: + break + self.frame = frame.copy() + (w, h) = getsize(self.frame) + vis = np.zeros((h, w * 2, 3), np.uint8) + vis[:h, :w] = self.frame + if len(self.tracker.targets) > 0: + target = self.tracker.targets[0] + vis[:, w:] = target.image + draw_keypoints(vis[:, w:], target.keypoints) + (x0, y0, x1, y1) = target.rect + cv.rectangle(vis, (x0 + w, y0), (x1 + w, y1), (0, 255, 0), 2) + if playing: + tracked = self.tracker.track(self.frame) + if len(tracked) > 0: + tracked = tracked[0] + cv.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2) + for ((x0, y0), (x1, y1)) in zip(np.int32(tracked.p0), np.int32(tracked.p1)): + cv.line(vis, (x0 + w, y0), (x1, y1), (0, 255, 0)) + draw_keypoints(vis, self.tracker.frame_points) + self.rect_sel.draw(vis) + cv.imshow('plane', vis) + ch = cv.waitKey(1) + if ch == ord(' '): + self.paused = not self.paused + if ch == 27: + break +if __name__ == '__main__': + print(__doc__) + import sys + try: + video_src = sys.argv[1] + except: + video_src = 0 + App(video_src).run() + +# File: opencv-master/samples/python/find_obj.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +from common import anorm, getsize +FLANN_INDEX_KDTREE = 1 +FLANN_INDEX_LSH = 6 + +def init_feature(name): + chunks = name.split('-') + if chunks[0] == 'sift': + detector = cv.SIFT_create() + norm = cv.NORM_L2 + elif chunks[0] == 'surf': + detector = cv.xfeatures2d.SURF_create(800) + norm = cv.NORM_L2 + elif chunks[0] == 'orb': + detector = cv.ORB_create(400) + norm = cv.NORM_HAMMING + elif chunks[0] == 'akaze': + detector = cv.AKAZE_create() + norm = cv.NORM_HAMMING + elif chunks[0] == 'brisk': + detector = cv.BRISK_create() + norm = cv.NORM_HAMMING + else: + return (None, None) + if 'flann' in chunks: + if norm == cv.NORM_L2: + flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5) + else: + flann_params = dict(algorithm=FLANN_INDEX_LSH, table_number=6, key_size=12, multi_probe_level=1) + matcher = cv.FlannBasedMatcher(flann_params, {}) + else: + matcher = cv.BFMatcher(norm) + return (detector, matcher) + +def filter_matches(kp1, kp2, matches, ratio=0.75): + (mkp1, mkp2) = ([], []) + for m in matches: + if len(m) == 2 and m[0].distance < m[1].distance * ratio: + m = m[0] + mkp1.append(kp1[m.queryIdx]) + mkp2.append(kp2[m.trainIdx]) + p1 = np.float32([kp.pt for kp in mkp1]) + p2 = np.float32([kp.pt for kp in mkp2]) + kp_pairs = zip(mkp1, mkp2) + return (p1, p2, list(kp_pairs)) + +def explore_match(win, img1, img2, kp_pairs, status=None, H=None): + (h1, w1) = img1.shape[:2] + (h2, w2) = img2.shape[:2] + vis = np.zeros((max(h1, h2), w1 + w2), np.uint8) + vis[:h1, :w1] = img1 + vis[:h2, w1:w1 + w2] = img2 + vis = 
cv.cvtColor(vis, cv.COLOR_GRAY2BGR) + if H is not None: + corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]]) + corners = np.int32(cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0)) + cv.polylines(vis, [corners], True, (255, 255, 255)) + if status is None: + status = np.ones(len(kp_pairs), np.bool_) + status = status.reshape((len(kp_pairs), 1)) + (p1, p2) = ([], []) + for kpp in kp_pairs: + p1.append(np.int32(kpp[0].pt)) + p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0])) + green = (0, 255, 0) + red = (0, 0, 255) + kp_color = (51, 103, 236) + for ((x1, y1), (x2, y2), inlier) in zip(p1, p2, status): + if inlier: + col = green + cv.circle(vis, (x1, y1), 2, col, -1) + cv.circle(vis, (x2, y2), 2, col, -1) + else: + col = red + r = 2 + thickness = 3 + cv.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), col, thickness) + cv.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), col, thickness) + cv.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), col, thickness) + cv.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), col, thickness) + vis0 = vis.copy() + for ((x1, y1), (x2, y2), inlier) in zip(p1, p2, status): + if inlier: + cv.line(vis, (x1, y1), (x2, y2), green) + cv.imshow(win, vis) + + def onmouse(event, x, y, flags, param): + cur_vis = vis + if flags & cv.EVENT_FLAG_LBUTTON: + cur_vis = vis0.copy() + r = 8 + m = (anorm(np.array(p1) - (x, y)) < r) | (anorm(np.array(p2) - (x, y)) < r) + idxs = np.where(m)[0] + (kp1s, kp2s) = ([], []) + for i in idxs: + ((x1, y1), (x2, y2)) = (p1[i], p2[i]) + col = (red, green)[status[i][0]] + cv.line(cur_vis, (x1, y1), (x2, y2), col) + (kp1, kp2) = kp_pairs[i] + kp1s.append(kp1) + kp2s.append(kp2) + cur_vis = cv.drawKeypoints(cur_vis, kp1s, None, flags=4, color=kp_color) + cur_vis[:, w1:] = cv.drawKeypoints(cur_vis[:, w1:], kp2s, None, flags=4, color=kp_color) + cv.imshow(win, cur_vis) + cv.setMouseCallback(win, onmouse) + return vis + +def main(): + import sys, getopt + (opts, args) = getopt.getopt(sys.argv[1:], '', ['feature=']) + opts = dict(opts) + feature_name = opts.get('--feature', 'brisk') + try: + (fn1, fn2) = args + except: + fn1 = 'box.png' + fn2 = 'box_in_scene.png' + img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE) + img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE) + (detector, matcher) = init_feature(feature_name) + if img1 is None: + print('Failed to load fn1:', fn1) + sys.exit(1) + if img2 is None: + print('Failed to load fn2:', fn2) + sys.exit(1) + if detector is None: + print('unknown feature:', feature_name) + sys.exit(1) + print('using', feature_name) + (kp1, desc1) = detector.detectAndCompute(img1, None) + (kp2, desc2) = detector.detectAndCompute(img2, None) + print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))) + + def match_and_draw(win): + print('matching...') + raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2) + (p1, p2, kp_pairs) = filter_matches(kp1, kp2, raw_matches) + if len(p1) >= 4: + (H, status) = cv.findHomography(p1, p2, cv.RANSAC, 5.0) + print('%d / %d inliers/matched' % (np.sum(status), len(status))) + else: + (H, status) = (None, None) + print('%d matches found, not enough for homography estimation' % len(p1)) + _vis = explore_match(win, img1, img2, kp_pairs, status, H) + match_and_draw('find_obj') + cv.waitKey() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/fitline.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] 
== 3 +import numpy as np +import cv2 as cv +import itertools as it +from common import draw_str +(w, h) = (512, 256) + +def toint(p): + return tuple(map(int, p)) + +def sample_line(p1, p2, n, noise=0.0): + p1 = np.float32(p1) + t = np.random.rand(n, 1) + return p1 + (p2 - p1) * t + np.random.normal(size=(n, 2)) * noise +dist_func_names = it.cycle('DIST_L2 DIST_L1 DIST_L12 DIST_FAIR DIST_WELSCH DIST_HUBER'.split()) +if PY3: + cur_func_name = next(dist_func_names) +else: + cur_func_name = dist_func_names.next() + +def update(_=None): + noise = cv.getTrackbarPos('noise', 'fit line') + n = cv.getTrackbarPos('point n', 'fit line') + r = cv.getTrackbarPos('outlier %', 'fit line') / 100.0 + outn = int(n * r) + (p0, p1) = ((90, 80), (w - 90, h - 80)) + img = np.zeros((h, w, 3), np.uint8) + cv.line(img, toint(p0), toint(p1), (0, 255, 0)) + if n > 0: + line_points = sample_line(p0, p1, n - outn, noise) + outliers = np.random.rand(outn, 2) * (w, h) + points = np.vstack([line_points, outliers]) + for p in line_points: + cv.circle(img, toint(p), 2, (255, 255, 255), -1) + for p in outliers: + cv.circle(img, toint(p), 2, (64, 64, 255), -1) + func = getattr(cv, cur_func_name) + (vx, vy, cx, cy) = cv.fitLine(np.float32(points), func, 0, 0.01, 0.01) + cv.line(img, (int(cx - vx * w), int(cy - vy * w)), (int(cx + vx * w), int(cy + vy * w)), (0, 0, 255)) + draw_str(img, (20, 20), cur_func_name) + cv.imshow('fit line', img) + +def main(): + cv.namedWindow('fit line') + cv.createTrackbar('noise', 'fit line', 3, 50, update) + cv.createTrackbar('point n', 'fit line', 100, 500, update) + cv.createTrackbar('outlier %', 'fit line', 30, 100, update) + while True: + update() + ch = cv.waitKey(0) + if ch == ord('f'): + global cur_func_name + if PY3: + cur_func_name = next(dist_func_names) + else: + cur_func_name = dist_func_names.next() + if ch == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/floodfill.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import sys + +class App: + + def update(self, dummy=None): + if self.seed_pt is None: + cv.imshow('floodfill', self.img) + return + flooded = self.img.copy() + self.mask[:] = 0 + lo = cv.getTrackbarPos('lo', 'floodfill') + hi = cv.getTrackbarPos('hi', 'floodfill') + flags = self.connectivity + if self.fixed_range: + flags |= cv.FLOODFILL_FIXED_RANGE + cv.floodFill(flooded, self.mask, self.seed_pt, (255, 255, 255), (lo,) * 3, (hi,) * 3, flags) + cv.circle(flooded, self.seed_pt, 2, (0, 0, 255), -1) + cv.imshow('floodfill', flooded) + + def onmouse(self, event, x, y, flags, param): + if flags & cv.EVENT_FLAG_LBUTTON: + self.seed_pt = (x, y) + self.update() + + def run(self): + try: + fn = sys.argv[1] + except: + fn = 'fruits.jpg' + self.img = cv.imread(cv.samples.findFile(fn)) + if self.img is None: + print('Failed to load image file:', fn) + sys.exit(1) + (h, w) = self.img.shape[:2] + self.mask = np.zeros((h + 2, w + 2), np.uint8) + self.seed_pt = None + self.fixed_range = True + self.connectivity = 4 + self.update() + cv.setMouseCallback('floodfill', self.onmouse) + cv.createTrackbar('lo', 'floodfill', 20, 255, self.update) + cv.createTrackbar('hi', 'floodfill', 20, 255, self.update) + while True: + ch = cv.waitKey() + if ch == 27: + break + if ch == ord('f'): + self.fixed_range = not self.fixed_range + print('using %s range' % ('floating', 'fixed')[self.fixed_range]) + self.update() + if ch == ord('c'): + self.connectivity = 12 - 
self.connectivity + print('connectivity =', self.connectivity) + self.update() + print('Done') +if __name__ == '__main__': + print(__doc__) + App().run() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/gabor_threads.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +from multiprocessing.pool import ThreadPool + +def build_filters(): + filters = [] + ksize = 31 + for theta in np.arange(0, np.pi, np.pi / 16): + kern = cv.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv.CV_32F) + kern /= 1.5 * kern.sum() + filters.append(kern) + return filters + +def process(img, filters): + accum = np.zeros_like(img) + for kern in filters: + fimg = cv.filter2D(img, cv.CV_8UC3, kern) + np.maximum(accum, fimg, accum) + return accum + +def process_threaded(img, filters, threadn=8): + accum = np.zeros_like(img) + + def f(kern): + return cv.filter2D(img, cv.CV_8UC3, kern) + pool = ThreadPool(processes=threadn) + for fimg in pool.imap_unordered(f, filters): + np.maximum(accum, fimg, accum) + return accum + +def main(): + import sys + from common import Timer + try: + img_fn = sys.argv[1] + except: + img_fn = 'baboon.jpg' + img = cv.imread(cv.samples.findFile(img_fn)) + if img is None: + print('Failed to load image file:', img_fn) + sys.exit(1) + filters = build_filters() + with Timer('running single-threaded'): + res1 = process(img, filters) + with Timer('running multi-threaded'): + res2 = process_threaded(img, filters) + print('res1 == res2: ', (res1 == res2).all()) + cv.imshow('img', img) + cv.imshow('result', res2) + cv.waitKey() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/gaussian_mix.py +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv +from numpy import random + +def make_gaussians(cluster_n, img_size): + points = [] + ref_distrs = [] + for _i in xrange(cluster_n): + mean = (0.1 + 0.8 * random.rand(2)) * img_size + a = (random.rand(2, 2) - 0.5) * img_size * 0.1 + cov = np.dot(a.T, a) + img_size * 0.05 * np.eye(2) + n = 100 + random.randint(900) + pts = random.multivariate_normal(mean, cov, n) + points.append(pts) + ref_distrs.append((mean, cov)) + points = np.float32(np.vstack(points)) + return (points, ref_distrs) + +def draw_gaussain(img, mean, cov, color): + (x, y) = mean + (w, u, _vt) = cv.SVDecomp(cov) + ang = np.arctan2(u[1, 0], u[0, 0]) * (180 / np.pi) + (s1, s2) = np.sqrt(w) * 3.0 + cv.ellipse(img, (int(x), int(y)), (int(s1), int(s2)), ang, 0, 360, color, 1, cv.LINE_AA) + +def main(): + cluster_n = 5 + img_size = 512 + print('press any key to update distributions, ESC - exit\n') + while True: + print('sampling distributions...') + (points, ref_distrs) = make_gaussians(cluster_n, img_size) + print('EM (opencv) ...') + em = cv.ml.EM_create() + em.setClustersNumber(cluster_n) + em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC) + em.trainEM(points) + means = em.getMeans() + covs = em.getCovs() + found_distrs = zip(means, covs) + print('ready!\n') + img = np.zeros((img_size, img_size, 3), np.uint8) + for (x, y) in np.int32(points): + cv.circle(img, (x, y), 1, (255, 255, 255), -1) + for (m, cov) in ref_distrs: + draw_gaussain(img, m, cov, (0, 255, 0)) + for (m, cov) in found_distrs: + draw_gaussain(img, m, cov, (0, 0, 255)) + cv.imshow('gaussian mixture', img) + ch = cv.waitKey(0) + if ch == 27: + break + print('Done') +if __name__ == '__main__': + 
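+# Note on draw_gaussain above: cv.SVDecomp of the 2x2 covariance yields the
+# ellipse orientation and 3-sigma half-axes that get drawn. An equivalent
+# eigen-decomposition sketch (the helper name cov_to_ellipse is ours):
+def cov_to_ellipse(cov):
+    w, v = np.linalg.eigh(cov)          # eigenvalues ascending, columns are axes
+    ang = np.degrees(np.arctan2(v[1, 1], v[0, 1]))  # direction of the major axis
+    s2, s1 = np.sqrt(w) * 3.0           # 3-sigma half-axes (minor, major)
+    return s1, s2, ang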
print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/grabcut.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import sys + +class App: + BLUE = [255, 0, 0] + RED = [0, 0, 255] + GREEN = [0, 255, 0] + BLACK = [0, 0, 0] + WHITE = [255, 255, 255] + DRAW_BG = {'color': BLACK, 'val': 0} + DRAW_FG = {'color': WHITE, 'val': 1} + DRAW_PR_BG = {'color': RED, 'val': 2} + DRAW_PR_FG = {'color': GREEN, 'val': 3} + rect = (0, 0, 1, 1) + drawing = False + rectangle = False + rect_over = False + rect_or_mask = 100 + value = DRAW_FG + thickness = 3 + + def onmouse(self, event, x, y, flags, param): + if event == cv.EVENT_RBUTTONDOWN: + self.rectangle = True + (self.ix, self.iy) = (x, y) + elif event == cv.EVENT_MOUSEMOVE: + if self.rectangle == True: + self.img = self.img2.copy() + cv.rectangle(self.img, (self.ix, self.iy), (x, y), self.BLUE, 2) + self.rect = (min(self.ix, x), min(self.iy, y), abs(self.ix - x), abs(self.iy - y)) + self.rect_or_mask = 0 + elif event == cv.EVENT_RBUTTONUP: + self.rectangle = False + self.rect_over = True + cv.rectangle(self.img, (self.ix, self.iy), (x, y), self.BLUE, 2) + self.rect = (min(self.ix, x), min(self.iy, y), abs(self.ix - x), abs(self.iy - y)) + self.rect_or_mask = 0 + print(" Now press the key 'n' a few times until no further change \n") + if event == cv.EVENT_LBUTTONDOWN: + if self.rect_over == False: + print('first draw rectangle \n') + else: + self.drawing = True + cv.circle(self.img, (x, y), self.thickness, self.value['color'], -1) + cv.circle(self.mask, (x, y), self.thickness, self.value['val'], -1) + elif event == cv.EVENT_MOUSEMOVE: + if self.drawing == True: + cv.circle(self.img, (x, y), self.thickness, self.value['color'], -1) + cv.circle(self.mask, (x, y), self.thickness, self.value['val'], -1) + elif event == cv.EVENT_LBUTTONUP: + if self.drawing == True: + self.drawing = False + cv.circle(self.img, (x, y), self.thickness, self.value['color'], -1) + cv.circle(self.mask, (x, y), self.thickness, self.value['val'], -1) + + def run(self): + if len(sys.argv) == 2: + filename = sys.argv[1] + else: + print('No input image given, so loading default image, lena.jpg \n') + print('Correct Usage: python grabcut.py <filename> \n') + filename = 'lena.jpg' + self.img = cv.imread(cv.samples.findFile(filename)) + self.img2 = self.img.copy() + self.mask = np.zeros(self.img.shape[:2], dtype=np.uint8) + self.output = np.zeros(self.img.shape, np.uint8) + cv.namedWindow('output') + cv.namedWindow('input') + cv.setMouseCallback('input', self.onmouse) + cv.moveWindow('input', self.img.shape[1] + 10, 90) + print(' Instructions: \n') + print(' Draw a rectangle around the object using right mouse button \n') + while 1: + cv.imshow('output', self.output) + cv.imshow('input', self.img) + k = cv.waitKey(1) + if k == 27: + break + elif k == ord('0'): + print(' mark background regions with left mouse button \n') + self.value = self.DRAW_BG + elif k == ord('1'): + print(' mark foreground regions with left mouse button \n') + self.value = self.DRAW_FG + elif k == ord('2'): + self.value = self.DRAW_PR_BG + elif k == ord('3'): + self.value = self.DRAW_PR_FG + elif k == ord('s'): + bar = np.zeros((self.img.shape[0], 5, 3), np.uint8) + res = np.hstack((self.img2, bar, self.img, bar, self.output)) + cv.imwrite('grabcut_output.png', res) + print(' Result saved as image \n') + elif k == ord('r'): + print('resetting \n') + self.rect = (0, 0, 1, 1) + self.drawing = False + self.rectangle = False + self.rect_or_mask = 100 + 
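+# Note: the mask values 0..3 painted by onmouse above match cv.GC_BGD, cv.GC_FGD,
+# cv.GC_PR_BGD and cv.GC_PR_FGD. A minimal non-interactive sketch of the same call
+# (rect is a hypothetical region of interest; the helper name is ours):
+def grabcut_once(img, rect, iters=5):
+    mask = np.zeros(img.shape[:2], np.uint8)
+    bgdmodel = np.zeros((1, 65), np.float64)   # internal background model
+    fgdmodel = np.zeros((1, 65), np.float64)   # internal foreground model
+    cv.grabCut(img, mask, rect, bgdmodel, fgdmodel, iters, cv.GC_INIT_WITH_RECT)
+    # keep definite and probable foreground, as run() does with np.where below
+    keep = ((mask == cv.GC_FGD) | (mask == cv.GC_PR_FGD)).astype('uint8') * 255
+    return cv.bitwise_and(img, img, mask=keep)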
self.rect_over = False + self.value = self.DRAW_FG + self.img = self.img2.copy() + self.mask = np.zeros(self.img.shape[:2], dtype=np.uint8) + self.output = np.zeros(self.img.shape, np.uint8) + elif k == ord('n'): + print(" For finer touchups, mark foreground and background after pressing keys 0-3\n and again press 'n' \n") + try: + bgdmodel = np.zeros((1, 65), np.float64) + fgdmodel = np.zeros((1, 65), np.float64) + if self.rect_or_mask == 0: + cv.grabCut(self.img2, self.mask, self.rect, bgdmodel, fgdmodel, 1, cv.GC_INIT_WITH_RECT) + self.rect_or_mask = 1 + elif self.rect_or_mask == 1: + cv.grabCut(self.img2, self.mask, self.rect, bgdmodel, fgdmodel, 1, cv.GC_INIT_WITH_MASK) + except: + import traceback + traceback.print_exc() + mask2 = np.where((self.mask == 1) + (self.mask == 3), 255, 0).astype('uint8') + self.output = cv.bitwise_and(self.img2, self.img2, mask=mask2) + print('Done') +if __name__ == '__main__': + print(__doc__) + App().run() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/hist.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +bins = np.arange(256).reshape(256, 1) + +def hist_curve(im): + h = np.zeros((300, 256, 3)) + if len(im.shape) == 2: + color = [(255, 255, 255)] + elif im.shape[2] == 3: + color = [(255, 0, 0), (0, 255, 0), (0, 0, 255)] + for (ch, col) in enumerate(color): + hist_item = cv.calcHist([im], [ch], None, [256], [0, 256]) + cv.normalize(hist_item, hist_item, 0, 255, cv.NORM_MINMAX) + hist = np.int32(np.around(hist_item)) + pts = np.int32(np.column_stack((bins, hist))) + cv.polylines(h, [pts], False, col) + y = np.flipud(h) + return y + +def hist_lines(im): + h = np.zeros((300, 256, 3)) + if len(im.shape) != 2: + print('hist_lines applicable only for grayscale images') + im = cv.cvtColor(im, cv.COLOR_BGR2GRAY) + hist_item = cv.calcHist([im], [0], None, [256], [0, 256]) + cv.normalize(hist_item, hist_item, 0, 255, cv.NORM_MINMAX) + hist = np.int32(np.around(hist_item)) + for (x, y) in enumerate(hist): + cv.line(h, (x, 0), (x, y[0]), (255, 255, 255)) + y = np.flipud(h) + return y + +def main(): + import sys + if len(sys.argv) > 1: + fname = sys.argv[1] + else: + fname = 'lena.jpg' + print('usage : python hist.py <image_file>') + im = cv.imread(cv.samples.findFile(fname)) + if im is None: + print('Failed to load image file:', fname) + sys.exit(1) + gray = cv.cvtColor(im, cv.COLOR_BGR2GRAY) + print(' Histogram plotting \n\n Keymap :\n\n a - show histogram for color image in curve mode \n\n b - show histogram in bin mode \n\n c - show equalized histogram (always in bin mode) \n\n d - show histogram for gray image in curve mode \n\n e - show histogram for a normalized image in curve mode \n\n Esc - exit \n\n ') + cv.imshow('image', im) + while True: + k = cv.waitKey(0) + if k == ord('a'): + curve = hist_curve(im) + cv.imshow('histogram', curve) + cv.imshow('image', im) + print('a') + elif k == ord('b'): + print('b') + lines = hist_lines(im) + cv.imshow('histogram', lines) + cv.imshow('image', gray) + elif k == ord('c'): + print('c') + equ = cv.equalizeHist(gray) + lines = hist_lines(equ) + cv.imshow('histogram', lines) + cv.imshow('image', equ) + elif k == ord('d'): + print('d') + curve = hist_curve(gray) + cv.imshow('histogram', curve) + cv.imshow('image', gray) + elif k == ord('e'): + print('e') + norm = cv.normalize(gray, gray, alpha=0, beta=255, norm_type=cv.NORM_MINMAX) + lines = hist_lines(norm) + cv.imshow('histogram', lines) + cv.imshow('image', norm) + elif k == 27: + print('ESC') + cv.destroyAllWindows() + 
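+# Aside: every display mode in hist.py reduces to cv.calcHist on one channel. A
+# minimal grayscale sketch equivalent to what hist_lines computes before drawing:
+def gray_hist(im):
+    h = cv.calcHist([im], [0], None, [256], [0, 256])  # 256x1 float32 bin counts
+    cv.normalize(h, h, 0, 255, cv.NORM_MINMAX)         # rescale for plotting
+    return np.int32(np.around(h))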
break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/houghcircles.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import sys + +def main(): + try: + fn = sys.argv[1] + except IndexError: + fn = 'board.jpg' + src = cv.imread(cv.samples.findFile(fn)) + img = cv.cvtColor(src, cv.COLOR_BGR2GRAY) + img = cv.medianBlur(img, 5) + cimg = src.copy() + circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30) + if circles is not None: + circles = np.uint16(np.around(circles)) + (_a, b, _c) = circles.shape + for i in range(b): + cv.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv.LINE_AA) + cv.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv.LINE_AA) + cv.imshow('detected circles', cimg) + cv.imshow('source', src) + cv.waitKey(0) + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/houghlines.py +"""""" +from __future__ import print_function +import cv2 as cv +import numpy as np +import sys +import math + +def main(): + try: + fn = sys.argv[1] + except IndexError: + fn = 'pic1.png' + src = cv.imread(cv.samples.findFile(fn)) + dst = cv.Canny(src, 50, 200) + cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR) + if True: + lines = cv.HoughLinesP(dst, 1, math.pi / 180.0, 40, np.array([]), 50, 10) + (a, b, _c) = lines.shape + for i in range(a): + cv.line(cdst, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv.LINE_AA) + else: + lines = cv.HoughLines(dst, 1, math.pi / 180.0, 50, np.array([]), 0, 0) + if lines is not None: + (a, b, _c) = lines.shape + for i in range(a): + rho = lines[i][0][0] + theta = lines[i][0][1] + a = math.cos(theta) + b = math.sin(theta) + (x0, y0) = (a * rho, b * rho) + pt1 = (int(x0 + 1000 * -b), int(y0 + 1000 * a)) + pt2 = (int(x0 - 1000 * -b), int(y0 - 1000 * a)) + cv.line(cdst, pt1, pt2, (0, 0, 255), 3, cv.LINE_AA) + cv.imshow('detected lines', cdst) + cv.imshow('source', src) + cv.waitKey(0) + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/inpaint.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +from common import Sketcher + +def main(): + import sys + try: + fn = sys.argv[1] + except: + fn = 'fruits.jpg' + img = cv.imread(cv.samples.findFile(fn)) + if img is None: + print('Failed to load image file:', fn) + sys.exit(1) + img_mark = img.copy() + mark = np.zeros(img.shape[:2], np.uint8) + sketch = Sketcher('img', [img_mark, mark], lambda : ((255, 255, 255), 255)) + while True: + ch = cv.waitKey() + if ch == 27: + break + if ch == ord(' '): + res = cv.inpaint(img_mark, mark, 3, cv.INPAINT_TELEA) + cv.imshow('inpaint', res) + if ch == ord('r'): + img_mark[:] = img + mark[:] = 0 + sketch.show() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/kalman.py +"""""" +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + long = int +import numpy as np +import cv2 as cv +from math import cos, sin, sqrt, pi + +def main(): + img_height = 500 + img_width = 500 + kalman = cv.KalmanFilter(2, 1, 0) + code = long(-1) + num_circle_steps = 12 + while True: + img = np.zeros((img_height, img_width, 3), np.uint8) + state = np.array([[0.0], [2 * pi / num_circle_steps]]) 
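+# The filter configured below models the point's angle with a constant-velocity
+# state x = [angle, angular_velocity]: one predict step gives x' = F x with
+# F = [[1, 1], [0, 1]], and only the angle itself is measured (H = [1, 0]).
+# Tiny numpy check of that prediction step:
+F = np.array([[1.0, 1.0], [0.0, 1.0]])
+x = np.array([[0.3], [0.1]])               # angle 0.3 rad, velocity 0.1 rad/step
+assert np.allclose(F @ x, [[0.4], [0.1]])  # the angle advances by the velocity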
+ kalman.transitionMatrix = np.array([[1.0, 1.0], [0.0, 1.0]]) + kalman.measurementMatrix = 1.0 * np.eye(1, 2) + kalman.processNoiseCov = 1e-05 * np.eye(2) + kalman.measurementNoiseCov = 0.1 * np.ones((1, 1)) + kalman.errorCovPost = 1.0 * np.eye(2, 2) + kalman.statePost = 0.1 * np.random.randn(2, 1) + while True: + + def calc_point(angle): + return (np.around(img_width / 2.0 + img_width / 3.0 * cos(angle), 0).astype(int), np.around(img_height / 2.0 - img_width / 3.0 * sin(angle), 1).astype(int)) + img = img * 0.001 + state_angle = state[0, 0] + state_pt = calc_point(state_angle) + prediction = kalman.predict() + predict_pt = calc_point(prediction[0, 0]) + measurement = kalman.measurementNoiseCov * np.random.randn(1, 1) + measurement = np.dot(kalman.measurementMatrix, state) + measurement + measurement_angle = measurement[0, 0] + measurement_pt = calc_point(measurement_angle) + kalman.correct(measurement) + improved_pt = calc_point(kalman.statePost[0, 0]) + cv.drawMarker(img, measurement_pt, (0, 0, 255), cv.MARKER_SQUARE, 5, 2) + cv.drawMarker(img, predict_pt, (0, 255, 255), cv.MARKER_SQUARE, 5, 2) + cv.drawMarker(img, improved_pt, (0, 255, 0), cv.MARKER_SQUARE, 5, 2) + cv.drawMarker(img, state_pt, (255, 255, 255), cv.MARKER_STAR, 10, 1) + cv.drawMarker(img, calc_point(np.dot(kalman.transitionMatrix, kalman.statePost)[0, 0]), (255, 255, 0), cv.MARKER_SQUARE, 12, 1) + cv.line(img, state_pt, measurement_pt, (0, 0, 255), 1, cv.LINE_AA, 0) + cv.line(img, state_pt, predict_pt, (0, 255, 255), 1, cv.LINE_AA, 0) + cv.line(img, state_pt, improved_pt, (0, 255, 0), 1, cv.LINE_AA, 0) + process_noise = sqrt(kalman.processNoiseCov[0, 0]) * np.random.randn(2, 1) + state = np.dot(kalman.transitionMatrix, state) + process_noise + cv.imshow('Kalman', img) + code = cv.waitKey(1000) + if code != -1: + break + if code in [27, ord('q'), ord('Q')]: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/kmeans.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +from gaussian_mix import make_gaussians + +def main(): + cluster_n = 5 + img_size = 512 + colors = np.zeros((1, cluster_n, 3), np.uint8) + colors[0, :] = 255 + colors[0, :, 0] = np.arange(0, 180, 180.0 / cluster_n) + colors = cv.cvtColor(colors, cv.COLOR_HSV2BGR)[0] + while True: + print('sampling distributions...') + (points, _) = make_gaussians(cluster_n, img_size) + term_crit = (cv.TERM_CRITERIA_EPS, 30, 0.1) + (_ret, labels, _centers) = cv.kmeans(points, cluster_n, None, term_crit, 10, 0) + img = np.zeros((img_size, img_size, 3), np.uint8) + for ((x, y), label) in zip(np.int32(points), labels.ravel()): + c = list(map(int, colors[label])) + cv.circle(img, (x, y), 1, c, -1) + cv.imshow('kmeans', img) + ch = cv.waitKey(0) + if ch == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/laplace.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import sys + +def main(): + ddepth = cv.CV_16S + smoothType = 'MedianBlur' + sigma = 3 + if len(sys.argv) == 4: + ddepth = sys.argv[1] + smoothType = sys.argv[2] + sigma = sys.argv[3] + cap = cv.VideoCapture(0) + cv.namedWindow('Laplace of Image', cv.WINDOW_AUTOSIZE) + cv.createTrackbar('Kernel Size Bar', 'Laplace of Image', sigma, 15, lambda x: x) + print('==' * 40) + print('Frame Width: ', cap.get(cv.CAP_PROP_FRAME_WIDTH), 'Frame Height: ', 
cap.get(cv.CAP_PROP_FRAME_HEIGHT), 'FPS: ', cap.get(cv.CAP_PROP_FPS)) + while True: + (ret, frame) = cap.read() + if ret == False: + print("Can't open camera/video stream") + break + sigma = cv.getTrackbarPos('Kernel Size Bar', 'Laplace of Image') + ksize = sigma * 5 | 1 + if smoothType == 'GAUSSIAN': + smoothed = cv.GaussianBlur(frame, (ksize, ksize), sigma, sigma) + if smoothType == 'BLUR': + smoothed = cv.blur(frame, (ksize, ksize)) + if smoothType == 'MedianBlur': + smoothed = cv.medianBlur(frame, ksize) + laplace = cv.Laplacian(smoothed, ddepth, 5) + result = cv.convertScaleAbs(laplace, (sigma + 1) * 0.25) + cv.imshow('Laplace of Image', result) + k = cv.waitKey(30) + if k == 27: + return +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/lappyr.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv +import video +from common import nothing, getsize + +def build_lappyr(img, leveln=6, dtype=np.int16): + img = dtype(img) + levels = [] + for _i in xrange(leveln - 1): + next_img = cv.pyrDown(img) + img1 = cv.pyrUp(next_img, dstsize=getsize(img)) + levels.append(img - img1) + img = next_img + levels.append(img) + return levels + +def merge_lappyr(levels): + img = levels[-1] + for lev_img in levels[-2::-1]: + img = cv.pyrUp(img, dstsize=getsize(lev_img)) + img += lev_img + return np.uint8(np.clip(img, 0, 255)) + +def main(): + import sys + try: + fn = sys.argv[1] + except: + fn = 0 + cap = video.create_capture(fn) + leveln = 6 + cv.namedWindow('level control') + for i in xrange(leveln): + cv.createTrackbar('%d' % i, 'level control', 5, 50, nothing) + while True: + (_ret, frame) = cap.read() + pyr = build_lappyr(frame, leveln) + for i in xrange(leveln): + v = int(cv.getTrackbarPos('%d' % i, 'level control') / 5) + pyr[i] *= v + res = merge_lappyr(pyr) + cv.imshow('laplacian pyramid filter', res) + if cv.waitKey(1) == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/letter_recog.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv + +def load_base(fn): + a = np.loadtxt(fn, np.float32, delimiter=',', converters={0: lambda ch: ord(ch) - ord('A')}) + (samples, responses) = (a[:, 1:], a[:, 0]) + return (samples, responses) + +class LetterStatModel(object): + class_n = 26 + train_ratio = 0.5 + + def load(self, fn): + self.model = self.model.load(fn) + + def save(self, fn): + self.model.save(fn) + + def unroll_samples(self, samples): + (sample_n, var_n) = samples.shape + new_samples = np.zeros((sample_n * self.class_n, var_n + 1), np.float32) + new_samples[:, :-1] = np.repeat(samples, self.class_n, axis=0) + new_samples[:, -1] = np.tile(np.arange(self.class_n), sample_n) + return new_samples + + def unroll_responses(self, responses): + sample_n = len(responses) + new_responses = np.zeros(sample_n * self.class_n, np.int32) + resp_idx = np.int32(responses + np.arange(sample_n) * self.class_n) + new_responses[resp_idx] = 1 + return new_responses + +class RTrees(LetterStatModel): + + def __init__(self): + self.model = cv.ml.RTrees_create() + + def train(self, samples, responses): + self.model.setMaxDepth(20) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int)) + + def predict(self, samples): + (_ret, resp) = self.model.predict(samples) + return resp.ravel() + +class 
KNearest(LetterStatModel): + + def __init__(self): + self.model = cv.ml.KNearest_create() + + def train(self, samples, responses): + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + (_retval, results, _neigh_resp, _dists) = self.model.findNearest(samples, k=10) + return results.ravel() + +class Boost(LetterStatModel): + + def __init__(self): + self.model = cv.ml.Boost_create() + + def train(self, samples, responses): + (_sample_n, var_n) = samples.shape + new_samples = self.unroll_samples(samples) + new_responses = self.unroll_responses(responses) + var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8) + self.model.setWeakCount(15) + self.model.setMaxDepth(10) + self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType=var_types)) + + def predict(self, samples): + new_samples = self.unroll_samples(samples) + (_ret, resp) = self.model.predict(new_samples) + return resp.ravel().reshape(-1, self.class_n).argmax(1) + +class SVM(LetterStatModel): + + def __init__(self): + self.model = cv.ml.SVM_create() + + def train(self, samples, responses): + self.model.setType(cv.ml.SVM_C_SVC) + self.model.setC(1) + self.model.setKernel(cv.ml.SVM_RBF) + self.model.setGamma(0.1) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int)) + + def predict(self, samples): + (_ret, resp) = self.model.predict(samples) + return resp.ravel() + +class MLP(LetterStatModel): + + def __init__(self): + self.model = cv.ml.ANN_MLP_create() + + def train(self, samples, responses): + (_sample_n, var_n) = samples.shape + new_responses = self.unroll_responses(responses).reshape(-1, self.class_n) + layer_sizes = np.int32([var_n, 100, 100, self.class_n]) + self.model.setLayerSizes(layer_sizes) + self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP) + self.model.setBackpropMomentumScale(0.0) + self.model.setBackpropWeightScale(0.001) + self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01)) + self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1) + self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses)) + + def predict(self, samples): + (_ret, resp) = self.model.predict(samples) + return resp.argmax(-1) + +def main(): + import getopt + import sys + models = [RTrees, KNearest, Boost, SVM, MLP] + models = dict([(cls.__name__.lower(), cls) for cls in models]) + (args, dummy) = getopt.getopt(sys.argv[1:], '', ['model=', 'data=', 'load=', 'save=']) + args = dict(args) + args.setdefault('--model', 'svm') + args.setdefault('--data', 'letter-recognition.data') + datafile = cv.samples.findFile(args['--data']) + print('loading data %s ...' % datafile) + (samples, responses) = load_base(datafile) + Model = models[args['--model']] + model = Model() + train_n = int(len(samples) * model.train_ratio) + if '--load' in args: + fn = args['--load'] + print('loading model from %s ...' % fn) + model.load(fn) + else: + print('training %s ...' % Model.__name__) + model.train(samples[:train_n], responses[:train_n]) + print('testing...') + train_rate = np.mean(model.predict(samples[:train_n]) == responses[:train_n].astype(int)) + test_rate = np.mean(model.predict(samples[train_n:]) == responses[train_n:].astype(int)) + print('train rate: %f test rate: %f' % (train_rate * 100, test_rate * 100)) + if '--save' in args: + fn = args['--save'] + print('saving model to %s ...' 
% fn) + model.save(fn) + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/lk_homography.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import video +from common import draw_str +from video import presets +lk_params = dict(winSize=(19, 19), maxLevel=2, criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)) +feature_params = dict(maxCorners=1000, qualityLevel=0.01, minDistance=8, blockSize=19) + +def checkedTrace(img0, img1, p0, back_threshold=1.0): + (p1, _st, _err) = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) + (p0r, _st, _err) = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) + d = abs(p0 - p0r).reshape(-1, 2).max(-1) + status = d < back_threshold + return (p1, status) +green = (0, 255, 0) +red = (0, 0, 255) + +class App: + + def __init__(self, video_src): + self.cam = video.create_capture(video_src, presets['book']) + self.p0 = None + self.use_ransac = True + + def run(self): + while True: + (_ret, frame) = self.cam.read() + frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) + vis = frame.copy() + if self.p0 is not None: + (p2, trace_status) = checkedTrace(self.gray1, frame_gray, self.p1) + self.p1 = p2[trace_status].copy() + self.p0 = self.p0[trace_status].copy() + self.gray1 = frame_gray + if len(self.p0) < 4: + self.p0 = None + continue + (H, status) = cv.findHomography(self.p0, self.p1, (0, cv.RANSAC)[self.use_ransac], 10.0) + (h, w) = frame.shape[:2] + overlay = cv.warpPerspective(self.frame0, H, (w, h)) + vis = cv.addWeighted(vis, 0.5, overlay, 0.5, 0.0) + for ((x0, y0), (x1, y1), good) in zip(self.p0[:, 0], self.p1[:, 0], status[:, 0]): + if good: + cv.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (0, 128, 0)) + cv.circle(vis, (int(x1), int(y1)), 2, (red, green)[good], -1) + draw_str(vis, (20, 20), 'track count: %d' % len(self.p1)) + if self.use_ransac: + draw_str(vis, (20, 40), 'RANSAC') + else: + p = cv.goodFeaturesToTrack(frame_gray, **feature_params) + if p is not None: + for (x, y) in p[:, 0]: + cv.circle(vis, (int(x), int(y)), 2, green, -1) + draw_str(vis, (20, 20), 'feature count: %d' % len(p)) + cv.imshow('lk_homography', vis) + ch = cv.waitKey(1) + if ch == 27: + break + if ch == ord(' '): + self.frame0 = frame.copy() + self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params) + if self.p0 is not None: + self.p1 = self.p0 + self.gray0 = frame_gray + self.gray1 = frame_gray + if ch == ord('r'): + self.use_ransac = not self.use_ransac + +def main(): + import sys + try: + video_src = sys.argv[1] + except IndexError: + video_src = 0 + App(video_src).run() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/lk_track.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import video +from common import anorm2, draw_str +lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)) +feature_params = dict(maxCorners=500, qualityLevel=0.3, minDistance=7, blockSize=7) + +class App: + + def __init__(self, video_src): + self.track_len = 10 + self.detect_interval = 5 + self.tracks = [] + self.cam = video.create_capture(video_src) + self.frame_idx = 0 + + def run(self): + while True: + (_ret, frame) = self.cam.read() + frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) + vis = frame.copy() + if len(self.tracks) > 0: + (img0, img1) = (self.prev_gray, frame_gray)
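+ # Forward-backward check: track the points into the current frame, re-track the results back to the previous frame, and keep only points that return to within 1 px of where they started.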
+ p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2) + (p1, _st, _err) = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) + (p0r, _st, _err) = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) + d = abs(p0 - p0r).reshape(-1, 2).max(-1) + good = d < 1 + new_tracks = [] + for (tr, (x, y), good_flag) in zip(self.tracks, p1.reshape(-1, 2), good): + if not good_flag: + continue + tr.append((x, y)) + if len(tr) > self.track_len: + del tr[0] + new_tracks.append(tr) + cv.circle(vis, (int(x), int(y)), 2, (0, 255, 0), -1) + self.tracks = new_tracks + cv.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0)) + draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks)) + if self.frame_idx % self.detect_interval == 0: + mask = np.zeros_like(frame_gray) + mask[:] = 255 + for (x, y) in [np.int32(tr[-1]) for tr in self.tracks]: + cv.circle(mask, (x, y), 5, 0, -1) + p = cv.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params) + if p is not None: + for (x, y) in np.float32(p).reshape(-1, 2): + self.tracks.append([(x, y)]) + self.frame_idx += 1 + self.prev_gray = frame_gray + cv.imshow('lk_track', vis) + ch = cv.waitKey(1) + if ch == 27: + break + +def main(): + import sys + try: + video_src = sys.argv[1] + except IndexError: + video_src = 0 + App(video_src).run() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/logpolar.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv + +def main(): + import sys + try: + fn = sys.argv[1] + except IndexError: + fn = 'fruits.jpg' + img = cv.imread(cv.samples.findFile(fn)) + if img is None: + print('Failed to load image file:', fn) + sys.exit(1) + img2 = cv.logPolar(img, (img.shape[1] / 2, img.shape[0] / 2), 40, cv.WARP_FILL_OUTLIERS) # center is (x, y), i.e. (width / 2, height / 2) + img3 = cv.linearPolar(img, (img.shape[1] / 2, img.shape[0] / 2), 40, cv.WARP_FILL_OUTLIERS) + cv.imshow('before', img) + cv.imshow('logpolar', img2) + cv.imshow('linearpolar', img3) + cv.waitKey(0) + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/morphology.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +import numpy as np +import cv2 as cv + +def main(): + import sys + from itertools import cycle + from common import draw_str + try: + fn = sys.argv[1] + except IndexError: + fn = 'baboon.jpg' + img = cv.imread(cv.samples.findFile(fn)) + if img is None: + print('Failed to load image file:', fn) + sys.exit(1) + cv.imshow('original', img) + modes = cycle(['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient']) + str_modes = cycle(['ellipse', 'rect', 'cross']) + if PY3: + cur_mode = next(modes) + cur_str_mode = next(str_modes) + else: + cur_mode = modes.next() + cur_str_mode = str_modes.next() + + def update(dummy=None): + try: + sz = cv.getTrackbarPos('op/size', 'morphology') + iters = cv.getTrackbarPos('iters', 'morphology') + except cv.error: + return + opers = cur_mode.split('/') + if len(opers) > 1: + sz = sz - 10 + op = opers[sz > 0] + sz = abs(sz) + else: + op = opers[0] + sz = sz * 2 + 1 + str_name = 'MORPH_' + cur_str_mode.upper() + oper_name = 'MORPH_' + op.upper() + st = cv.getStructuringElement(getattr(cv, str_name), (sz, sz)) + res = cv.morphologyEx(img, getattr(cv, oper_name), st, iterations=iters) + draw_str(res, (10, 20), 'mode: ' + cur_mode) + draw_str(res, (10, 40), 'operation: ' + oper_name) + draw_str(res, (10, 
60), 'structure: ' + str_name) + draw_str(res, (10, 80), 'ksize: %d iters: %d' % (sz, iters)) + cv.imshow('morphology', res) + cv.namedWindow('morphology') + cv.createTrackbar('op/size', 'morphology', 12, 20, update) + cv.createTrackbar('iters', 'morphology', 1, 10, update) + update() + while True: + ch = cv.waitKey() + if ch == 27: + break + if ch == ord('1'): + if PY3: + cur_mode = next(modes) + else: + cur_mode = modes.next() + if ch == ord('2'): + if PY3: + cur_str_mode = next(str_modes) + else: + cur_str_mode = str_modes.next() + update() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/mosse.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv +from common import draw_str, RectSelector +import video + +def rnd_warp(a): + (h, w) = a.shape[:2] + T = np.zeros((2, 3)) + coef = 0.2 + ang = (np.random.rand() - 0.5) * coef + (c, s) = (np.cos(ang), np.sin(ang)) + T[:2, :2] = [[c, -s], [s, c]] + T[:2, :2] += (np.random.rand(2, 2) - 0.5) * coef + c = (w / 2, h / 2) + T[:, 2] = c - np.dot(T[:2, :2], c) + return cv.warpAffine(a, T, (w, h), borderMode=cv.BORDER_REFLECT) + +def divSpec(A, B): + (Ar, Ai) = (A[..., 0], A[..., 1]) + (Br, Bi) = (B[..., 0], B[..., 1]) + C = (Ar + 1j * Ai) / (Br + 1j * Bi) + C = np.dstack([np.real(C), np.imag(C)]).copy() + return C +eps = 1e-05 + +class MOSSE: + + def __init__(self, frame, rect): + (x1, y1, x2, y2) = rect + (w, h) = map(cv.getOptimalDFTSize, [x2 - x1, y2 - y1]) + (x1, y1) = ((x1 + x2 - w) // 2, (y1 + y2 - h) // 2) + self.pos = (x, y) = (x1 + 0.5 * (w - 1), y1 + 0.5 * (h - 1)) + self.size = (w, h) + img = cv.getRectSubPix(frame, (w, h), (x, y)) + self.win = cv.createHanningWindow((w, h), cv.CV_32F) + g = np.zeros((h, w), np.float32) + g[h // 2, w // 2] = 1 + g = cv.GaussianBlur(g, (-1, -1), 2.0) + g /= g.max() + self.G = cv.dft(g, flags=cv.DFT_COMPLEX_OUTPUT) + self.H1 = np.zeros_like(self.G) + self.H2 = np.zeros_like(self.G) + for _i in xrange(128): + a = self.preprocess(rnd_warp(img)) + A = cv.dft(a, flags=cv.DFT_COMPLEX_OUTPUT) + self.H1 += cv.mulSpectrums(self.G, A, 0, conjB=True) + self.H2 += cv.mulSpectrums(A, A, 0, conjB=True) + self.update_kernel() + self.update(frame) + + def update(self, frame, rate=0.125): + ((x, y), (w, h)) = (self.pos, self.size) + self.last_img = img = cv.getRectSubPix(frame, (w, h), (x, y)) + img = self.preprocess(img) + (self.last_resp, (dx, dy), self.psr) = self.correlate(img) + self.good = self.psr > 8.0 + if not self.good: + return + self.pos = (x + dx, y + dy) + self.last_img = img = cv.getRectSubPix(frame, (w, h), self.pos) + img = self.preprocess(img) + A = cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT) + H1 = cv.mulSpectrums(self.G, A, 0, conjB=True) + H2 = cv.mulSpectrums(A, A, 0, conjB=True) + self.H1 = self.H1 * (1.0 - rate) + H1 * rate + self.H2 = self.H2 * (1.0 - rate) + H2 * rate + self.update_kernel() + + @property + def state_vis(self): + f = cv.idft(self.H, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT) + (h, w) = f.shape + f = np.roll(f, -h // 2, 0) + f = np.roll(f, -w // 2, 1) + kernel = np.uint8((f - f.min()) / f.ptp() * 255) + resp = self.last_resp + resp = np.uint8(np.clip(resp / resp.max(), 0, 1) * 255) + vis = np.hstack([self.last_img, kernel, resp]) + return vis + + def draw_state(self, vis): + ((x, y), (w, h)) = (self.pos, self.size) + (x1, y1, x2, y2) = (int(x - 0.5 * w), int(y - 0.5 * h), int(x + 0.5 * w), int(y + 0.5 * 
h)) + cv.rectangle(vis, (x1, y1), (x2, y2), (0, 0, 255)) + if self.good: + cv.circle(vis, (int(x), int(y)), 2, (0, 0, 255), -1) + else: + cv.line(vis, (x1, y1), (x2, y2), (0, 0, 255)) + cv.line(vis, (x2, y1), (x1, y2), (0, 0, 255)) + draw_str(vis, (x1, y2 + 16), 'PSR: %.2f' % self.psr) + + def preprocess(self, img): + img = np.log(np.float32(img) + 1.0) + img = (img - img.mean()) / (img.std() + eps) + return img * self.win + + def correlate(self, img): + C = cv.mulSpectrums(cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT), self.H, 0, conjB=True) + resp = cv.idft(C, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT) + (h, w) = resp.shape + (_, mval, _, (mx, my)) = cv.minMaxLoc(resp) + side_resp = resp.copy() + cv.rectangle(side_resp, (mx - 5, my - 5), (mx + 5, my + 5), 0, -1) + (smean, sstd) = (side_resp.mean(), side_resp.std()) + psr = (mval - smean) / (sstd + eps) + return (resp, (mx - w // 2, my - h // 2), psr) + + def update_kernel(self): + self.H = divSpec(self.H1, self.H2) + self.H[..., 1] *= -1 + +class App: + + def __init__(self, video_src, paused=False): + self.cap = video.create_capture(video_src) + (_, self.frame) = self.cap.read() + cv.imshow('frame', self.frame) + self.rect_sel = RectSelector('frame', self.onrect) + self.trackers = [] + self.paused = paused + + def onrect(self, rect): + frame_gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY) + tracker = MOSSE(frame_gray, rect) + self.trackers.append(tracker) + + def run(self): + while True: + if not self.paused: + (ret, self.frame) = self.cap.read() + if not ret: + break + frame_gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY) + for tracker in self.trackers: + tracker.update(frame_gray) + vis = self.frame.copy() + for tracker in self.trackers: + tracker.draw_state(vis) + if len(self.trackers) > 0: + cv.imshow('tracker state', self.trackers[-1].state_vis) + self.rect_sel.draw(vis) + cv.imshow('frame', vis) + ch = cv.waitKey(10) + if ch == 27: + break + if ch == ord(' '): + self.paused = not self.paused + if ch == ord('c'): + self.trackers = [] +if __name__ == '__main__': + print(__doc__) + import sys, getopt + (opts, args) = getopt.getopt(sys.argv[1:], '', ['pause']) + opts = dict(opts) + try: + video_src = args[0] + except: + video_src = '0' + App(video_src, paused='--pause' in opts).run() + +# File: opencv-master/samples/python/mouse_and_match.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import os +import sys +import glob +import argparse +from math import * + +class App: + drag_start = None + sel = (0, 0, 0, 0) + + def onmouse(self, event, x, y, flags, param): + if event == cv.EVENT_LBUTTONDOWN: + self.drag_start = (x, y) + self.sel = (0, 0, 0, 0) + elif event == cv.EVENT_LBUTTONUP: + if self.sel[2] > self.sel[0] and self.sel[3] > self.sel[1]: + patch = self.gray[self.sel[1]:self.sel[3], self.sel[0]:self.sel[2]] + result = cv.matchTemplate(self.gray, patch, cv.TM_CCOEFF_NORMED) + result = np.abs(result) ** 3 + (_val, result) = cv.threshold(result, 0.01, 0, cv.THRESH_TOZERO) + result8 = cv.normalize(result, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U) + cv.imshow('result', result8) + self.drag_start = None + elif self.drag_start: + if flags & cv.EVENT_FLAG_LBUTTON: + minpos = (min(self.drag_start[0], x), min(self.drag_start[1], y)) + maxpos = (max(self.drag_start[0], x), max(self.drag_start[1], y)) + self.sel = (minpos[0], minpos[1], maxpos[0], maxpos[1]) + img = cv.cvtColor(self.gray, cv.COLOR_GRAY2BGR) + cv.rectangle(img, (self.sel[0], self.sel[1]), (self.sel[2], self.sel[3]), (0, 255, 255), 1) + 
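+ # Redraw the selection rectangle on every mouse-move event so the user can see the template region being chosen.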
cv.imshow('gray', img) + else: + print('selection is complete') + self.drag_start = None + + def run(self): + parser = argparse.ArgumentParser(description='Demonstrate mouse interaction with images') + parser.add_argument('-i', '--input', default='../data/', help='Input directory.') + args = parser.parse_args() + path = args.input + cv.namedWindow('gray', 1) + cv.setMouseCallback('gray', self.onmouse) + # loop through all the images in the input directory + for infile in glob.glob(os.path.join(path, '*.*')): + ext = os.path.splitext(infile)[1][1:] + if ext in ('png', 'jpg', 'bmp', 'tiff', 'pbm'): + print(infile) + img = cv.imread(infile, cv.IMREAD_COLOR) + if img is None: + continue + self.sel = (0, 0, 0, 0) + self.drag_start = None + self.gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + cv.imshow('gray', self.gray) + if cv.waitKey() == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + App().run() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/mser.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import video +import sys + +def main(): + try: + video_src = sys.argv[1] + except IndexError: + video_src = 0 + cam = video.create_capture(video_src) + mser = cv.MSER_create() + while True: + (ret, img) = cam.read() + if not ret: + break + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + vis = img.copy() + (regions, _) = mser.detectRegions(gray) + hulls = [cv.convexHull(p.reshape(-1, 1, 2)) for p in regions] + cv.polylines(vis, hulls, 1, (0, 255, 0)) + cv.imshow('img', vis) + if cv.waitKey(5) == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/opencv_version.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv + +def main(): + import sys + try: + param = sys.argv[1] + except IndexError: + param = '' + if '--build' == param: + print(cv.getBuildInformation()) + elif '--help' == param: + print('\t--build\n\t\tprint complete build info') + print('\t--help\n\t\tprint this help') + else: + print('Welcome to OpenCV') + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/opt_flow.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import video + +def draw_flow(img, flow, step=16): + (h, w) = img.shape[:2] + (y, x) = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1).astype(int) + (fx, fy) = flow[y, x].T + lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2) + lines = np.int32(lines + 0.5) + vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR) + cv.polylines(vis, lines, 0, (0, 255, 0)) + for ((x1, y1), (_x2, _y2)) in lines: + cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1) + return vis + +def draw_hsv(flow): + (h, w) = flow.shape[:2] + (fx, fy) = (flow[:, :, 0], flow[:, :, 1]) + ang = np.arctan2(fy, fx) + np.pi + v = np.sqrt(fx * fx + fy * fy) + hsv = np.zeros((h, w, 3), np.uint8) + hsv[..., 0] = ang * (180 / np.pi / 2) + hsv[..., 1] = 255 + hsv[..., 2] = np.minimum(v * 4, 255) + bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR) + return bgr + +def warp_flow(img, flow): + (h, w) = flow.shape[:2] + flow = -flow + flow[:, :, 0] += np.arange(w) + flow[:, :, 1] += np.arange(h)[:, np.newaxis] + res = cv.remap(img, flow, None, cv.INTER_LINEAR) + return res + +def main(): + import sys + try: + fn = sys.argv[1] + except IndexError: + fn = 0 + cam = video.create_capture(fn) + (_ret, prev) = cam.read() + prevgray = 
cv.cvtColor(prev, cv.COLOR_BGR2GRAY) + show_hsv = False + show_glitch = False + cur_glitch = prev.copy() + while True: + (_ret, img) = cam.read() + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0) + prevgray = gray + cv.imshow('flow', draw_flow(gray, flow)) + if show_hsv: + cv.imshow('flow HSV', draw_hsv(flow)) + if show_glitch: + cur_glitch = warp_flow(cur_glitch, flow) + cv.imshow('glitch', cur_glitch) + ch = cv.waitKey(5) + if ch == 27: + break + if ch == ord('1'): + show_hsv = not show_hsv + print('HSV flow visualization is', ['off', 'on'][show_hsv]) + if ch == ord('2'): + show_glitch = not show_glitch + if show_glitch: + cur_glitch = img.copy() + print('glitch is', ['off', 'on'][show_glitch]) + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/peopledetect.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv + +def inside(r, q): + (rx, ry, rw, rh) = r + (qx, qy, qw, qh) = q + return rx > qx and ry > qy and (rx + rw < qx + qw) and (ry + rh < qy + qh) + +def draw_detections(img, rects, thickness=1): + for (x, y, w, h) in rects: + (pad_w, pad_h) = (int(0.15 * w), int(0.05 * h)) + cv.rectangle(img, (x + pad_w, y + pad_h), (x + w - pad_w, y + h - pad_h), (0, 255, 0), thickness) + +def main(): + import sys + from glob import glob + import itertools as it + hog = cv.HOGDescriptor() + hog.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector()) + default = [cv.samples.findFile('basketball2.png')] if len(sys.argv[1:]) == 0 else [] + for fn in it.chain(*map(glob, default + sys.argv[1:])): + print(fn, ' - ') + try: + img = cv.imread(fn) + if img is None: + print('Failed to load image file:', fn) + continue + except: + print('loading error') + continue + (found, _w) = hog.detectMultiScale(img, winStride=(8, 8), padding=(32, 32), scale=1.05) + found_filtered = [] + for (ri, r) in enumerate(found): + for (qi, q) in enumerate(found): + if ri != qi and inside(r, q): + break + else: + found_filtered.append(r) + draw_detections(img, found) + draw_detections(img, found_filtered, 3) + print('%d (%d) found' % (len(found_filtered), len(found))) + cv.imshow('img', img) + ch = cv.waitKey() + if ch == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/plane_ar.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import video +import common +from plane_tracker import PlaneTracker +from video import presets +ar_verts = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0], [0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 0.5, 2], [1, 0.5, 2]]) +ar_edges = [(0, 1), (1, 2), (2, 3), (3, 0), (4, 5), (5, 6), (6, 7), (7, 4), (0, 4), (1, 5), (2, 6), (3, 7), (4, 8), (5, 8), (6, 9), (7, 9), (8, 9)] + +class App: + + def __init__(self, src): + self.cap = video.create_capture(src, presets['book']) + self.frame = None + self.paused = False + self.tracker = PlaneTracker() + cv.namedWindow('plane') + cv.createTrackbar('focal', 'plane', 25, 50, common.nothing) + self.rect_sel = common.RectSelector('plane', self.on_rect) + + def on_rect(self, rect): + self.tracker.add_target(self.frame, rect) + + def run(self): + while True: + playing = not self.paused and (not self.rect_sel.dragging) + if playing or self.frame is None: + (ret, frame) = self.cap.read() + if not ret: + break + self.frame = frame.copy() 
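+ # Keep an unmodified copy of the frame for the tracker; all drawing below goes to the separate 'vis' image.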
+ vis = self.frame.copy() + if playing: + tracked = self.tracker.track(self.frame) + for tr in tracked: + cv.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2) + for (x, y) in np.int32(tr.p1): + cv.circle(vis, (x, y), 2, (255, 255, 255)) + self.draw_overlay(vis, tr) + self.rect_sel.draw(vis) + cv.imshow('plane', vis) + ch = cv.waitKey(1) + if ch == ord(' '): + self.paused = not self.paused + if ch == ord('c'): + self.tracker.clear() + if ch == 27: + break + + def draw_overlay(self, vis, tracked): + (x0, y0, x1, y1) = tracked.target.rect + quad_3d = np.float32([[x0, y0, 0], [x1, y0, 0], [x1, y1, 0], [x0, y1, 0]]) + fx = 0.5 + cv.getTrackbarPos('focal', 'plane') / 50.0 + (h, w) = vis.shape[:2] + K = np.float64([[fx * w, 0, 0.5 * (w - 1)], [0, fx * w, 0.5 * (h - 1)], [0.0, 0.0, 1.0]]) + dist_coef = np.zeros(4) + (_ret, rvec, tvec) = cv.solvePnP(quad_3d, tracked.quad, K, dist_coef) + verts = ar_verts * [x1 - x0, y1 - y0, -(x1 - x0) * 0.3] + (x0, y0, 0) + verts = cv.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2) + for (i, j) in ar_edges: + ((x0, y0), (x1, y1)) = (verts[i], verts[j]) + cv.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2) +if __name__ == '__main__': + print(__doc__) + import sys + try: + video_src = sys.argv[1] + except IndexError: + video_src = 0 + App(video_src).run() + +# File: opencv-master/samples/python/plane_tracker.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv +from collections import namedtuple +import video +import common +from video import presets +FLANN_INDEX_KDTREE = 1 +FLANN_INDEX_LSH = 6 +flann_params = dict(algorithm=FLANN_INDEX_LSH, table_number=6, key_size=12, multi_probe_level=1) +MIN_MATCH_COUNT = 10 +# PlanarTarget fields: image - image to track, rect - tracked rectangle (x1, y1, x2, y2), keypoints - keypoints detected inside rect, descrs - their descriptors, data - optional user-provided data +PlanarTarget = namedtuple('PlanarTarget', 'image, rect, keypoints, descrs, data') +# TrackedTarget fields: target - reference to PlanarTarget, p0 - matched point coords in the target image, p1 - matched point coords in the input frame, H - homography from p0 to p1, quad - target boundary quad in the input frame +TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad') + +class PlaneTracker: + + def __init__(self): + self.detector = cv.ORB_create(nfeatures=1000) + self.matcher = cv.FlannBasedMatcher(flann_params, {}) + self.targets = [] + self.frame_points = [] + + def add_target(self, image, rect, data=None): + (x0, y0, x1, y1) = rect + (raw_points, raw_descrs) = self.detect_features(image) + (points, descs) = ([], []) + for (kp, desc) in zip(raw_points, raw_descrs): + (x, y) = kp.pt + if x0 <= x <= x1 and y0 <= y <= y1: + points.append(kp) + descs.append(desc) + descs = np.uint8(descs) + self.matcher.add([descs]) + target = PlanarTarget(image=image, rect=rect, keypoints=points, descrs=descs, data=data) + self.targets.append(target) + + def clear(self): + self.targets = [] + self.matcher.clear() + + def track(self, frame): + (self.frame_points, frame_descrs) = self.detect_features(frame) + if len(self.frame_points) < MIN_MATCH_COUNT: + return [] + matches = self.matcher.knnMatch(frame_descrs, k=2) + matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75] + if len(matches) < MIN_MATCH_COUNT: + return [] + matches_by_id = [[] for _ in xrange(len(self.targets))] + for m in matches: + matches_by_id[m.imgIdx].append(m) + tracked = [] + for (imgIdx, matches) in enumerate(matches_by_id): + if len(matches) < MIN_MATCH_COUNT: + continue + target = self.targets[imgIdx] + p0 = [target.keypoints[m.trainIdx].pt for m in matches] + p1 = [self.frame_points[m.queryIdx].pt for m in matches] + (p0, p1) = np.float32((p0, p1)) + (H, status) = cv.findHomography(p0, p1, cv.RANSAC, 3.0) + status = status.ravel() != 0
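+ # 'status' flags the RANSAC inliers of the homography fit; the target is dropped below unless enough matches survive.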
+ if status.sum() < MIN_MATCH_COUNT: + continue + (p0, p1) = (p0[status], p1[status]) + (x0, y0, x1, y1) = target.rect + quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]]) + quad = cv.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2) + track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad) + tracked.append(track) + tracked.sort(key=lambda t: len(t.p0), reverse=True) + return tracked + + def detect_features(self, frame): + (keypoints, descrs) = self.detector.detectAndCompute(frame, None) + if descrs is None: + descrs = [] + return (keypoints, descrs) + +class App: + + def __init__(self, src): + self.cap = video.create_capture(src, presets['book']) + self.frame = None + self.paused = False + self.tracker = PlaneTracker() + cv.namedWindow('plane') + self.rect_sel = common.RectSelector('plane', self.on_rect) + + def on_rect(self, rect): + self.tracker.add_target(self.frame, rect) + + def run(self): + while True: + playing = not self.paused and (not self.rect_sel.dragging) + if playing or self.frame is None: + (ret, frame) = self.cap.read() + if not ret: + break + self.frame = frame.copy() + vis = self.frame.copy() + if playing: + tracked = self.tracker.track(self.frame) + for tr in tracked: + cv.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2) + for (x, y) in np.int32(tr.p1): + cv.circle(vis, (x, y), 2, (255, 255, 255)) + self.rect_sel.draw(vis) + cv.imshow('plane', vis) + ch = cv.waitKey(1) + if ch == ord(' '): + self.paused = not self.paused + if ch == ord('c'): + self.tracker.clear() + if ch == 27: + break +if __name__ == '__main__': + print(__doc__) + import sys + try: + video_src = sys.argv[1] + except IndexError: + video_src = 0 + App(video_src).run() + +# File: opencv-master/samples/python/qrcode.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import argparse +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range + +class QrSample: + + def __init__(self, args): + self.fname = '' + self.fext = '' + self.fsaveid = 0 + self.input = args.input + self.detect = args.detect + self.out = args.out + self.multi = args.multi + self.saveDetections = args.save_detections + self.saveAll = args.save_all + self.arucoBased = args.aruco_based + + def getQRModeString(self): + msg1 = 'multi ' if self.multi else '' + msg2 = 'detector' if self.detect else 'decoder' + msg = 'QR {:s}{:s}'.format(msg1, msg2) + return msg + + def drawFPS(self, result, fps): + message = '{:.2f} FPS({:s})'.format(fps, self.getQRModeString()) + cv.putText(result, message, (20, 20), cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255)) + + def drawQRCodeContours(self, image, cnt): + if cnt.size != 0: + (rows, cols, _) = image.shape + show_radius = 2.813 * (rows / cols if rows > cols else cols / rows) + contour_radius = show_radius * 0.4 + cv.drawContours(image, [cnt], 0, (0, 255, 0), int(round(contour_radius))) + tpl = cnt.reshape((-1, 2)) + for x in tuple(tpl.tolist()): + color = (255, 0, 0) + cv.circle(image, tuple(x), int(round(contour_radius)), color, -1) + + def drawQRCodeResults(self, result, points, decode_info, fps): + n = len(points) + if isinstance(decode_info, str): + decode_info = [decode_info] + if n > 0: + for i in range(n): + cnt = np.array(points[i]).reshape((-1, 1, 2)).astype(np.int32) + self.drawQRCodeContours(result, cnt) + msg = 'QR[{:d}]@{} : '.format(i, *cnt.reshape(1, -1).tolist()) + print(msg, end='') + if len(decode_info) > i: + if decode_info[i]: + print("'", decode_info[i], "'") + else: + print("Can't decode QR code") + 
else: + print('Decode information is not available (disabled)') + else: + print('QRCode not detected!') + self.drawFPS(result, fps) + + def runQR(self, qrCode, inputimg): + if not self.multi: + if not self.detect: + (decode_info, points, _) = qrCode.detectAndDecode(inputimg) + dec_info = decode_info + else: + (_, points) = qrCode.detect(inputimg) + dec_info = [] + elif not self.detect: + (_, decode_info, points, _) = qrCode.detectAndDecodeMulti(inputimg) + dec_info = decode_info + else: + (_, points) = qrCode.detectMulti(inputimg) + dec_info = [] + if points is None: + points = [] + return (points, dec_info) + + def DetectQRFrmImage(self, inputfile): + inputimg = cv.imread(inputfile, cv.IMREAD_COLOR) + if inputimg is None: + print('ERROR: Can not read image: {}'.format(inputfile)) + return + print('Run {:s} on image [{:d}x{:d}]'.format(self.getQRModeString(), inputimg.shape[1], inputimg.shape[0])) + if self.arucoBased: + qrCode = cv.QRCodeDetectorAruco() + else: + qrCode = cv.QRCodeDetector() + count = 10 + timer = cv.TickMeter() + for _ in range(count): + timer.start() + (points, decode_info) = self.runQR(qrCode, inputimg) + timer.stop() + fps = count / timer.getTimeSec() + print('FPS: {}'.format(fps)) + result = inputimg + self.drawQRCodeResults(result, points, decode_info, fps) + cv.imshow('QR', result) + cv.waitKey(1) + if self.out != '': + outfile = self.fname + self.fext + print('Saving Result: {}'.format(outfile)) + cv.imwrite(outfile, result) + print('Press any key to exit ...') + cv.waitKey(0) + print('Exit') + + def processQRCodeDetection(self, qrcode, frame): + if len(frame.shape) == 2: + result = cv.cvtColor(frame, cv.COLOR_GRAY2BGR) + else: + result = frame + print('Run {:s} on video frame [{:d}x{:d}]'.format(self.getQRModeString(), frame.shape[1], frame.shape[0])) + timer = cv.TickMeter() + timer.start() + (points, decode_info) = self.runQR(qrcode, frame) + timer.stop() + fps = 1 / timer.getTimeSec() + self.drawQRCodeResults(result, points, decode_info, fps) + return (fps, result, points) + + def DetectQRFrmCamera(self): + cap = cv.VideoCapture(0) + if not cap.isOpened(): + print('Cannot open the camera') + return + print("Press 'm' to switch between detectAndDecode and detectAndDecodeMulti") + print("Press 'd' to switch between decoder and detector") + print("Press ' ' (space) to save result into images") + print("Press 'ESC' to exit") + if self.arucoBased: + qrcode = cv.QRCodeDetectorAruco() + else: + qrcode = cv.QRCodeDetector() + while True: + (ret, frame) = cap.read() + if not ret: + print('End of video stream') + break + forcesave = self.saveAll + result = frame + try: + (fps, result, corners) = self.processQRCodeDetection(qrcode, frame) + print('FPS: {:.2f}'.format(fps)) + forcesave |= self.saveDetections and len(corners) != 0 + except cv.error as e: + print('Error exception: ', e) + forcesave = True + cv.imshow('QR code', result) + code = cv.waitKey(1) + if code < 0 and (not forcesave): + continue + if code == ord(' ') or forcesave: + fsuffix = '-{:05d}'.format(self.fsaveid) + self.fsaveid += 1 + fname_in = self.fname + fsuffix + '_input.png' + print("Saving QR code detection result: '{}' ...".format(fname_in)) + cv.imwrite(fname_in, frame) + print('Saved') + if code == ord('m'): + self.multi = not self.multi + msg = 'Switching QR code mode ==> {:s}'.format('detectAndDecodeMulti' if self.multi else 'detectAndDecode') + print(msg) + if code == ord('d'): + self.detect = not self.detect + msg = 'Switching QR code mode ==> {:s}'.format('detect' if self.detect else 
'decode') + print(msg) + if code == 27: + print("'ESC' is pressed. Exiting...") + break + print('Exit.') + +def main(): + parser = argparse.ArgumentParser(description='This program detects QR codes in input images using the OpenCV library.') + parser.add_argument('-i', '--input', help="input image path (for example, 'opencv_extra/testdata/cv/qrcode/multiple/*_qrcodes.png')", default='', metavar='') + parser.add_argument('--aruco_based', help='use aruco-based detector', action='store_true') + parser.add_argument('-d', '--detect', help='detect QR code only (skip decoding) (default: False)', action='store_true') + parser.add_argument('-m', '--multi', help='enable multiple qr-codes detection', action='store_true') + parser.add_argument('-o', '--out', help='path to result file (default: qr_code.png)', default='qr_code.png', metavar='') + parser.add_argument('--save_detections', help='save all QR detections (video mode only)', action='store_true') + parser.add_argument('--save_all', help='save all processed frames (video mode only)', action='store_true') + args = parser.parse_args() + qrinst = QrSample(args) + if args.out != '': + index = args.out.rfind('.') + if index != -1: + qrinst.fname = args.out[:index] + qrinst.fext = args.out[index:] + else: + qrinst.fname = args.out + qrinst.fext = '.png' + if args.input != '': + qrinst.DetectQRFrmImage(args.input) + else: + qrinst.DetectQRFrmCamera() +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/squares.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv + +def angle_cos(p0, p1, p2): + (d1, d2) = ((p0 - p1).astype('float'), (p2 - p1).astype('float')) + return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2))) + +def find_squares(img): + img = cv.GaussianBlur(img, (5, 5), 0) + squares = [] + for gray in cv.split(img): + for thrs in xrange(0, 255, 26): + if thrs == 0: + bin = cv.Canny(gray, 0, 50, apertureSize=5) + bin = cv.dilate(bin, None) + else: + (_retval, bin) = cv.threshold(gray, thrs, 255, cv.THRESH_BINARY) + (contours, _hierarchy) = cv.findContours(bin, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) + for cnt in contours: + cnt_len = cv.arcLength(cnt, True) + cnt = cv.approxPolyDP(cnt, 0.02 * cnt_len, True) + if len(cnt) == 4 and cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt): + cnt = cnt.reshape(-1, 2) + max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in xrange(4)]) + if max_cos < 0.1: + squares.append(cnt) + return squares + +def main(): + from glob import glob + for fn in glob('../data/pic*.png'): + img = cv.imread(fn) + squares = find_squares(img) + cv.drawContours(img, squares, -1, (0, 255, 0), 3) + cv.imshow('squares', img) + ch = cv.waitKey() + if ch == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/stereo_match.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +ply_header = 'ply\nformat ascii 1.0\nelement vertex %(vert_num)d\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nend_header\n' + +def write_ply(fn, verts, colors): + verts = verts.reshape(-1, 3) + colors = colors.reshape(-1, 3) + verts = np.hstack([verts, colors]) + with open(fn, 'wb') as f: + f.write((ply_header % dict(vert_num=len(verts))).encode('utf-8')) + 
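+ # one ASCII record per vertex, 'x y z r g b', matching the vert_num count declared in the PLY header above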
np.savetxt(f, verts, fmt='%f %f %f %d %d %d ') + +def main(): + print('loading images...') + imgL = cv.pyrDown(cv.imread(cv.samples.findFile('aloeL.jpg'))) + imgR = cv.pyrDown(cv.imread(cv.samples.findFile('aloeR.jpg'))) + window_size = 3 + min_disp = 16 + num_disp = 112 - min_disp + stereo = cv.StereoSGBM_create(minDisparity=min_disp, numDisparities=num_disp, blockSize=16, P1=8 * 3 * window_size ** 2, P2=32 * 3 * window_size ** 2, disp12MaxDiff=1, uniquenessRatio=10, speckleWindowSize=100, speckleRange=32) + print('computing disparity...') + disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0 + print('generating 3d point cloud...') + (h, w) = imgL.shape[:2] + f = 0.8 * w + Q = np.float32([[1, 0, 0, -0.5 * w], [0, -1, 0, 0.5 * h], [0, 0, 0, -f], [0, 0, 1, 0]]) + points = cv.reprojectImageTo3D(disp, Q) + colors = cv.cvtColor(imgL, cv.COLOR_BGR2RGB) + mask = disp > disp.min() + out_points = points[mask] + out_colors = colors[mask] + out_fn = 'out.ply' + write_ply(out_fn, out_points, out_colors) + print('%s saved' % out_fn) + cv.imshow('left', imgL) + cv.imshow('disparity', (disp - min_disp) / num_disp) + cv.waitKey() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/stitching.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv +import argparse +import sys +modes = (cv.Stitcher_PANORAMA, cv.Stitcher_SCANS) +parser = argparse.ArgumentParser(prog='stitching.py', description='Stitching sample.') +parser.add_argument('--mode', type=int, choices=modes, default=cv.Stitcher_PANORAMA, help='Determines configuration of stitcher. The default is `PANORAMA` (%d), mode suitable for creating photo panoramas. Option `SCANS` (%d) is suitable for stitching materials under affine transformation, such as scans.' % modes) +parser.add_argument('--output', default='result.jpg', help='Resulting image. The default is `result.jpg`.') +parser.add_argument('img', nargs='+', help='input images') +__doc__ += '\n' + parser.format_help() + +def main(): + args = parser.parse_args() + imgs = [] + for img_name in args.img: + img = cv.imread(cv.samples.findFile(img_name)) + if img is None: + print("can't read image " + img_name) + sys.exit(-1) + imgs.append(img) + stitcher = cv.Stitcher.create(args.mode) + (status, pano) = stitcher.stitch(imgs) + if status != cv.Stitcher_OK: + print("Can't stitch images, error code = %d" % status) + sys.exit(-1) + cv.imwrite(args.output, pano) + print('stitching completed successfully. %s saved!' 
% args.output) + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/stitching_detailed.py +"""""" +from __future__ import print_function +import argparse +from collections import OrderedDict +import cv2 as cv +import numpy as np +EXPOS_COMP_CHOICES = OrderedDict() +EXPOS_COMP_CHOICES['gain_blocks'] = cv.detail.ExposureCompensator_GAIN_BLOCKS +EXPOS_COMP_CHOICES['gain'] = cv.detail.ExposureCompensator_GAIN +EXPOS_COMP_CHOICES['channel'] = cv.detail.ExposureCompensator_CHANNELS +EXPOS_COMP_CHOICES['channel_blocks'] = cv.detail.ExposureCompensator_CHANNELS_BLOCKS +EXPOS_COMP_CHOICES['no'] = cv.detail.ExposureCompensator_NO +BA_COST_CHOICES = OrderedDict() +BA_COST_CHOICES['ray'] = cv.detail_BundleAdjusterRay +BA_COST_CHOICES['reproj'] = cv.detail_BundleAdjusterReproj +BA_COST_CHOICES['affine'] = cv.detail_BundleAdjusterAffinePartial +BA_COST_CHOICES['no'] = cv.detail_NoBundleAdjuster +FEATURES_FIND_CHOICES = OrderedDict() +try: + cv.xfeatures2d_SURF.create() + FEATURES_FIND_CHOICES['surf'] = cv.xfeatures2d_SURF.create +except (AttributeError, cv.error) as e: + print('SURF not available') +FEATURES_FIND_CHOICES['orb'] = cv.ORB.create +try: + FEATURES_FIND_CHOICES['sift'] = cv.SIFT_create +except AttributeError: + print('SIFT not available') +try: + FEATURES_FIND_CHOICES['brisk'] = cv.BRISK_create +except AttributeError: + print('BRISK not available') +try: + FEATURES_FIND_CHOICES['akaze'] = cv.AKAZE_create +except AttributeError: + print('AKAZE not available') +SEAM_FIND_CHOICES = OrderedDict() +SEAM_FIND_CHOICES['gc_color'] = cv.detail_GraphCutSeamFinder('COST_COLOR') +SEAM_FIND_CHOICES['gc_colorgrad'] = cv.detail_GraphCutSeamFinder('COST_COLOR_GRAD') +SEAM_FIND_CHOICES['dp_color'] = cv.detail_DpSeamFinder('COLOR') +SEAM_FIND_CHOICES['dp_colorgrad'] = cv.detail_DpSeamFinder('COLOR_GRAD') +SEAM_FIND_CHOICES['voronoi'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM) +SEAM_FIND_CHOICES['no'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO) +ESTIMATOR_CHOICES = OrderedDict() +ESTIMATOR_CHOICES['homography'] = cv.detail_HomographyBasedEstimator +ESTIMATOR_CHOICES['affine'] = cv.detail_AffineBasedEstimator +WARP_CHOICES = ('spherical', 'plane', 'affine', 'cylindrical', 'fisheye', 'stereographic', 'compressedPlaneA2B1', 'compressedPlaneA1.5B1', 'compressedPlanePortraitA2B1', 'compressedPlanePortraitA1.5B1', 'paniniA2B1', 'paniniA1.5B1', 'paniniPortraitA2B1', 'paniniPortraitA1.5B1', 'mercator', 'transverseMercator') +WAVE_CORRECT_CHOICES = OrderedDict() +WAVE_CORRECT_CHOICES['horiz'] = cv.detail.WAVE_CORRECT_HORIZ +WAVE_CORRECT_CHOICES['no'] = None +WAVE_CORRECT_CHOICES['vert'] = cv.detail.WAVE_CORRECT_VERT +BLEND_CHOICES = ('multiband', 'feather', 'no') +parser = argparse.ArgumentParser(prog='stitching_detailed.py', description='Rotation model images stitcher') +parser.add_argument('img_names', nargs='+', help='Files to stitch', type=str) +parser.add_argument('--try_cuda', action='store', default=False, help='Try to use CUDA. The default value is no. All default values are for CPU mode.', type=bool, dest='try_cuda') +parser.add_argument('--work_megapix', action='store', default=0.6, help='Resolution for image registration step. The default is 0.6 Mpx', type=float, dest='work_megapix') +parser.add_argument('--features', action='store', default=list(FEATURES_FIND_CHOICES.keys())[0], help="Type of features used for images matching. The default is '%s'." 
% list(FEATURES_FIND_CHOICES.keys())[0], choices=FEATURES_FIND_CHOICES.keys(), type=str, dest='features') +parser.add_argument('--matcher', action='store', default='homography', help="Matcher used for pairwise image matching. The default is 'homography'.", choices=('homography', 'affine'), type=str, dest='matcher') +parser.add_argument('--estimator', action='store', default=list(ESTIMATOR_CHOICES.keys())[0], help="Type of estimator used for transformation estimation. The default is '%s'." % list(ESTIMATOR_CHOICES.keys())[0], choices=ESTIMATOR_CHOICES.keys(), type=str, dest='estimator') +parser.add_argument('--match_conf', action='store', help='Confidence for feature matching step. The default is 0.3 for ORB and 0.65 for other feature types.', type=float, dest='match_conf') +parser.add_argument('--conf_thresh', action='store', default=1.0, help='Confidence threshold for deciding whether two images are from the same panorama. The default is 1.0.', type=float, dest='conf_thresh') +parser.add_argument('--ba', action='store', default=list(BA_COST_CHOICES.keys())[0], help="Bundle adjustment cost function. The default is '%s'." % list(BA_COST_CHOICES.keys())[0], choices=BA_COST_CHOICES.keys(), type=str, dest='ba') +parser.add_argument('--ba_refine_mask', action='store', default='xxxxx', help="Set refinement mask for bundle adjustment. It looks like 'x_xxx', where 'x' means refine respective parameter and '_' means don't refine, and has the following format: <fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle adjustment doesn't support estimation of selected parameter then the respective flag is ignored.", type=str, dest='ba_refine_mask') +parser.add_argument('--wave_correct', action='store', default=list(WAVE_CORRECT_CHOICES.keys())[0], help="Perform wave effect correction. The default is '%s'" % list(WAVE_CORRECT_CHOICES.keys())[0], choices=WAVE_CORRECT_CHOICES.keys(), type=str, dest='wave_correct') +parser.add_argument('--save_graph', action='store', default=None, help='Save matches graph represented in DOT language to file.', type=str, dest='save_graph') +parser.add_argument('--warp', action='store', default=WARP_CHOICES[0], help="Warp surface type. The default is '%s'." % WARP_CHOICES[0], choices=WARP_CHOICES, type=str, dest='warp') +parser.add_argument('--seam_megapix', action='store', default=0.1, help='Resolution for seam estimation step. The default is 0.1 Mpx.', type=float, dest='seam_megapix') +parser.add_argument('--seam', action='store', default=list(SEAM_FIND_CHOICES.keys())[0], help="Seam estimation method. The default is '%s'." % list(SEAM_FIND_CHOICES.keys())[0], choices=SEAM_FIND_CHOICES.keys(), type=str, dest='seam') +parser.add_argument('--compose_megapix', action='store', default=-1, help='Resolution for compositing step. Use -1 for original resolution. The default is -1', type=float, dest='compose_megapix') +parser.add_argument('--expos_comp', action='store', default=list(EXPOS_COMP_CHOICES.keys())[0], help="Exposure compensation method. The default is '%s'." % list(EXPOS_COMP_CHOICES.keys())[0], choices=EXPOS_COMP_CHOICES.keys(), type=str, dest='expos_comp')
+parser.add_argument('--expos_comp_nr_feeds', action='store', default=1, help='Number of exposure compensation feeds.', type=np.int32, dest='expos_comp_nr_feeds') +parser.add_argument('--expos_comp_nr_filtering', action='store', default=2, help='Number of filtering iterations of the exposure compensation gains.', type=float, dest='expos_comp_nr_filtering') +parser.add_argument('--expos_comp_block_size', action='store', default=32, help='Block size in pixels used by the exposure compensator. The default is 32.', type=np.int32, dest='expos_comp_block_size') +parser.add_argument('--blend', action='store', default=BLEND_CHOICES[0], help="Blending method. The default is '%s'." % BLEND_CHOICES[0], choices=BLEND_CHOICES, type=str, dest='blend') +parser.add_argument('--blend_strength', action='store', default=5, help='Blending strength from [0,100] range. The default is 5', type=np.int32, dest='blend_strength') +parser.add_argument('--output', action='store', default='result.jpg', help="The default is 'result.jpg'", type=str, dest='output') +parser.add_argument('--timelapse', action='store', default=None, help="Output warped images separately as frames of a time lapse movie, with 'fixed_' prepended to input file names.", type=str, dest='timelapse') +parser.add_argument('--rangewidth', action='store', default=-1, help='uses range_width to limit number of images to match with.', type=int, dest='rangewidth') +__doc__ += '\n' + parser.format_help() + +def get_matcher(args): + try_cuda = args.try_cuda + matcher_type = args.matcher + if args.match_conf is None: + if args.features == 'orb': + match_conf = 0.3 + else: + match_conf = 0.65 + else: + match_conf = args.match_conf + range_width = args.rangewidth + if matcher_type == 'affine': + matcher = cv.detail_AffineBestOf2NearestMatcher(False, try_cuda, match_conf) + elif range_width == -1: + matcher = cv.detail_BestOf2NearestMatcher(try_cuda, match_conf) + else: + matcher = cv.detail_BestOf2NearestRangeMatcher(range_width, try_cuda, match_conf) + return matcher + +def get_compensator(args): + expos_comp_type = EXPOS_COMP_CHOICES[args.expos_comp] + expos_comp_nr_feeds = args.expos_comp_nr_feeds + expos_comp_block_size = args.expos_comp_block_size + if expos_comp_type == cv.detail.ExposureCompensator_CHANNELS: + compensator = cv.detail_ChannelsCompensator(expos_comp_nr_feeds) + elif expos_comp_type == cv.detail.ExposureCompensator_CHANNELS_BLOCKS: + compensator = cv.detail_BlocksChannelsCompensator(expos_comp_block_size, expos_comp_block_size, expos_comp_nr_feeds) + else: + compensator = cv.detail.ExposureCompensator_createDefault(expos_comp_type) + return compensator + +def main(): + args = parser.parse_args() + img_names = args.img_names + print(img_names) + work_megapix = args.work_megapix + seam_megapix = args.seam_megapix + compose_megapix = args.compose_megapix + conf_thresh = args.conf_thresh + ba_refine_mask = args.ba_refine_mask + wave_correct = WAVE_CORRECT_CHOICES[args.wave_correct] + if args.save_graph is None: + save_graph = False + else: + save_graph = True + warp_type = args.warp + blend_type = args.blend + blend_strength = args.blend_strength + result_name = args.output + if args.timelapse is not None: + timelapse = True + if args.timelapse == 'as_is': + timelapse_type = cv.detail.Timelapser_AS_IS + elif args.timelapse == 'crop': + timelapse_type = cv.detail.Timelapser_CROP + else: + print('Bad timelapse method') + exit() + else: + timelapse 
= False + finder = FEATURES_FIND_CHOICES[args.features]() + seam_work_aspect = 1 + full_img_sizes = [] + features = [] + images = [] + is_work_scale_set = False + is_seam_scale_set = False + is_compose_scale_set = False + for name in img_names: + full_img = cv.imread(cv.samples.findFile(name)) + if full_img is None: + print('Cannot read image ', name) + exit() + full_img_sizes.append((full_img.shape[1], full_img.shape[0])) + if work_megapix < 0: + img = full_img + work_scale = 1 + is_work_scale_set = True + else: + if is_work_scale_set is False: + work_scale = min(1.0, np.sqrt(work_megapix * 1000000.0 / (full_img.shape[0] * full_img.shape[1]))) + is_work_scale_set = True + img = cv.resize(src=full_img, dsize=None, fx=work_scale, fy=work_scale, interpolation=cv.INTER_LINEAR_EXACT) + if is_seam_scale_set is False: + if seam_megapix > 0: + seam_scale = min(1.0, np.sqrt(seam_megapix * 1000000.0 / (full_img.shape[0] * full_img.shape[1]))) + else: + seam_scale = 1.0 + seam_work_aspect = seam_scale / work_scale + is_seam_scale_set = True + img_feat = cv.detail.computeImageFeatures2(finder, img) + features.append(img_feat) + img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale, interpolation=cv.INTER_LINEAR_EXACT) + images.append(img) + matcher = get_matcher(args) + p = matcher.apply2(features) + matcher.collectGarbage() + if save_graph: + with open(args.save_graph, 'w') as fh: + fh.write(cv.detail.matchesGraphAsString(img_names, p, conf_thresh)) + indices = cv.detail.leaveBiggestComponent(features, p, conf_thresh) + img_subset = [] + img_names_subset = [] + full_img_sizes_subset = [] + for i in range(len(indices)): + img_names_subset.append(img_names[indices[i]]) + img_subset.append(images[indices[i]]) + full_img_sizes_subset.append(full_img_sizes[indices[i]]) + images = img_subset + img_names = img_names_subset + full_img_sizes = full_img_sizes_subset + num_images = len(img_names) + if num_images < 2: + print('Need more images') + exit() + estimator = ESTIMATOR_CHOICES[args.estimator]() + (b, cameras) = estimator.apply(features, p, None) + if not b: + print('Homography estimation failed.') + exit() + for cam in cameras: + cam.R = cam.R.astype(np.float32) + adjuster = BA_COST_CHOICES[args.ba]() + adjuster.setConfThresh(conf_thresh) + refine_mask = np.zeros((3, 3), np.uint8) + if ba_refine_mask[0] == 'x': + refine_mask[0, 0] = 1 + if ba_refine_mask[1] == 'x': + refine_mask[0, 1] = 1 + if ba_refine_mask[2] == 'x': + refine_mask[0, 2] = 1 + if ba_refine_mask[3] == 'x': + refine_mask[1, 1] = 1 + if ba_refine_mask[4] == 'x': + refine_mask[1, 2] = 1 + adjuster.setRefinementMask(refine_mask) + (b, cameras) = adjuster.apply(features, p, cameras) + if not b: + print('Camera parameters adjusting failed.') + exit() + focals = [] + for cam in cameras: + focals.append(cam.focal) + focals.sort() + if len(focals) % 2 == 1: + warped_image_scale = focals[len(focals) // 2] + else: + warped_image_scale = (focals[len(focals) // 2] + focals[len(focals) // 2 - 1]) / 2 + if wave_correct is not None: + rmats = [] + for cam in cameras: + rmats.append(np.copy(cam.R)) + rmats = cv.detail.waveCorrect(rmats, wave_correct) + for (idx, cam) in enumerate(cameras): + cam.R = rmats[idx] + corners = [] + masks_warped = [] + images_warped = [] + sizes = [] + masks = [] + for i in range(0, num_images): + um = cv.UMat(255 * np.ones((images[i].shape[0], images[i].shape[1]), np.uint8)) + masks.append(um) + warper = cv.PyRotationWarper(warp_type, warped_image_scale * seam_work_aspect) + for idx in range(0, num_images): 
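+ # scale the camera intrinsics by the seam/work aspect so K matches the seam-resolution images being warped in this loop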
+ K = cameras[idx].K().astype(np.float32) + swa = seam_work_aspect + K[0, 0] *= swa + K[0, 2] *= swa + K[1, 1] *= swa + K[1, 2] *= swa + (corner, image_wp) = warper.warp(images[idx], K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT) + corners.append(corner) + sizes.append((image_wp.shape[1], image_wp.shape[0])) + images_warped.append(image_wp) + (p, mask_wp) = warper.warp(masks[idx], K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT) + masks_warped.append(mask_wp.get()) + images_warped_f = [] + for img in images_warped: + imgf = img.astype(np.float32) + images_warped_f.append(imgf) + compensator = get_compensator(args) + compensator.feed(corners=corners, images=images_warped, masks=masks_warped) + seam_finder = SEAM_FIND_CHOICES[args.seam] + masks_warped = seam_finder.find(images_warped_f, corners, masks_warped) + compose_scale = 1 + corners = [] + sizes = [] + blender = None + timelapser = None + for (idx, name) in enumerate(img_names): + full_img = cv.imread(name) + if not is_compose_scale_set: + if compose_megapix > 0: + compose_scale = min(1.0, np.sqrt(compose_megapix * 1000000.0 / (full_img.shape[0] * full_img.shape[1]))) + is_compose_scale_set = True + compose_work_aspect = compose_scale / work_scale + warped_image_scale *= compose_work_aspect + warper = cv.PyRotationWarper(warp_type, warped_image_scale) + for i in range(0, len(img_names)): + cameras[i].focal *= compose_work_aspect + cameras[i].ppx *= compose_work_aspect + cameras[i].ppy *= compose_work_aspect + sz = (int(round(full_img_sizes[i][0] * compose_scale)), int(round(full_img_sizes[i][1] * compose_scale))) + K = cameras[i].K().astype(np.float32) + roi = warper.warpRoi(sz, K, cameras[i].R) + corners.append(roi[0:2]) + sizes.append(roi[2:4]) + if abs(compose_scale - 1) > 0.1: + img = cv.resize(src=full_img, dsize=None, fx=compose_scale, fy=compose_scale, interpolation=cv.INTER_LINEAR_EXACT) + else: + img = full_img + _img_size = (img.shape[1], img.shape[0]) + K = cameras[idx].K().astype(np.float32) + (corner, image_warped) = warper.warp(img, K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT) + mask = 255 * np.ones((img.shape[0], img.shape[1]), np.uint8) + (p, mask_warped) = warper.warp(mask, K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT) + compensator.apply(idx, corners[idx], image_warped, mask_warped) + image_warped_s = image_warped.astype(np.int16) + dilated_mask = cv.dilate(masks_warped[idx], None) + seam_mask = cv.resize(dilated_mask, (mask_warped.shape[1], mask_warped.shape[0]), 0, 0, cv.INTER_LINEAR_EXACT) + mask_warped = cv.bitwise_and(seam_mask, mask_warped) + if blender is None and (not timelapse): + blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO) + dst_sz = cv.detail.resultRoi(corners=corners, sizes=sizes) + blend_width = np.sqrt(dst_sz[2] * dst_sz[3]) * blend_strength / 100 + if blend_width < 1: + blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO) + elif blend_type == 'multiband': + blender = cv.detail_MultiBandBlender() + blender.setNumBands((np.log(blend_width) / np.log(2.0) - 1.0).astype(np.int32)) + elif blend_type == 'feather': + blender = cv.detail_FeatherBlender() + blender.setSharpness(1.0 / blend_width) + blender.prepare(dst_sz) + elif timelapser is None and timelapse: + timelapser = cv.detail.Timelapser_createDefault(timelapse_type) + timelapser.initialize(corners, sizes) + if timelapse: + ma_tones = np.ones((image_warped_s.shape[0], image_warped_s.shape[1]), np.uint8) + timelapser.process(image_warped_s, ma_tones, corners[idx]) + pos_s = 
img_names[idx].rfind('/') + if pos_s == -1: + fixed_file_name = 'fixed_' + img_names[idx] + else: + fixed_file_name = img_names[idx][:pos_s + 1] + 'fixed_' + img_names[idx][pos_s + 1:] + cv.imwrite(fixed_file_name, timelapser.getDst()) + else: + blender.feed(cv.UMat(image_warped_s), mask_warped, corners[idx]) + if not timelapse: + result = None + result_mask = None + (result, result_mask) = blender.blend(result, result_mask) + cv.imwrite(result_name, result) + zoom_x = 600.0 / result.shape[1] + dst = cv.normalize(src=result, dst=None, alpha=255.0, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U) + dst = cv.resize(dst, dsize=None, fx=zoom_x, fy=zoom_x) + cv.imshow(result_name, dst) + cv.waitKey() + print('Done') +if __name__ == '__main__': + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/text_skewness_correction.py +"""""" +import numpy as np +import cv2 as cv +import sys +import argparse + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--image', default='imageTextR.png', help='path to input image file') + args = vars(parser.parse_args()) + image = cv.imread(cv.samples.findFile(args['image'])) + if image is None: + print("can't read image " + args['image']) + sys.exit(-1) + gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY) + thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)[1] + erosion_size = 1 + element = cv.getStructuringElement(cv.MORPH_RECT, (2 * erosion_size + 1, 2 * erosion_size + 1), (erosion_size, erosion_size)) + thresh = cv.erode(thresh, element) + coords = cv.findNonZero(thresh) + angle = cv.minAreaRect(coords)[-1] + if angle > 45: + angle = angle - 90 + (h, w) = image.shape[:2] + center = (w // 2, h // 2) + M = cv.getRotationMatrix2D(center, angle, 1.0) + rotated = cv.warpAffine(image, M, (w, h), flags=cv.INTER_CUBIC, borderMode=cv.BORDER_REPLICATE) + cv.putText(rotated, 'Angle: {:.2f} degrees'.format(angle), (10, 30), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) + print('[INFO] angle: {:.2f}'.format(angle)) + cv.imshow('Input', image) + cv.imshow('Rotated', rotated) + cv.waitKey(0) +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/texture_flow.py +"""""" +from __future__ import print_function +import numpy as np +import cv2 as cv + +def main(): + import sys + try: + fn = sys.argv[1] + except: + fn = 'starry_night.jpg' + img = cv.imread(cv.samples.findFile(fn)) + if img is None: + print('Failed to load image file:', fn) + sys.exit(1) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + (h, w) = img.shape[:2] + eigen = cv.cornerEigenValsAndVecs(gray, 15, 3) + eigen = eigen.reshape(h, w, 3, 2) + flow = eigen[:, :, 2] + vis = img.copy() + vis[:] = (192 + np.uint32(vis)) / 2 + d = 12 + points = np.dstack(np.mgrid[d / 2:w:d, d / 2:h:d]).reshape(-1, 2) + for (x, y) in np.int32(points): + (vx, vy) = np.int32(flow[y, x] * d) + cv.line(vis, (x - vx, y - vy), (x + vx, y + vy), (0, 0, 0), 1, cv.LINE_AA) + cv.imshow('input', img) + cv.imshow('flow', vis) + cv.waitKey() + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/tracker.py +"""""" +from __future__ import print_function +import sys +import numpy as np +import cv2 as cv +import argparse +from video import create_capture, presets +backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) +targets = 
(cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) + +class App(object): + + def __init__(self, args): + self.args = args + self.trackerAlgorithm = args.tracker_algo + self.tracker = self.createTracker() + + def createTracker(self): + if self.trackerAlgorithm == 'mil': + tracker = cv.TrackerMIL_create() + elif self.trackerAlgorithm == 'goturn': + params = cv.TrackerGOTURN_Params() + params.modelTxt = self.args.goturn + params.modelBin = self.args.goturn_model + tracker = cv.TrackerGOTURN_create(params) + elif self.trackerAlgorithm == 'dasiamrpn': + params = cv.TrackerDaSiamRPN_Params() + params.model = self.args.dasiamrpn_net + params.kernel_cls1 = self.args.dasiamrpn_kernel_cls1 + params.kernel_r1 = self.args.dasiamrpn_kernel_r1 + params.backend = self.args.backend + params.target = self.args.target + tracker = cv.TrackerDaSiamRPN_create(params) + elif self.trackerAlgorithm == 'nanotrack': + params = cv.TrackerNano_Params() + params.backbone = self.args.nanotrack_backbone + params.neckhead = self.args.nanotrack_headneck + params.backend = self.args.backend + params.target = self.args.target + tracker = cv.TrackerNano_create(params) + elif self.trackerAlgorithm == 'vittrack': + params = cv.TrackerVit_Params() + params.net = self.args.vittrack_net + params.tracking_score_threshold = self.args.tracking_score_threshold + params.backend = self.args.backend + params.target = self.args.target + tracker = cv.TrackerVit_create(params) + else: + sys.exit('Tracker {} is not recognized. Please use one of the available trackers: mil, goturn, dasiamrpn, nanotrack, vittrack.'.format(self.trackerAlgorithm)) + return tracker + + def initializeTracker(self, image): + while True: + print('==> Select object ROI for tracker ...') + bbox = cv.selectROI('tracking', image) + print('ROI: {}'.format(bbox)) + if bbox[2] <= 0 or bbox[3] <= 0: + sys.exit('ROI selection cancelled. Exiting...') + try: + self.tracker.init(image, bbox) + except Exception as e: + print('Unable to initialize tracker with requested bounding box. Is there any object?') + print(e) + print('Try again ...') + continue + return + + def run(self): + videoPath = self.args.input + print('Using video: {}'.format(videoPath)) + camera = create_capture(cv.samples.findFileOrKeep(videoPath), presets['cube']) + if not camera.isOpened(): + sys.exit("Can't open video stream: {}".format(videoPath)) + (ok, image) = camera.read() + if not ok: + sys.exit("Can't read first frame") + assert image is not None + cv.namedWindow('tracking') + self.initializeTracker(image) + print("==> Tracking is started. 
Press 'SPACE' to re-initialize tracker or 'ESC' for exit...") + while camera.isOpened(): + (ok, image) = camera.read() + if not ok: + print("Can't read frame") + break + (ok, newbox) = self.tracker.update(image) + if ok: + cv.rectangle(image, newbox, (200, 0, 0)) + cv.imshow('tracking', image) + k = cv.waitKey(1) + if k == 32: + self.initializeTracker(image) + if k == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + parser = argparse.ArgumentParser(description='Run tracker') + parser.add_argument('--input', type=str, default='vtest.avi', help='Path to video source') + parser.add_argument('--tracker_algo', type=str, default='nanotrack', help='One of available tracking algorithms: mil, goturn, dasiamrpn, nanotrack, vittrack') + parser.add_argument('--goturn', type=str, default='goturn.prototxt', help='Path to GOTURN architecture') + parser.add_argument('--goturn_model', type=str, default='goturn.caffemodel', help='Path to GOTURN model') + parser.add_argument('--dasiamrpn_net', type=str, default='dasiamrpn_model.onnx', help='Path to onnx model of DaSiamRPN net') + parser.add_argument('--dasiamrpn_kernel_r1', type=str, default='dasiamrpn_kernel_r1.onnx', help='Path to onnx model of DaSiamRPN kernel_r1') + parser.add_argument('--dasiamrpn_kernel_cls1', type=str, default='dasiamrpn_kernel_cls1.onnx', help='Path to onnx model of DaSiamRPN kernel_cls1') + parser.add_argument('--nanotrack_backbone', type=str, default='nanotrack_backbone_sim.onnx', help='Path to onnx model of NanoTrack backBone') + parser.add_argument('--nanotrack_headneck', type=str, default='nanotrack_head_sim.onnx', help='Path to onnx model of NanoTrack headNeck') + parser.add_argument('--vittrack_net', type=str, default='vitTracker.onnx', help='Path to onnx model of vittrack') + parser.add_argument('--tracking_score_threshold', type=float, help='Tracking score threshold. 
A bounding box whose score is greater than or equal to this threshold is considered found ') + parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, help="Choose one of computation backends: %d: automatically (by default), %d: Halide language (http://halide-lang.org/), %d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), %d: OpenCV implementation, %d: VKCOM, %d: CUDA" % backends) + parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: %d: CPU target (by default), %d: OpenCL, %d: OpenCL fp16 (half-float precision), %d: VPU, %d: VULKAN, %d: CUDA, %d: CUDA fp16 (half-float preprocess)' % targets) + args = parser.parse_args() + App(args).run() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/tst_scene_render.py +from __future__ import print_function +import numpy as np +import cv2 as cv +from numpy import pi, sin, cos +defaultSize = 512 + +class TestSceneRender: + + def __init__(self, bgImg=None, fgImg=None, deformation=False, speed=0.25, **params): + self.time = 0.0 + self.timeStep = 1.0 / 30.0 + self.foreground = fgImg + self.deformation = deformation + self.speed = speed + if bgImg is not None: + self.sceneBg = bgImg.copy() + else: + self.sceneBg = np.zeros((defaultSize, defaultSize, 3), np.uint8) + self.w = self.sceneBg.shape[0] + self.h = self.sceneBg.shape[1] + if fgImg is not None: + self.foreground = fgImg.copy() + self.center = self.currentCenter = (int(self.w / 2 - fgImg.shape[0] / 2), int(self.h / 2 - fgImg.shape[1] / 2)) + self.xAmpl = self.sceneBg.shape[0] - (self.center[0] + fgImg.shape[0]) + self.yAmpl = self.sceneBg.shape[1] - (self.center[1] + fgImg.shape[1]) + self.initialRect = np.array([(self.h / 2, self.w / 2), (self.h / 2, self.w / 2 + self.w / 10), (self.h / 2 + self.h / 10, self.w / 2 + self.w / 10), (self.h / 2 + self.h / 10, self.w / 2)]).astype(int) + self.currentRect = self.initialRect + + def getXOffset(self, time): + return int(self.xAmpl * cos(time * self.speed)) + + def getYOffset(self, time): + return int(self.yAmpl * sin(time * self.speed)) + + def setInitialRect(self, rect): + self.initialRect = rect + + def getRectInTime(self, time): + if self.foreground is not None: + tmp = np.array(self.center) + np.array((self.getXOffset(time), self.getYOffset(time))) + (x0, y0) = tmp + (x1, y1) = tmp + self.foreground.shape[0:2] + return np.array([y0, x0, y1, x1]) + else: + (x0, y0) = self.initialRect[0] + np.array((self.getXOffset(time), self.getYOffset(time))) + (x1, y1) = self.initialRect[2] + np.array((self.getXOffset(time), self.getYOffset(time))) + return np.array([y0, x0, y1, x1]) + + def getCurrentRect(self): + if self.foreground is not None: + x0 = self.currentCenter[0] + y0 = self.currentCenter[1] + x1 = self.currentCenter[0] + self.foreground.shape[0] + y1 = self.currentCenter[1] + self.foreground.shape[1] + return np.array([y0, x0, y1, x1]) + else: + (x0, y0) = self.currentRect[0] + (x1, y1) = self.currentRect[2] + return np.array([x0, y0, x1, y1]) + + def getNextFrame(self): + img = self.sceneBg.copy() + if self.foreground is not None: + self.currentCenter = (self.center[0] + self.getXOffset(self.time), self.center[1] + self.getYOffset(self.time)) + img[self.currentCenter[0]:self.currentCenter[0] + self.foreground.shape[0], self.currentCenter[1]:self.currentCenter[1] + self.foreground.shape[1]] = self.foreground + else: + self.currentRect = self.initialRect + int(30 * cos(self.time * self.speed) + 50 * sin(self.time * 
self.speed)) + if self.deformation: + self.currentRect[1:3] += int(self.h / 20 * cos(self.time)) + cv.fillConvexPoly(img, self.currentRect, (0, 0, 255)) + self.time += self.timeStep + return img + + def resetTime(self): + self.time = 0.0 + +def main(): + backGr = cv.imread(cv.samples.findFile('graf1.png')) + fgr = cv.imread(cv.samples.findFile('box.png')) + render = TestSceneRender(backGr, fgr) + while True: + img = render.getNextFrame() + cv.imshow('img', img) + ch = cv.waitKey(3) + if ch == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/turing.py +"""""" +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 +if PY3: + xrange = range +import numpy as np +import cv2 as cv +from common import draw_str +import getopt, sys +from itertools import count +help_message = '\nUSAGE: turing.py [-o <output>]\n\nPress ESC to stop.\n' + +def main(): + print(help_message) + (w, h) = (512, 512) + (args, _args_list) = getopt.getopt(sys.argv[1:], 'o:', []) + args = dict(args) + out = None + if '-o' in args: + fn = args['-o'] + out = cv.VideoWriter(args['-o'], cv.VideoWriter_fourcc(*'DIB '), 30.0, (w, h), False) + print('writing %s ...' % fn) + a = np.zeros((h, w), np.float32) + cv.randu(a, np.array([0]), np.array([1])) + + def process_scale(a_lods, lod): + d = a_lods[lod] - cv.pyrUp(a_lods[lod + 1]) + for _i in xrange(lod): + d = cv.pyrUp(d) + v = cv.GaussianBlur(d * d, (3, 3), 0) + return (np.sign(d), v) + scale_num = 6 + for frame_i in count(): + a_lods = [a] + for i in xrange(scale_num): + a_lods.append(cv.pyrDown(a_lods[-1])) + (ms, vs) = ([], []) + for i in xrange(1, scale_num): + (m, v) = process_scale(a_lods, i) + ms.append(m) + vs.append(v) + mi = np.argmin(vs, 0) + a += np.choose(mi, ms) * 0.025 + a = (a - a.min()) / a.ptp() + if out: + out.write(a) + vis = a.copy() + draw_str(vis, (20, 20), 'frame %d' % frame_i) + cv.imshow('a', vis) + if cv.waitKey(5) == 27: + break + print('Done') +if __name__ == '__main__': + print(__doc__) + main() + cv.destroyAllWindows() + +# File: opencv-master/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py +from __future__ import print_function +from __future__ import division +import cv2 as cv +import numpy as np +import argparse + +def Hist_and_Backproj(val): + bins = val + histSize = max(bins, 2) + ranges = [0, 180] + hist = cv.calcHist([hue], [0], None, [histSize], ranges, accumulate=False) + cv.normalize(hist, hist, alpha=0, beta=255, norm_type=cv.NORM_MINMAX) + backproj = cv.calcBackProject([hue], [0], hist, ranges, scale=1) + cv.imshow('BackProj', backproj) + w = 400 + h = 400 + bin_w = int(round(w / histSize)) + histImg = np.zeros((h, w, 3), dtype=np.uint8) + for i in range(bins): + cv.rectangle(histImg, (i * bin_w, h), ((i + 1) * bin_w, h - int(np.round(hist[i] * h / 255.0))), (0, 0, 255), cv.FILLED) + cv.imshow('Histogram', histImg) +parser = argparse.ArgumentParser(description='Code for Back Projection tutorial.') +parser.add_argument('--input', help='Path to input image.', default='home.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV) +ch = (0, 0) +hue = np.empty(hsv.shape, hsv.dtype) +cv.mixChannels([hsv], [hue], ch) +window_image = 'Source image' +cv.namedWindow(window_image) +bins = 25 +cv.createTrackbar('* Hue bins: ', window_image, 
bins, 180, Hist_and_Backproj) +Hist_and_Backproj(bins) +cv.imshow(window_image, src) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo2.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +low = 20 +up = 20 + +def callback_low(val): + global low + low = val + +def callback_up(val): + global up + up = val + +def pickPoint(event, x, y, flags, param): + if event != cv.EVENT_LBUTTONDOWN: + return + seed = (x, y) + newMaskVal = 255 + newVal = (120, 120, 120) + connectivity = 8 + flags = connectivity + (newMaskVal << 8) + cv.FLOODFILL_FIXED_RANGE + cv.FLOODFILL_MASK_ONLY + mask2 = np.zeros((src.shape[0] + 2, src.shape[1] + 2), dtype=np.uint8) + print('low:', low, 'up:', up) + cv.floodFill(src, mask2, seed, newVal, (low, low, low), (up, up, up), flags) + mask = mask2[1:-1, 1:-1] + cv.imshow('Mask', mask) + Hist_and_Backproj(mask) + +def Hist_and_Backproj(mask): + h_bins = 30 + s_bins = 32 + histSize = [h_bins, s_bins] + h_range = [0, 180] + s_range = [0, 256] + ranges = h_range + s_range + channels = [0, 1] + hist = cv.calcHist([hsv], channels, mask, histSize, ranges, accumulate=False) + cv.normalize(hist, hist, alpha=0, beta=255, norm_type=cv.NORM_MINMAX) + backproj = cv.calcBackProject([hsv], channels, hist, ranges, scale=1) + cv.imshow('BackProj', backproj) +parser = argparse.ArgumentParser(description='Code for Back Projection tutorial.') +parser.add_argument('--input', help='Path to input image.', default='home.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV) +window_image = 'Source image' +cv.namedWindow(window_image) +cv.imshow(window_image, src) +cv.createTrackbar('Low thresh', window_image, low, 255, callback_low) +cv.createTrackbar('High thresh', window_image, up, 255, callback_up) +cv.setMouseCallback(window_image, pickPoint) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py +from __future__ import print_function +from __future__ import division +import cv2 as cv +import numpy as np +import argparse +parser = argparse.ArgumentParser(description='Code for Histogram Calculation tutorial.') +parser.add_argument('--input', help='Path to input image.', default='lena.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +bgr_planes = cv.split(src) +histSize = 256 +histRange = (0, 256) +accumulate = False +b_hist = cv.calcHist(bgr_planes, [0], None, [histSize], histRange, accumulate=accumulate) +g_hist = cv.calcHist(bgr_planes, [1], None, [histSize], histRange, accumulate=accumulate) +r_hist = cv.calcHist(bgr_planes, [2], None, [histSize], histRange, accumulate=accumulate) +hist_w = 512 +hist_h = 400 +bin_w = int(round(hist_w / histSize)) +histImage = np.zeros((hist_h, hist_w, 3), dtype=np.uint8) +cv.normalize(b_hist, b_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX) +cv.normalize(g_hist, g_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX) +cv.normalize(r_hist, r_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX) +for i in range(1, histSize): + cv.line(histImage, (bin_w * (i - 1), hist_h - int(b_hist[i - 1])), (bin_w * i, hist_h - int(b_hist[i])), (255, 0, 0), thickness=2) + cv.line(histImage, (bin_w * (i - 1), 
hist_h - int(g_hist[i - 1])), (bin_w * i, hist_h - int(g_hist[i])), (0, 255, 0), thickness=2) + cv.line(histImage, (bin_w * (i - 1), hist_h - int(r_hist[i - 1])), (bin_w * i, hist_h - int(r_hist[i])), (0, 0, 255), thickness=2) +cv.imshow('Source image', src) +cv.imshow('calcHist Demo', histImage) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py +from __future__ import print_function +from __future__ import division +import cv2 as cv +import numpy as np +import argparse +parser = argparse.ArgumentParser(description='Code for Histogram Comparison tutorial.') +parser.add_argument('--input1', help='Path to input image 1.') +parser.add_argument('--input2', help='Path to input image 2.') +parser.add_argument('--input3', help='Path to input image 3.') +args = parser.parse_args() +src_base = cv.imread(args.input1) +src_test1 = cv.imread(args.input2) +src_test2 = cv.imread(args.input3) +if src_base is None or src_test1 is None or src_test2 is None: + print('Could not open or find the images!') + exit(0) +hsv_base = cv.cvtColor(src_base, cv.COLOR_BGR2HSV) +hsv_test1 = cv.cvtColor(src_test1, cv.COLOR_BGR2HSV) +hsv_test2 = cv.cvtColor(src_test2, cv.COLOR_BGR2HSV) +hsv_half_down = hsv_base[hsv_base.shape[0] // 2:, :] +h_bins = 50 +s_bins = 60 +histSize = [h_bins, s_bins] +h_ranges = [0, 180] +s_ranges = [0, 256] +ranges = h_ranges + s_ranges +channels = [0, 1] +hist_base = cv.calcHist([hsv_base], channels, None, histSize, ranges, accumulate=False) +cv.normalize(hist_base, hist_base, alpha=0, beta=1, norm_type=cv.NORM_MINMAX) +hist_half_down = cv.calcHist([hsv_half_down], channels, None, histSize, ranges, accumulate=False) +cv.normalize(hist_half_down, hist_half_down, alpha=0, beta=1, norm_type=cv.NORM_MINMAX) +hist_test1 = cv.calcHist([hsv_test1], channels, None, histSize, ranges, accumulate=False) +cv.normalize(hist_test1, hist_test1, alpha=0, beta=1, norm_type=cv.NORM_MINMAX) +hist_test2 = cv.calcHist([hsv_test2], channels, None, histSize, ranges, accumulate=False) +cv.normalize(hist_test2, hist_test2, alpha=0, beta=1, norm_type=cv.NORM_MINMAX) +for compare_method in range(4): + base_base = cv.compareHist(hist_base, hist_base, compare_method) + base_half = cv.compareHist(hist_base, hist_half_down, compare_method) + base_test1 = cv.compareHist(hist_base, hist_test1, compare_method) + base_test2 = cv.compareHist(hist_base, hist_test2, compare_method) + print('Method:', compare_method, 'Perfect, Base-Half, Base-Test(1), Base-Test(2) :', base_base, '/', base_half, '/', base_test1, '/', base_test2) + +# File: opencv-master/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py +from __future__ import print_function +import cv2 as cv +import argparse +parser = argparse.ArgumentParser(description='Code for Histogram Equalization tutorial.') +parser.add_argument('--input', help='Path to input image.', default='lena.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +src = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +dst = cv.equalizeHist(src) +cv.imshow('Source image', src) +cv.imshow('Equalized Image', dst) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py +"""""" +import sys +import cv2 as cv +import numpy as np + +def main(argv): + window_name = 'filter2D Demo' + imageName = argv[0] if len(argv) > 0 else 'lena.jpg' + src = 
cv.imread(cv.samples.findFile(imageName), cv.IMREAD_COLOR) + if src is None: + print('Error opening image!') + print('Usage: filter2D.py [image_name -- default lena.jpg] \n') + return -1 + ddepth = -1 + ind = 0 + while True: + kernel_size = 3 + 2 * (ind % 5) + kernel = np.ones((kernel_size, kernel_size), dtype=np.float32) + kernel /= kernel_size * kernel_size + dst = cv.filter2D(src, ddepth, kernel) + cv.imshow(window_name, dst) + c = cv.waitKey(500) + if c == 27: + break + ind += 1 + return 0 +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: opencv-master/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py +import sys +import cv2 as cv +import numpy as np + +def main(argv): + default_file = 'smarties.png' + filename = argv[0] if len(argv) > 0 else default_file + src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_COLOR) + if src is None: + print('Error opening image!') + print('Usage: hough_circle.py [image_name -- default ' + default_file + '] \n') + return -1 + gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) + gray = cv.medianBlur(gray, 5) + rows = gray.shape[0] + circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, rows / 8, param1=100, param2=30, minRadius=1, maxRadius=30) + if circles is not None: + circles = np.uint16(np.around(circles)) + for i in circles[0, :]: + center = (i[0], i[1]) + cv.circle(src, center, 1, (0, 100, 100), 3) + radius = i[2] + cv.circle(src, center, radius, (255, 0, 255), 3) + cv.imshow('detected circles', src) + cv.waitKey(0) + return 0 +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: opencv-master/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py +"""""" +import sys +import math +import cv2 as cv +import numpy as np + +def main(argv): + default_file = 'sudoku.png' + filename = argv[0] if len(argv) > 0 else default_file + src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE) + if src is None: + print('Error opening image!') + print('Usage: hough_lines.py [image_name -- default ' + default_file + '] \n') + return -1 + dst = cv.Canny(src, 50, 200, None, 3) + cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR) + cdstP = np.copy(cdst) + lines = cv.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0) + if lines is not None: + for i in range(0, len(lines)): + rho = lines[i][0][0] + theta = lines[i][0][1] + a = math.cos(theta) + b = math.sin(theta) + x0 = a * rho + y0 = b * rho + pt1 = (int(x0 + 1000 * -b), int(y0 + 1000 * a)) + pt2 = (int(x0 - 1000 * -b), int(y0 - 1000 * a)) + cv.line(cdst, pt1, pt2, (0, 0, 255), 3, cv.LINE_AA) + linesP = cv.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10) + if linesP is not None: + for i in range(0, len(linesP)): + l = linesP[i][0] + cv.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0, 0, 255), 3, cv.LINE_AA) + cv.imshow('Source', src) + cv.imshow('Detected Lines (in red) - Standard Hough Line Transform', cdst) + cv.imshow('Detected Lines (in red) - Probabilistic Line Transform', cdstP) + cv.waitKey() + return 0 +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: opencv-master/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py +"""""" +import sys +import cv2 as cv + +def main(argv): + ddepth = cv.CV_16S + kernel_size = 3 + window_name = 'Laplace Demo' + imageName = argv[0] if len(argv) > 0 else 'lena.jpg' + src = cv.imread(cv.samples.findFile(imageName), cv.IMREAD_COLOR) + if src is None: + print('Error opening image') + print('Program Arguments: [image_name -- default lena.jpg]') + return -1 + src = cv.GaussianBlur(src, (3, 3), 0) + src_gray = cv.cvtColor(src, 
cv.COLOR_BGR2GRAY) + cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE) + dst = cv.Laplacian(src_gray, ddepth, ksize=kernel_size) + abs_dst = cv.convertScaleAbs(dst) + cv.imshow(window_name, abs_dst) + cv.waitKey(0) + return 0 +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: opencv-master/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py +"""""" +import sys +from random import randint +import cv2 as cv + +def main(argv): + borderType = cv.BORDER_CONSTANT + window_name = 'copyMakeBorder Demo' + imageName = argv[0] if len(argv) > 0 else 'lena.jpg' + src = cv.imread(cv.samples.findFile(imageName), cv.IMREAD_COLOR) + if src is None: + print('Error opening image!') + print('Usage: copy_make_border.py [image_name -- default lena.jpg] \n') + return -1 + print("\n\t \t copyMakeBorder Demo: \n\t -------------------- \n ** Press 'c' to set the border to a random constant value \n ** Press 'r' to set the border to be replicated \n ** Press 'ESC' to exit the program ") + cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE) + top = int(0.05 * src.shape[0]) + bottom = top + left = int(0.05 * src.shape[1]) + right = left + while 1: + value = [randint(0, 255), randint(0, 255), randint(0, 255)] + dst = cv.copyMakeBorder(src, top, bottom, left, right, borderType, None, value) + cv.imshow(window_name, dst) + c = cv.waitKey(500) + if c == 27: + break + elif c == 99: + borderType = cv.BORDER_CONSTANT + elif c == 114: + borderType = cv.BORDER_REPLICATE + return 0 +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: opencv-master/samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py +"""""" +import sys +import cv2 as cv + +def main(argv): + window_name = 'Sobel Demo - Simple Edge Detector' + scale = 1 + delta = 0 + ddepth = cv.CV_16S + if len(argv) < 1: + print('Not enough parameters') + print('Usage:\nsobel_demo.py < path_to_image >') + return -1 + src = cv.imread(argv[0], cv.IMREAD_COLOR) + if src is None: + print('Error opening image: ' + argv[0]) + return -1 + src = cv.GaussianBlur(src, (3, 3), 0) + gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) + grad_x = cv.Sobel(gray, ddepth, 1, 0, ksize=3, scale=scale, delta=delta, borderType=cv.BORDER_DEFAULT) + grad_y = cv.Sobel(gray, ddepth, 0, 1, ksize=3, scale=scale, delta=delta, borderType=cv.BORDER_DEFAULT) + abs_grad_x = cv.convertScaleAbs(grad_x) + abs_grad_y = cv.convertScaleAbs(grad_y) + grad = cv.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0) + cv.imshow(window_name, grad) + cv.waitKey(0) + return 0 +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: opencv-master/samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py +from __future__ import print_function +import cv2 as cv +import argparse +max_lowThreshold = 100 +window_name = 'Edge Map' +title_trackbar = 'Min Threshold:' +ratio = 3 +kernel_size = 3 + +def CannyThreshold(val): + low_threshold = val + img_blur = cv.blur(src_gray, (3, 3)) + detected_edges = cv.Canny(img_blur, low_threshold, low_threshold * ratio, kernel_size) + mask = detected_edges != 0 + dst = src * mask[:, :, None].astype(src.dtype) + cv.imshow(window_name, dst) +parser = argparse.ArgumentParser(description='Code for Canny Edge Detector tutorial.') +parser.add_argument('--input', help='Path to input image.', default='fruits.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image: ', args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +cv.namedWindow(window_name) 
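+# The trackbar supplies the lower hysteresis threshold; CannyThreshold() derives the upper one as low_threshold * ratio and shows the source image masked by the detected edges.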
+cv.createTrackbar(title_trackbar, window_name, 0, max_lowThreshold, CannyThreshold) +CannyThreshold(0) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +import random as rng +rng.seed(12345) +parser = argparse.ArgumentParser(description='Code for Image Segmentation with Distance Transform and Watershed Algorithm. Sample code showing how to segment overlapping objects using Laplacian filtering, in addition to Watershed and Distance Transformation') +parser.add_argument('--input', help='Path to input image.', default='cards.png') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +cv.imshow('Source Image', src) +src[np.all(src == 255, axis=2)] = 0 +cv.imshow('Black Background Image', src) +kernel = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]], dtype=np.float32) +imgLaplacian = cv.filter2D(src, cv.CV_32F, kernel) +sharp = np.float32(src) +imgResult = sharp - imgLaplacian +imgResult = np.clip(imgResult, 0, 255) +imgResult = imgResult.astype('uint8') +imgLaplacian = np.clip(imgLaplacian, 0, 255) +imgLaplacian = np.uint8(imgLaplacian) +cv.imshow('New Sharped Image', imgResult) +bw = cv.cvtColor(imgResult, cv.COLOR_BGR2GRAY) +(_, bw) = cv.threshold(bw, 40, 255, cv.THRESH_BINARY | cv.THRESH_OTSU) +cv.imshow('Binary Image', bw) +dist = cv.distanceTransform(bw, cv.DIST_L2, 3) +cv.normalize(dist, dist, 0, 1.0, cv.NORM_MINMAX) +cv.imshow('Distance Transform Image', dist) +(_, dist) = cv.threshold(dist, 0.4, 1.0, cv.THRESH_BINARY) +kernel1 = np.ones((3, 3), dtype=np.uint8) +dist = cv.dilate(dist, kernel1) +cv.imshow('Peaks', dist) +dist_8u = dist.astype('uint8') +(contours, _) = cv.findContours(dist_8u, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) +markers = np.zeros(dist.shape, dtype=np.int32) +for i in range(len(contours)): + cv.drawContours(markers, contours, i, i + 1, -1) +cv.circle(markers, (5, 5), 3, (255, 255, 255), -1) +markers_8u = (markers * 10).astype('uint8') +cv.imshow('Markers', markers_8u) +cv.watershed(imgResult, markers) +mark = markers.astype('uint8') +mark = cv.bitwise_not(mark) +colors = [] +for contour in contours: + colors.append((rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))) +dst = np.zeros((markers.shape[0], markers.shape[1], 3), dtype=np.uint8) +for i in range(markers.shape[0]): + for j in range(markers.shape[1]): + index = markers[i, j] + if index > 0 and index <= len(contours): + dst[i, j, :] = colors[index - 1] +cv.imshow('Final Result', dst) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse + +def update_map(ind, map_x, map_y): + if ind == 0: + for i in range(map_x.shape[0]): + for j in range(map_x.shape[1]): + if j > map_x.shape[1] * 0.25 and j < map_x.shape[1] * 0.75 and (i > map_x.shape[0] * 0.25) and (i < map_x.shape[0] * 0.75): + map_x[i, j] = 2 * (j - map_x.shape[1] * 0.25) + 0.5 + map_y[i, j] = 2 * (i - map_y.shape[0] * 0.25) + 0.5 + else: + map_x[i, j] = 0 + map_y[i, j] = 0 + elif ind == 1: + for i in range(map_x.shape[0]): + map_x[i, :] = [x for x in range(map_x.shape[1])] + for j in range(map_y.shape[1]): + map_y[:, j] = [map_y.shape[0] - y for y in range(map_y.shape[0])] + elif ind == 2: + for i in range(map_x.shape[0]): + map_x[i, :] = 
[map_x.shape[1] - x for x in range(map_x.shape[1])] + for j in range(map_y.shape[1]): + map_y[:, j] = [y for y in range(map_y.shape[0])] + elif ind == 3: + for i in range(map_x.shape[0]): + map_x[i, :] = [map_x.shape[1] - x for x in range(map_x.shape[1])] + for j in range(map_y.shape[1]): + map_y[:, j] = [map_y.shape[0] - y for y in range(map_y.shape[0])] +parser = argparse.ArgumentParser(description='Code for Remapping tutorial.') +parser.add_argument('--input', help='Path to input image.', default='chicky_512.png') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input), cv.IMREAD_COLOR) +if src is None: + print('Could not open or find the image: ', args.input) + exit(0) +map_x = np.zeros((src.shape[0], src.shape[1]), dtype=np.float32) +map_y = np.zeros((src.shape[0], src.shape[1]), dtype=np.float32) +window_name = 'Remap demo' +cv.namedWindow(window_name) +ind = 0 +while True: + update_map(ind, map_x, map_y) + ind = (ind + 1) % 4 + dst = cv.remap(src, map_x, map_y, cv.INTER_LINEAR) + cv.imshow(window_name, dst) + c = cv.waitKey(1000) + if c == 27: + break + +# File: opencv-master/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +parser = argparse.ArgumentParser(description='Code for Affine Transformations tutorial.') +parser.add_argument('--input', help='Path to input image.', default='lena.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +srcTri = np.array([[0, 0], [src.shape[1] - 1, 0], [0, src.shape[0] - 1]]).astype(np.float32) +dstTri = np.array([[0, src.shape[1] * 0.33], [src.shape[1] * 0.85, src.shape[0] * 0.25], [src.shape[1] * 0.15, src.shape[0] * 0.7]]).astype(np.float32) +warp_mat = cv.getAffineTransform(srcTri, dstTri) +warp_dst = cv.warpAffine(src, warp_mat, (src.shape[1], src.shape[0])) +center = (warp_dst.shape[1] // 2, warp_dst.shape[0] // 2) +angle = -50 +scale = 0.6 +rot_mat = cv.getRotationMatrix2D(center, angle, scale) +warp_rotate_dst = cv.warpAffine(warp_dst, rot_mat, (warp_dst.shape[1], warp_dst.shape[0])) +cv.imshow('Source image', src) +cv.imshow('Warp', warp_dst) +cv.imshow('Warp + Rotate', warp_rotate_dst) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +import random as rng +rng.seed(12345) + +def thresh_callback(val): + threshold = val + canny_output = cv.Canny(src_gray, threshold, threshold * 2) + (contours, _) = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) + contours_poly = [None] * len(contours) + boundRect = [None] * len(contours) + centers = [None] * len(contours) + radius = [None] * len(contours) + for (i, c) in enumerate(contours): + contours_poly[i] = cv.approxPolyDP(c, 3, True) + boundRect[i] = cv.boundingRect(contours_poly[i]) + (centers[i], radius[i]) = cv.minEnclosingCircle(contours_poly[i]) + drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8) + for i in range(len(contours)): + color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256)) + cv.drawContours(drawing, contours_poly, i, color) + cv.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])), (int(boundRect[i][0] + boundRect[i][2]), int(boundRect[i][1] + boundRect[i][3])), color, 
2) + cv.circle(drawing, (int(centers[i][0]), int(centers[i][1])), int(radius[i]), color, 2) + cv.imshow('Contours', drawing) +parser = argparse.ArgumentParser(description='Code for Creating Bounding boxes and circles for contours tutorial.') +parser.add_argument('--input', help='Path to input image.', default='stuff.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +src_gray = cv.blur(src_gray, (3, 3)) +source_window = 'Source' +cv.namedWindow(source_window) +cv.imshow(source_window, src) +max_thresh = 255 +thresh = 100 +cv.createTrackbar('Canny thresh:', source_window, thresh, max_thresh, thresh_callback) +thresh_callback(thresh) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +import random as rng +rng.seed(12345) + +def thresh_callback(val): + threshold = val + canny_output = cv.Canny(src_gray, threshold, threshold * 2) + (contours, _) = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) + minRect = [None] * len(contours) + minEllipse = [None] * len(contours) + for (i, c) in enumerate(contours): + minRect[i] = cv.minAreaRect(c) + if c.shape[0] > 5: + minEllipse[i] = cv.fitEllipse(c) + drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8) + for (i, c) in enumerate(contours): + color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256)) + cv.drawContours(drawing, contours, i, color) + if c.shape[0] > 5: + cv.ellipse(drawing, minEllipse[i], color, 2) + box = cv.boxPoints(minRect[i]) + box = np.intp(box) + cv.drawContours(drawing, [box], 0, color) + cv.imshow('Contours', drawing) +parser = argparse.ArgumentParser(description='Code for Creating Bounding rotated boxes and ellipses for contours tutorial.') +parser.add_argument('--input', help='Path to input image.', default='stuff.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +src_gray = cv.blur(src_gray, (3, 3)) +source_window = 'Source' +cv.namedWindow(source_window) +cv.imshow(source_window, src) +max_thresh = 255 +thresh = 100 +cv.createTrackbar('Canny Thresh:', source_window, thresh, max_thresh, thresh_callback) +thresh_callback(thresh) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +import random as rng +rng.seed(12345) + +def thresh_callback(val): + threshold = val + canny_output = cv.Canny(src_gray, threshold, threshold * 2) + (contours, hierarchy) = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) + drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8) + for i in range(len(contours)): + color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256)) + cv.drawContours(drawing, contours, i, color, 2, cv.LINE_8, hierarchy, 0) + cv.imshow('Contours', drawing) +parser = argparse.ArgumentParser(description='Code for Finding contours in your image tutorial.') +parser.add_argument('--input', help='Path to input image.', default='HappyFish.jpg') +args = 
parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +src_gray = cv.blur(src_gray, (3, 3)) +source_window = 'Source' +cv.namedWindow(source_window) +cv.imshow(source_window, src) +max_thresh = 255 +thresh = 100 +cv.createTrackbar('Canny Thresh:', source_window, thresh, max_thresh, thresh_callback) +thresh_callback(thresh) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +import random as rng +rng.seed(12345) + +def thresh_callback(val): + threshold = val + canny_output = cv.Canny(src_gray, threshold, threshold * 2) + (contours, _) = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) + hull_list = [] + for i in range(len(contours)): + hull = cv.convexHull(contours[i]) + hull_list.append(hull) + drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8) + for i in range(len(contours)): + color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256)) + cv.drawContours(drawing, contours, i, color) + cv.drawContours(drawing, hull_list, i, color) + cv.imshow('Contours', drawing) +parser = argparse.ArgumentParser(description='Code for Convex Hull tutorial.') +parser.add_argument('--input', help='Path to input image.', default='stuff.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +src_gray = cv.blur(src_gray, (3, 3)) +source_window = 'Source' +cv.namedWindow(source_window) +cv.imshow(source_window, src) +max_thresh = 255 +thresh = 100 +cv.createTrackbar('Canny thresh:', source_window, thresh, max_thresh, thresh_callback) +thresh_callback(thresh) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py +from __future__ import print_function +from __future__ import division +import cv2 as cv +import numpy as np +import argparse +import random as rng +rng.seed(12345) + +def thresh_callback(val): + threshold = val + canny_output = cv.Canny(src_gray, threshold, threshold * 2) + (contours, _) = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) + mu = [None] * len(contours) + for i in range(len(contours)): + mu[i] = cv.moments(contours[i]) + mc = [None] * len(contours) + for i in range(len(contours)): + mc[i] = (mu[i]['m10'] / (mu[i]['m00'] + 1e-05), mu[i]['m01'] / (mu[i]['m00'] + 1e-05)) + drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8) + for i in range(len(contours)): + color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256)) + cv.drawContours(drawing, contours, i, color, 2) + cv.circle(drawing, (int(mc[i][0]), int(mc[i][1])), 4, color, -1) + cv.imshow('Contours', drawing) + for i in range(len(contours)): + print(' * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f' % (i, mu[i]['m00'], cv.contourArea(contours[i]), cv.arcLength(contours[i], True))) +parser = argparse.ArgumentParser(description='Code for Image Moments tutorial.') +parser.add_argument('--input', help='Path to input image.', default='stuff.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', 
args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +src_gray = cv.blur(src_gray, (3, 3)) +source_window = 'Source' +cv.namedWindow(source_window) +cv.imshow(source_window, src) +max_thresh = 255 +thresh = 100 +cv.createTrackbar('Canny Thresh:', source_window, thresh, max_thresh, thresh_callback) +thresh_callback(thresh) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +import random as rng +source_window = 'Image' +maxTrackbar = 25 +rng.seed(12345) + +def goodFeaturesToTrack_Demo(val): + maxCorners = max(val, 1) + qualityLevel = 0.01 + minDistance = 10 + blockSize = 3 + gradientSize = 3 + useHarrisDetector = False + k = 0.04 + copy = np.copy(src) + corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None, blockSize=blockSize, gradientSize=gradientSize, useHarrisDetector=useHarrisDetector, k=k) + print('** Number of corners detected:', corners.shape[0]) + radius = 4 + for i in range(corners.shape[0]): + cv.circle(copy, (int(corners[i, 0, 0]), int(corners[i, 0, 1])), radius, (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256)), cv.FILLED) + cv.namedWindow(source_window) + cv.imshow(source_window, copy) + winSize = (5, 5) + zeroZone = (-1, -1) + criteria = (cv.TERM_CRITERIA_EPS + cv.TermCriteria_COUNT, 40, 0.001) + corners = cv.cornerSubPix(src_gray, corners, winSize, zeroZone, criteria) + for i in range(corners.shape[0]): + print(' -- Refined Corner [', i, '] (', corners[i, 0, 0], ',', corners[i, 0, 1], ')') +parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.') +parser.add_argument('--input', help='Path to input image.', default='pic3.png') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +cv.namedWindow(source_window) +maxCorners = 10 +cv.createTrackbar('Threshold: ', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo) +cv.imshow(source_window, src) +goodFeaturesToTrack_Demo(maxCorners) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +import random as rng +myHarris_window = 'My Harris corner detector' +myShiTomasi_window = 'My Shi Tomasi corner detector' +myHarris_qualityLevel = 50 +myShiTomasi_qualityLevel = 50 +max_qualityLevel = 100 +rng.seed(12345) + +def myHarris_function(val): + myHarris_copy = np.copy(src) + myHarris_qualityLevel = max(val, 1) + for i in range(src_gray.shape[0]): + for j in range(src_gray.shape[1]): + if Mc[i, j] > myHarris_minVal + (myHarris_maxVal - myHarris_minVal) * myHarris_qualityLevel / max_qualityLevel: + cv.circle(myHarris_copy, (j, i), 4, (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256)), cv.FILLED) + cv.imshow(myHarris_window, myHarris_copy) + +def myShiTomasi_function(val): + myShiTomasi_copy = np.copy(src) + myShiTomasi_qualityLevel = max(val, 1) + for i in range(src_gray.shape[0]): + for j in range(src_gray.shape[1]): + if myShiTomasi_dst[i, j] > myShiTomasi_minVal + (myShiTomasi_maxVal - myShiTomasi_minVal) * myShiTomasi_qualityLevel / max_qualityLevel: + cv.circle(myShiTomasi_copy, (j, i), 4, (rng.randint(0, 256), rng.randint(0, 
256), rng.randint(0, 256)), cv.FILLED) + cv.imshow(myShiTomasi_window, myShiTomasi_copy) +parser = argparse.ArgumentParser(description='Code for Creating your own corner detector tutorial.') +parser.add_argument('--input', help='Path to input image.', default='building.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +blockSize = 3 +apertureSize = 3 +myHarris_dst = cv.cornerEigenValsAndVecs(src_gray, blockSize, apertureSize) +Mc = np.empty(src_gray.shape, dtype=np.float32) +for i in range(src_gray.shape[0]): + for j in range(src_gray.shape[1]): + lambda_1 = myHarris_dst[i, j, 0] + lambda_2 = myHarris_dst[i, j, 1] + Mc[i, j] = lambda_1 * lambda_2 - 0.04 * pow(lambda_1 + lambda_2, 2) +(myHarris_minVal, myHarris_maxVal, _, _) = cv.minMaxLoc(Mc) +cv.namedWindow(myHarris_window) +cv.createTrackbar('Quality Level:', myHarris_window, myHarris_qualityLevel, max_qualityLevel, myHarris_function) +myHarris_function(myHarris_qualityLevel) +myShiTomasi_dst = cv.cornerMinEigenVal(src_gray, blockSize, apertureSize) +(myShiTomasi_minVal, myShiTomasi_maxVal, _, _) = cv.minMaxLoc(myShiTomasi_dst) +cv.namedWindow(myShiTomasi_window) +cv.createTrackbar('Quality Level:', myShiTomasi_window, myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function) +myShiTomasi_function(myShiTomasi_qualityLevel) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +import random as rng +source_window = 'Image' +maxTrackbar = 100 +rng.seed(12345) + +def goodFeaturesToTrack_Demo(val): + maxCorners = max(val, 1) + qualityLevel = 0.01 + minDistance = 10 + blockSize = 3 + gradientSize = 3 + useHarrisDetector = False + k = 0.04 + copy = np.copy(src) + corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None, blockSize=blockSize, gradientSize=gradientSize, useHarrisDetector=useHarrisDetector, k=k) + print('** Number of corners detected:', corners.shape[0]) + radius = 4 + for i in range(corners.shape[0]): + cv.circle(copy, (int(corners[i, 0, 0]), int(corners[i, 0, 1])), radius, (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256)), cv.FILLED) + cv.namedWindow(source_window) + cv.imshow(source_window, copy) +parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.') +parser.add_argument('--input', help='Path to input image.', default='pic3.png') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +cv.namedWindow(source_window) +maxCorners = 23 +cv.createTrackbar('Threshold: ', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo) +cv.imshow(source_window, src) +goodFeaturesToTrack_Demo(maxCorners) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +source_window = 'Source image' +corners_window = 'Corners detected' +max_thresh = 255 + +def cornerHarris_demo(val): + thresh = val + blockSize = 2 + apertureSize = 3 + k = 0.04 + dst = cv.cornerHarris(src_gray, blockSize, apertureSize, k) + 
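# cornerHarris returns a raw float32 response map; normalize it to [0, 255] so the trackbar value can serve as a threshold and the map can be displayed. + 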
dst_norm = np.empty(dst.shape, dtype=np.float32) + cv.normalize(dst, dst_norm, alpha=0, beta=255, norm_type=cv.NORM_MINMAX) + dst_norm_scaled = cv.convertScaleAbs(dst_norm) + for i in range(dst_norm.shape[0]): + for j in range(dst_norm.shape[1]): + if int(dst_norm[i, j]) > thresh: + cv.circle(dst_norm_scaled, (j, i), 5, 0, 2) + cv.namedWindow(corners_window) + cv.imshow(corners_window, dst_norm_scaled) +parser = argparse.ArgumentParser(description='Code for Harris corner detector tutorial.') +parser.add_argument('--input', help='Path to input image.', default='building.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +cv.namedWindow(source_window) +thresh = 200 +cv.createTrackbar('Threshold: ', source_window, thresh, max_thresh, cornerHarris_demo) +cv.imshow(source_window, src) +cornerHarris_demo(thresh) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/core/AddingImages/adding_images.py +from __future__ import print_function +import cv2 as cv +alpha = 0.5 +try: + raw_input +except NameError: + raw_input = input +print(' Simple Linear Blender\n-----------------------\n* Enter alpha [0.0-1.0]: ') +input_alpha = float(raw_input().strip()) +# validate the user-supplied value, keeping the 0.5 default if it is out of range +if 0 <= input_alpha <= 1: + alpha = input_alpha +src1 = cv.imread(cv.samples.findFile('LinuxLogo.jpg')) +src2 = cv.imread(cv.samples.findFile('WindowsLogo.jpg')) +if src1 is None: + print('Error loading src1') + exit(-1) +elif src2 is None: + print('Error loading src2') + exit(-1) +beta = 1.0 - alpha +dst = cv.addWeighted(src1, alpha, src2, beta, 0.0) +cv.imshow('dst', dst) +cv.waitKey(0) +cv.destroyAllWindows() + +# File: opencv-master/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py +from __future__ import print_function +import sys +import cv2 as cv +import numpy as np + +def print_help(): + print("\n This program demonstrates the use of the discrete Fourier transform (DFT).\n The dft of an image is taken and its power spectrum is displayed.\n Usage:\n discrete_fourier_transform.py [image_name -- default lena.jpg]") + +def main(argv): + print_help() + filename = argv[0] if len(argv) > 0 else 'lena.jpg' + I = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE) + if I is None: + print('Error opening image') + return -1 + (rows, cols) = I.shape + m = cv.getOptimalDFTSize(rows) + n = cv.getOptimalDFTSize(cols) + padded = cv.copyMakeBorder(I, 0, m - rows, 0, n - cols, cv.BORDER_CONSTANT, value=[0, 0, 0]) + planes = [np.float32(padded), np.zeros(padded.shape, np.float32)] + complexI = cv.merge(planes) + cv.dft(complexI, complexI) + cv.split(complexI, planes) + cv.magnitude(planes[0], planes[1], planes[0]) + magI = planes[0] + matOfOnes = np.ones(magI.shape, dtype=magI.dtype) + cv.add(matOfOnes, magI, magI) + cv.log(magI, magI) + (magI_rows, magI_cols) = magI.shape + magI = magI[0:magI_rows & -2, 0:magI_cols & -2] + cx = int(magI_rows / 2) + cy = int(magI_cols / 2) + q0 = magI[0:cx, 0:cy] + q1 = magI[cx:cx + cx, 0:cy] + q2 = magI[0:cx, cy:cy + cy] + q3 = magI[cx:cx + cx, cy:cy + cy] + tmp = np.copy(q0) + magI[0:cx, 0:cy] = q3 + magI[cx:cx + cx, cy:cy + cy] = tmp + tmp = np.copy(q1) + magI[cx:cx + cx, 0:cy] = q2 + magI[0:cx, cy:cy + cy] = tmp + cv.normalize(magI, magI, 0, 1, cv.NORM_MINMAX) + cv.imshow('Input Image', I) + cv.imshow('spectrum magnitude', magI) + cv.waitKey() +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: 
opencv-master/samples/python/tutorial_code/core/file_input_output/file_input_output.py +from __future__ import print_function +import numpy as np +import cv2 as cv +import sys + +def help(filename): + print('\n {0} shows the usage of the OpenCV serialization functionality. \n\n\n usage:\n\n python3 {0} outputfile.yml.gz\n\n\n The output file may be either in XML, YAML or JSON. You can even compress it\n\n by specifying this in its extension like xml.gz yaml.gz etc... With\n\n FileStorage you can serialize objects in OpenCV.\n\n\n For example: - create a class and have it serialized\n\n - use it to read and write matrices.\n\n '.format(filename)) + +class MyData: + A = 97 + X = np.pi + name = 'mydata1234' + + def __repr__(self): + s = '{ name = ' + self.name + ', X = ' + str(self.X) + s = s + ', A = ' + str(self.A) + '}' + return s + + def write(self, fs, name): + fs.startWriteStruct(name, cv.FileNode_MAP | cv.FileNode_FLOW) + fs.write('A', self.A) + fs.write('X', self.X) + fs.write('name', self.name) + fs.endWriteStruct() + + def read(self, node): + if not node.empty(): + self.A = int(node.getNode('A').real()) + self.X = node.getNode('X').real() + self.name = node.getNode('name').string() + else: + self.A = self.X = 0 + self.name = '' + +def main(argv): + if len(argv) != 2: + help(argv[0]) + exit(1) + R = np.eye(3, 3) + T = np.zeros((3, 1)) + m = MyData() + filename = argv[1] + s = cv.FileStorage(filename, cv.FileStorage_WRITE) + s.write('iterationNr', 100) + s.startWriteStruct('strings', cv.FileNode_SEQ) + for elem in ['image1.jpg', 'Awesomeness', '../data/baboon.jpg']: + s.write('', elem) + s.endWriteStruct() + s.startWriteStruct('Mapping', cv.FileNode_MAP) + s.write('One', 1) + s.write('Two', 2) + s.endWriteStruct() + s.write('R_MAT', R) + s.write('T_MAT', T) + m.write(s, 'MyData') + s.release() + print('Write Done.') + print('\nReading: ') + s = cv.FileStorage() + s.open(filename, cv.FileStorage_READ) + n = s.getNode('iterationNr') + itNr = int(n.real()) + print(itNr) + if not s.isOpened(): + print('Failed to open ', filename, file=sys.stderr) + help(argv[0]) + exit(1) + n = s.getNode('strings') + if not n.isSeq(): + print('strings is not a sequence! 
FAIL', file=sys.stderr) + exit(1) + for i in range(n.size()): + print(n.at(i).string()) + n = s.getNode('Mapping') + print('Two', int(n.getNode('Two').real()), '; ') + print('One', int(n.getNode('One').real()), '\n') + R = s.getNode('R_MAT').mat() + T = s.getNode('T_MAT').mat() + m.read(s.getNode('MyData')) + print('\nR =', R) + print('T =', T, '\n') + print('MyData =', '\n', m, '\n') + print('Attempt to read NonExisting (should initialize the data structure', 'with its default).') + m.read(s.getNode('NonExisting')) + print('\nNonExisting =', '\n', m) + print('\nTip: Open up', filename, 'with a text editor to see the serialized data.') +if __name__ == '__main__': + main(sys.argv) + +# File: opencv-master/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py +from __future__ import print_function +import sys +import time +import numpy as np +import cv2 as cv + +def is_grayscale(my_image): + return len(my_image.shape) < 3 + +def saturated(sum_value): + if sum_value > 255: + sum_value = 255 + if sum_value < 0: + sum_value = 0 + return sum_value + +def sharpen(my_image): + if is_grayscale(my_image): + (height, width) = my_image.shape + else: + my_image = my_image.astype(np.uint8) + (height, width, n_channels) = my_image.shape + result = np.zeros(my_image.shape, my_image.dtype) + for j in range(1, height - 1): + for i in range(1, width - 1): + if is_grayscale(my_image): + sum_value = 5 * my_image[j, i] - my_image[j + 1, i] - my_image[j - 1, i] - my_image[j, i + 1] - my_image[j, i - 1] + result[j, i] = saturated(sum_value) + else: + for k in range(0, n_channels): + sum_value = 5 * my_image[j, i, k] - my_image[j + 1, i, k] - my_image[j - 1, i, k] - my_image[j, i + 1, k] - my_image[j, i - 1, k] + result[j, i, k] = saturated(sum_value) + return result + +def main(argv): + filename = 'lena.jpg' + img_codec = cv.IMREAD_COLOR + if argv: + filename = sys.argv[1] + if len(argv) >= 2 and sys.argv[2] == 'G': + img_codec = cv.IMREAD_GRAYSCALE + src = cv.imread(cv.samples.findFile(filename), img_codec) + if src is None: + print("Can't open image [" + filename + ']') + print('Usage:') + print('mat_mask_operations.py [image_path -- default lena.jpg] [G -- grayscale]') + return -1 + cv.namedWindow('Input', cv.WINDOW_AUTOSIZE) + cv.namedWindow('Output', cv.WINDOW_AUTOSIZE) + cv.imshow('Input', src) + t = time.time() + dst0 = sharpen(src) + t = time.time() - t + print('Hand written function time passed in seconds: %s' % t) + cv.imshow('Output', dst0) + cv.waitKey() + t = time.time() + kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32) + dst1 = cv.filter2D(src, -1, kernel) + t = time.time() - t + print('Built-in filter2D time passed in seconds: %s' % t) + cv.imshow('Output', dst1) + cv.waitKey(0) + cv.destroyAllWindows() + return 0 +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: opencv-master/samples/python/tutorial_code/core/mat_operations/mat_operations.py +from __future__ import division +import cv2 as cv +import numpy as np + +def load(): + filename = 'img.jpg' + img = cv.imread(filename) + img = cv.imread(filename, cv.IMREAD_GRAYSCALE) + cv.imwrite(filename, img) + +def access_pixel(): + img = np.empty((4, 4, 3), np.uint8) + y = 0 + x = 0 + _intensity = img[y, x] + _blue = img[y, x, 0] + _green = img[y, x, 1] + _red = img[y, x, 2] + img[y, x] = 128 + +def reference_counting(): + img = cv.imread('image.jpg') + _img1 = np.copy(img) + img = cv.imread('image.jpg') + _sobelx = cv.Sobel(img, cv.CV_32F, 1, 0) + +def primitive_operations(): + img = 
np.empty((200, 200, 3), np.uint8)  # large enough for the ROI below; the original 4x4 image made img[10:110, 10:110] an empty slice + img[:] = 0 + _smallImg = img[10:110, 10:110] + img = cv.imread('image.jpg') + _grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + src = np.ones((4, 4), np.uint8) + _dst = src.astype(np.float32) + +def visualize_images(): + img = cv.imread('image.jpg') + cv.namedWindow('image', cv.WINDOW_AUTOSIZE) + cv.imshow('image', img) + cv.waitKey() + img = cv.imread('image.jpg') + grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + sobelx = cv.Sobel(grey, cv.CV_32F, 1, 0) + minVal = np.amin(sobelx) + maxVal = np.amax(sobelx) + draw = cv.convertScaleAbs(sobelx, alpha=255.0 / (maxVal - minVal), beta=-minVal * 255.0 / (maxVal - minVal)) + cv.namedWindow('image', cv.WINDOW_AUTOSIZE) + cv.imshow('image', draw) + cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/dnn/dnn_conversion/pytorch/segmentation/py_to_py_fcn_resnet50.py +from torchvision import models +from ..pytorch_model import PyTorchModelPreparer, PyTorchModelProcessor, PyTorchDnnModelProcessor +from ...common.utils import set_pytorch_env, create_parser + +class PyTorchFcnResNet50(PyTorchModelPreparer): + + def __init__(self, model_name, original_model): + super(PyTorchFcnResNet50, self).__init__(model_name, original_model) + +def main(): + parser = create_parser() + cmd_args = parser.parse_args() + set_pytorch_env() + resnets = PyTorchFcnResNet50(model_name='resnet50', original_model=models.segmentation.fcn_resnet50(pretrained=True)) + model_dict = resnets.get_prepared_models() + if cmd_args.is_evaluate: + from ...common.test_config import TestConfig + from ...common.accuracy_eval import PASCALDataFetch + from ...common.test.voc_segm_test import test_segm_models + eval_params = TestConfig() + model_names = list(model_dict.keys()) + original_model_name = model_names[0] + dnn_model_name = model_names[1] + data_fetcher = PASCALDataFetch(imgs_dir=eval_params.imgs_segm_dir, frame_size=eval_params.frame_size, bgr_to_rgb=eval_params.bgr_to_rgb) + test_segm_models([PyTorchModelProcessor(model_dict[original_model_name], original_model_name), PyTorchDnnModelProcessor(model_dict[dnn_model_name], dnn_model_name)], data_fetcher, eval_params, original_model_name) +if __name__ == '__main__': + main() + +# File: opencv-master/samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py +from __future__ import print_function +import numpy as np +import cv2 as cv + +def basicPanoramaStitching(img1Path, img2Path): + img1 = cv.imread(cv.samples.findFile(img1Path)) + img2 = cv.imread(cv.samples.findFile(img2Path)) + c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112], [0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443], [-0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654], [0, 0, 0, 1]], dtype=np.float64) + c2Mo = np.array([[0.9659258723258972, -0.2588190734386444, 0.0, -1.5529145002365112], [-0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443], [0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654], [0, 0, 0, 1]], dtype=np.float64) + cameraMatrix = np.array([[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0, 0, 1]], dtype=np.float32) + R1 = c1Mo[0:3, 0:3] + R2 = c2Mo[0:3, 0:3] + R2 = R2.transpose() + R_2to1 = np.dot(R1, R2) + H = cameraMatrix.dot(R_2to1).dot(np.linalg.inv(cameraMatrix)) + H = H / H[2][2] + img_stitch = cv.warpPerspective(img2, H, (img2.shape[1] * 2, img2.shape[0])) + img_stitch[0:img1.shape[0], 0:img1.shape[1]] = img1 + img_space = 
np.zeros((img1.shape[0], 50, 3), dtype=np.uint8) + img_compare = cv.hconcat([img1, img_space, img2]) + cv.imshow('Final', img_compare) + cv.imshow('Panorama', img_stitch) + cv.waitKey(0) + +def main(): + import argparse + parser = argparse.ArgumentParser(description='Code for homography tutorial. Example 5: basic panorama stitching from a rotating camera.') + parser.add_argument('-I1', '--image1', help='path to first image', default='Blender_Suzanne1.jpg') + parser.add_argument('-I2', '--image2', help='path to second image', default='Blender_Suzanne2.jpg') + args = parser.parse_args() + print('Panorama Stitching Started') + basicPanoramaStitching(args.image1, args.image2) + print('Panorama Stitching Completed Successfully') +if __name__ == '__main__': + main() + +# File: opencv-master/samples/python/tutorial_code/features2D/Homography/perspective_correction.py +from __future__ import print_function +import numpy as np +import cv2 as cv +import sys + +def randomColor(): + color = np.random.randint(0, 256, (1, 3))  # high is exclusive, so 256 lets each channel reach 255 + return color[0].tolist() + +def perspectiveCorrection(img1Path, img2Path, patternSize): + img1 = cv.imread(cv.samples.findFile(img1Path)) + img2 = cv.imread(cv.samples.findFile(img2Path)) + (ret1, corners1) = cv.findChessboardCorners(img1, patternSize) + (ret2, corners2) = cv.findChessboardCorners(img2, patternSize) + if not ret1 or not ret2: + print('Error, cannot find the chessboard corners in one of the images.') + sys.exit(-1) + (H, _) = cv.findHomography(corners1, corners2) + print(H) + img1_warp = cv.warpPerspective(img1, H, (img1.shape[1], img1.shape[0])) + img_draw_warp = cv.hconcat([img2, img1_warp]) + cv.imshow('Desired chessboard view / Warped source chessboard view', img_draw_warp) + corners1 = corners1.tolist() + corners1 = [a[0] for a in corners1] + img_draw_matches = cv.hconcat([img1, img2]) + for i in range(len(corners1)): + pt1 = np.array([corners1[i][0], corners1[i][1], 1]) + pt1 = pt1.reshape(3, 1) + pt2 = np.dot(H, pt1) + pt2 = pt2 / pt2[2] + end = (int(img1.shape[1] + pt2[0]), int(pt2[1])) + cv.line(img_draw_matches, tuple([int(j) for j in corners1[i]]), end, randomColor(), 2) + cv.imshow('Draw matches', img_draw_matches) + cv.waitKey(0) + +def main(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('-I1', '--image1', help='Path to the first image', default='left02.jpg') + parser.add_argument('-I2', '--image2', help='Path to the second image', default='left01.jpg') + parser.add_argument('-H', '--height', help='Height of pattern size', default=6, type=int)  # type=int so a user-supplied size is not passed on as a string + parser.add_argument('-W', '--width', help='Width of pattern size', default=9, type=int) + args = parser.parse_args() + img1Path = args.image1 + img2Path = args.image2 + h = args.height + w = args.width + perspectiveCorrection(img1Path, img2Path, (w, h)) +if __name__ == '__main__': + main() + +# File: opencv-master/samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +from math import sqrt +parser = argparse.ArgumentParser(description='Code for AKAZE local features matching tutorial.') +parser.add_argument('--input1', help='Path to input image 1.', default='graf1.png') +parser.add_argument('--input2', help='Path to input image 2.', default='graf3.png') +parser.add_argument('--homography', help='Path to the homography matrix.', default='H1to3p.xml') +args = parser.parse_args() +img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) +img2 = 
cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) +if img1 is None or img2 is None: + print('Could not open or find the images!') + exit(0) +fs = cv.FileStorage(cv.samples.findFile(args.homography), cv.FILE_STORAGE_READ) +homography = fs.getFirstTopLevelNode().mat() +akaze = cv.AKAZE_create() +(kpts1, desc1) = akaze.detectAndCompute(img1, None) +(kpts2, desc2) = akaze.detectAndCompute(img2, None) +matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE_HAMMING) +nn_matches = matcher.knnMatch(desc1, desc2, 2) +matched1 = [] +matched2 = [] +nn_match_ratio = 0.8 +for (m, n) in nn_matches: + if m.distance < nn_match_ratio * n.distance: + matched1.append(kpts1[m.queryIdx]) + matched2.append(kpts2[m.trainIdx]) +inliers1 = [] +inliers2 = [] +good_matches = [] +inlier_threshold = 2.5 +for (i, m) in enumerate(matched1): + col = np.ones((3, 1), dtype=np.float64) + col[0:2, 0] = m.pt + col = np.dot(homography, col) + col /= col[2, 0] + dist = sqrt(pow(col[0, 0] - matched2[i].pt[0], 2) + pow(col[1, 0] - matched2[i].pt[1], 2)) + if dist < inlier_threshold: + good_matches.append(cv.DMatch(len(inliers1), len(inliers2), 0)) + inliers1.append(matched1[i]) + inliers2.append(matched2[i]) +res = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], 3), dtype=np.uint8) +cv.drawMatches(img1, inliers1, img2, inliers2, good_matches, res) +cv.imwrite('akaze_result.png', res) +inlier_ratio = len(inliers1) / float(len(matched1)) +print('A-KAZE Matching Results') +print('*******************************') +print('# Keypoints 1: \t', len(kpts1)) +print('# Keypoints 2: \t', len(kpts2)) +print('# Matches: \t', len(matched1)) +print('# Inliers: \t', len(inliers1)) +print('# Inliers Ratio: \t', inlier_ratio) +cv.imshow('result', res) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.') +parser.add_argument('--input1', help='Path to input image 1.', default='box.png') +parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png') +args = parser.parse_args() +img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) +img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) +if img1 is None or img2 is None: + print('Could not open or find the images!') + exit(0) +minHessian = 400 +detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian) +(keypoints1, descriptors1) = detector.detectAndCompute(img1, None) +(keypoints2, descriptors2) = detector.detectAndCompute(img2, None) +matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE) +matches = matcher.match(descriptors1, descriptors2) +img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], 3), dtype=np.uint8) +cv.drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches) +cv.imshow('Matches', img_matches) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.') +parser.add_argument('--input', help='Path to input image.', default='box.png') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input), 
cv.IMREAD_GRAYSCALE) +if src is None: + print('Could not open or find the image:', args.input) + exit(0) +minHessian = 400 +detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian) +keypoints = detector.detect(src) +img_keypoints = np.empty((src.shape[0], src.shape[1], 3), dtype=np.uint8) +cv.drawKeypoints(src, keypoints, img_keypoints) +cv.imshow('SURF Keypoints', img_keypoints) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.') +parser.add_argument('--input1', help='Path to input image 1.', default='box.png') +parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png') +args = parser.parse_args() +img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) +img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) +if img1 is None or img2 is None: + print('Could not open or find the images!') + exit(0) +minHessian = 400 +detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian) +(keypoints1, descriptors1) = detector.detectAndCompute(img1, None) +(keypoints2, descriptors2) = detector.detectAndCompute(img2, None) +matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED) +knn_matches = matcher.knnMatch(descriptors1, descriptors2, 2) +ratio_thresh = 0.7 +good_matches = [] +for (m, n) in knn_matches: + if m.distance < ratio_thresh * n.distance: + good_matches.append(m) +img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], 3), dtype=np.uint8) +cv.drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) +cv.imshow('Good Matches', img_matches) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.') +parser.add_argument('--input1', help='Path to input image 1.', default='box.png') +parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png') +args = parser.parse_args() +img_object = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) +img_scene = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) +if img_object is None or img_scene is None: + print('Could not open or find the images!') + exit(0) +minHessian = 400 +detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian) +(keypoints_obj, descriptors_obj) = detector.detectAndCompute(img_object, None) +(keypoints_scene, descriptors_scene) = detector.detectAndCompute(img_scene, None) +matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED) +knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2) +ratio_thresh = 0.75 +good_matches = [] +for (m, n) in knn_matches: + if m.distance < ratio_thresh * n.distance: + good_matches.append(m) +img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1] + img_scene.shape[1], 3), dtype=np.uint8) +cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) +obj = 
np.empty((len(good_matches), 2), dtype=np.float32) +scene = np.empty((len(good_matches), 2), dtype=np.float32) +for i in range(len(good_matches)): + obj[i, 0] = keypoints_obj[good_matches[i].queryIdx].pt[0] + obj[i, 1] = keypoints_obj[good_matches[i].queryIdx].pt[1] + scene[i, 0] = keypoints_scene[good_matches[i].trainIdx].pt[0] + scene[i, 1] = keypoints_scene[good_matches[i].trainIdx].pt[1] +(H, _) = cv.findHomography(obj, scene, cv.RANSAC) +obj_corners = np.empty((4, 1, 2), dtype=np.float32) +obj_corners[0, 0, 0] = 0 +obj_corners[0, 0, 1] = 0 +obj_corners[1, 0, 0] = img_object.shape[1] +obj_corners[1, 0, 1] = 0 +obj_corners[2, 0, 0] = img_object.shape[1] +obj_corners[2, 0, 1] = img_object.shape[0] +obj_corners[3, 0, 0] = 0 +obj_corners[3, 0, 1] = img_object.shape[0] +scene_corners = cv.perspectiveTransform(obj_corners, H) +cv.line(img_matches, (int(scene_corners[0, 0, 0] + img_object.shape[1]), int(scene_corners[0, 0, 1])), (int(scene_corners[1, 0, 0] + img_object.shape[1]), int(scene_corners[1, 0, 1])), (0, 255, 0), 4) +cv.line(img_matches, (int(scene_corners[1, 0, 0] + img_object.shape[1]), int(scene_corners[1, 0, 1])), (int(scene_corners[2, 0, 0] + img_object.shape[1]), int(scene_corners[2, 0, 1])), (0, 255, 0), 4) +cv.line(img_matches, (int(scene_corners[2, 0, 0] + img_object.shape[1]), int(scene_corners[2, 0, 1])), (int(scene_corners[3, 0, 0] + img_object.shape[1]), int(scene_corners[3, 0, 1])), (0, 255, 0), 4) +cv.line(img_matches, (int(scene_corners[3, 0, 0] + img_object.shape[1]), int(scene_corners[3, 0, 1])), (int(scene_corners[0, 0, 0] + img_object.shape[1]), int(scene_corners[0, 0, 1])), (0, 255, 0), 4) +cv.imshow('Good Matches & Object detection', img_matches) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py +from __future__ import print_function +from __future__ import division +import cv2 as cv +import argparse +alpha_slider_max = 100 +title_window = 'Linear Blend' + +def on_trackbar(val): + alpha = val / alpha_slider_max + beta = 1.0 - alpha + dst = cv.addWeighted(src1, alpha, src2, beta, 0.0) + cv.imshow(title_window, dst) +parser = argparse.ArgumentParser(description='Code for Adding a Trackbar to our applications tutorial.') +parser.add_argument('--input1', help='Path to the first input image.', default='LinuxLogo.jpg') +parser.add_argument('--input2', help='Path to the second input image.', default='WindowsLogo.jpg') +args = parser.parse_args() +src1 = cv.imread(cv.samples.findFile(args.input1)) +src2 = cv.imread(cv.samples.findFile(args.input2)) +if src1 is None: + print('Could not open or find the image: ', args.input1) + exit(0) +if src2 is None: + print('Could not open or find the image: ', args.input2) + exit(0) +cv.namedWindow(title_window) +trackbar_name = 'Alpha x %d' % alpha_slider_max +cv.createTrackbar(trackbar_name, title_window, 0, alpha_slider_max, on_trackbar) +on_trackbar(0) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/imgProc/BasicGeometricDrawing/basic_geometric_drawing.py +import cv2 as cv +import numpy as np +W = 400 + +def my_ellipse(img, angle): + thickness = 2 + line_type = 8 + cv.ellipse(img, (W // 2, W // 2), (W // 4, W // 16), angle, 0, 360, (255, 0, 0), thickness, line_type) + +def my_filled_circle(img, center): + thickness = -1 + line_type = 8 + cv.circle(img, center, W // 32, (0, 0, 255), thickness, line_type) + +def my_polygon(img): + line_type = 8 + ppt = np.array([[W / 4, 7 * W / 8], [3 * W / 4, 7 * W / 8], [3 * W / 4, 13 * W / 16], [11 * W / 16, 13 * W / 
16], [19 * W / 32, 3 * W / 8], [3 * W / 4, 3 * W / 8], [3 * W / 4, W / 8], [26 * W / 40, W / 8], [26 * W / 40, W / 4], [22 * W / 40, W / 4], [22 * W / 40, W / 8], [18 * W / 40, W / 8], [18 * W / 40, W / 4], [14 * W / 40, W / 4], [14 * W / 40, W / 8], [W / 4, W / 8], [W / 4, 3 * W / 8], [13 * W / 32, 3 * W / 8], [5 * W / 16, 13 * W / 16], [W / 4, 13 * W / 16]], np.int32) + ppt = ppt.reshape((-1, 1, 2)) + cv.fillPoly(img, [ppt], (255, 255, 255), line_type) + +def my_line(img, start, end): + thickness = 2 + line_type = 8 + cv.line(img, start, end, (0, 0, 0), thickness, line_type) +atom_window = 'Drawing 1: Atom' +rook_window = 'Drawing 2: Rook' +size = (W, W, 3) +atom_image = np.zeros(size, dtype=np.uint8) +rook_image = np.zeros(size, dtype=np.uint8) +my_ellipse(atom_image, 90) +my_ellipse(atom_image, 0) +my_ellipse(atom_image, 45) +my_ellipse(atom_image, -45) +my_filled_circle(atom_image, (W // 2, W // 2)) +my_polygon(rook_image) +cv.rectangle(rook_image, (0, 7 * W // 8), (W, W), (0, 255, 255), -1, 8) +my_line(rook_image, (0, 15 * W // 16), (W, 15 * W // 16)) +my_line(rook_image, (W // 4, 7 * W // 8), (W // 4, W)) +my_line(rook_image, (W // 2, 7 * W // 8), (W // 2, W)) +my_line(rook_image, (3 * W // 4, 7 * W // 8), (3 * W // 4, W)) +cv.imshow(atom_window, atom_image) +cv.moveWindow(atom_window, 0, 200) +cv.imshow(rook_window, rook_image) +cv.moveWindow(rook_window, W, 200) +cv.waitKey(0) +cv.destroyAllWindows() + +# File: opencv-master/samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py +import cv2 as cv +import numpy as np +input_image = np.array(([0, 0, 0, 0, 0, 0, 0, 0], [0, 255, 255, 255, 0, 0, 0, 255], [0, 255, 255, 255, 0, 0, 0, 0], [0, 255, 255, 255, 0, 255, 0, 0], [0, 0, 255, 0, 0, 0, 0, 0], [0, 0, 255, 0, 0, 255, 255, 0], [0, 255, 0, 255, 0, 0, 255, 0], [0, 255, 255, 255, 0, 0, 0, 0]), dtype='uint8') +kernel = np.array(([0, 1, 0], [1, -1, 1], [0, 1, 0]), dtype='int') +output_image = cv.morphologyEx(input_image, cv.MORPH_HITMISS, kernel) +rate = 50 +kernel = (kernel + 1) * 127 +kernel = np.uint8(kernel) +kernel = cv.resize(kernel, None, fx=rate, fy=rate, interpolation=cv.INTER_NEAREST) +cv.imshow('kernel', kernel) +cv.moveWindow('kernel', 0, 0) +input_image = cv.resize(input_image, None, fx=rate, fy=rate, interpolation=cv.INTER_NEAREST) +cv.imshow('Original', input_image) +cv.moveWindow('Original', 0, 200) +output_image = cv.resize(output_image, None, fx=rate, fy=rate, interpolation=cv.INTER_NEAREST) +cv.imshow('Hit or Miss', output_image) +cv.moveWindow('Hit or Miss', 500, 200) +cv.waitKey(0) +cv.destroyAllWindows() + +# File: opencv-master/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py +import sys +import cv2 as cv + +def main(argv): + print('\n Zoom In-Out demo\n ------------------\n * [i] -> Zoom [i]n\n * [o] -> Zoom [o]ut\n * [ESC] -> Close program\n ') + filename = argv[0] if len(argv) > 0 else 'chicky_512.png' + src = cv.imread(cv.samples.findFile(filename)) + if src is None: + print('Error opening image!') + print('Usage: pyramids.py [image_name -- default ../data/chicky_512.png] \n') + return -1 + while 1: + (rows, cols, _channels) = map(int, src.shape) + cv.imshow('Pyramids Demo', src) + k = cv.waitKey(0) + if k == 27: + break + elif chr(k) == 'i': + src = cv.pyrUp(src, dstsize=(2 * cols, 2 * rows)) + print('** Zoom In: Image x 2') + elif chr(k) == 'o': + src = cv.pyrDown(src, dstsize=(cols // 2, rows // 2)) + print('** Zoom Out: Image / 2') + cv.destroyAllWindows() + return 0 +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: 
opencv-master/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py +import sys +import cv2 as cv +import numpy as np +DELAY_CAPTION = 1500 +DELAY_BLUR = 100 +MAX_KERNEL_LENGTH = 31 +src = None +dst = None +window_name = 'Smoothing Demo' + +def main(argv): + cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE) + imageName = argv[0] if len(argv) > 0 else 'lena.jpg' + global src + src = cv.imread(cv.samples.findFile(imageName)) + if src is None: + print('Error opening image') + print('Usage: smoothing.py [image_name -- default ../data/lena.jpg] \n') + return -1 + if display_caption('Original Image') != 0: + return 0 + global dst + dst = np.copy(src) + if display_dst(DELAY_CAPTION) != 0: + return 0 + if display_caption('Homogeneous Blur') != 0: + return 0 + for i in range(1, MAX_KERNEL_LENGTH, 2): + dst = cv.blur(src, (i, i)) + if display_dst(DELAY_BLUR) != 0: + return 0 + if display_caption('Gaussian Blur') != 0: + return 0 + for i in range(1, MAX_KERNEL_LENGTH, 2): + dst = cv.GaussianBlur(src, (i, i), 0) + if display_dst(DELAY_BLUR) != 0: + return 0 + if display_caption('Median Blur') != 0: + return 0 + for i in range(1, MAX_KERNEL_LENGTH, 2): + dst = cv.medianBlur(src, i) + if display_dst(DELAY_BLUR) != 0: + return 0 + if display_caption('Bilateral Blur') != 0: + return 0 + for i in range(1, MAX_KERNEL_LENGTH, 2): + dst = cv.bilateralFilter(src, i, i * 2, i / 2) + if display_dst(DELAY_BLUR) != 0: + return 0 + display_caption('Done!') + return 0 + +def display_caption(caption): + global dst + dst = np.zeros(src.shape, src.dtype) + (rows, cols, _ch) = src.shape + cv.putText(dst, caption, (int(cols / 4), int(rows / 2)), cv.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255)) + return display_dst(DELAY_CAPTION) + +def display_dst(delay): + cv.imshow(window_name, dst) + c = cv.waitKey(delay) + if c >= 0: + return -1 + return 0 +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: opencv-master/samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py +import cv2 as cv +import numpy as np +import argparse +W = 52 +C_Thr = 0.43 +LowThr = 35 +HighThr = 57 + +def calcGST(inputIMG, w): + img = inputIMG.astype(np.float32) + imgDiffX = cv.Sobel(img, cv.CV_32F, 1, 0, 3) + imgDiffY = cv.Sobel(img, cv.CV_32F, 0, 1, 3) + imgDiffXY = cv.multiply(imgDiffX, imgDiffY) + imgDiffXX = cv.multiply(imgDiffX, imgDiffX) + imgDiffYY = cv.multiply(imgDiffY, imgDiffY) + J11 = cv.boxFilter(imgDiffXX, cv.CV_32F, (w, w)) + J22 = cv.boxFilter(imgDiffYY, cv.CV_32F, (w, w)) + J12 = cv.boxFilter(imgDiffXY, cv.CV_32F, (w, w)) + tmp1 = J11 + J22 + tmp2 = J11 - J22 + tmp2 = cv.multiply(tmp2, tmp2) + tmp3 = cv.multiply(J12, J12) + tmp4 = np.sqrt(tmp2 + 4.0 * tmp3) + lambda1 = 0.5 * (tmp1 + tmp4) + lambda2 = 0.5 * (tmp1 - tmp4) + imgCoherencyOut = cv.divide(lambda1 - lambda2, lambda1 + lambda2) + imgOrientationOut = cv.phase(J22 - J11, 2.0 * J12, angleInDegrees=True) + imgOrientationOut = 0.5 * imgOrientationOut + return (imgCoherencyOut, imgOrientationOut) +parser = argparse.ArgumentParser(description='Code for Anisotropic image segmentation tutorial.') +parser.add_argument('-i', '--input', help='Path to input image.', required=True) +args = parser.parse_args() +imgIn = cv.imread(args.input, cv.IMREAD_GRAYSCALE) +if imgIn is None: + print('Could not open or find the image: {}'.format(args.input)) + exit(0) +(imgCoherency, imgOrientation) = calcGST(imgIn, W) +(_, imgCoherencyBin) = cv.threshold(imgCoherency, C_Thr, 255, cv.THRESH_BINARY) +(_, imgOrientationBin) = cv.threshold(imgOrientation, 
LowThr, 255, cv.THRESH_BINARY) +# the call above only enforces the lower bound (HighThr was being misused as the maxval); +# apply the upper bound as well so imgOrientationBin keeps the [LowThr, HighThr] degree band +(_, imgOrientationUpper) = cv.threshold(imgOrientation, HighThr, 255, cv.THRESH_BINARY_INV) +imgOrientationBin = cv.bitwise_and(imgOrientationBin, imgOrientationUpper) +imgBin = cv.bitwise_and(imgCoherencyBin, imgOrientationBin) +imgCoherency = cv.normalize(imgCoherency, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F) +imgOrientation = cv.normalize(imgOrientation, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F) +cv.imshow('result.jpg', np.uint8(0.5 * (imgIn + imgBin))) +cv.imshow('Coherency.jpg', imgCoherency) +cv.imshow('Orientation.jpg', imgOrientation) +cv.waitKey(0) + +# File: opencv-master/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py +from __future__ import print_function +from builtins import input +import cv2 as cv +import numpy as np +import argparse +parser = argparse.ArgumentParser(description='Code for Changing the contrast and brightness of an image! tutorial.') +parser.add_argument('--input', help='Path to input image.', default='lena.jpg') +args = parser.parse_args() +image = cv.imread(cv.samples.findFile(args.input)) +if image is None: + print('Could not open or find the image: ', args.input) + exit(0) +new_image = np.zeros(image.shape, image.dtype) +alpha = 1.0 +beta = 0 +print(' Basic Linear Transforms ') +print('-------------------------') +try: + alpha = float(input('* Enter the alpha value [1.0-3.0]: ')) + beta = int(input('* Enter the beta value [0-100]: ')) +except ValueError: + print('Error, not a number') +for y in range(image.shape[0]): + for x in range(image.shape[1]): + for c in range(image.shape[2]): + new_image[y, x, c] = np.clip(alpha * image[y, x, c] + beta, 0, 255) +cv.imshow('Original Image', image) +cv.imshow('New Image', new_image) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py +from __future__ import print_function +from __future__ import division +import cv2 as cv +import numpy as np +import argparse +alpha = 1.0 +alpha_max = 500 +beta = 0 +beta_max = 200 +gamma = 1.0 +gamma_max = 200 + +def basicLinearTransform(): + res = cv.convertScaleAbs(img_original, alpha=alpha, beta=beta) + img_corrected = cv.hconcat([img_original, res]) + cv.imshow('Brightness and contrast adjustments', img_corrected) + +def gammaCorrection(): + lookUpTable = np.empty((1, 256), np.uint8) + for i in range(256): + lookUpTable[0, i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255) + res = cv.LUT(img_original, lookUpTable) + img_gamma_corrected = cv.hconcat([img_original, res]) + cv.imshow('Gamma correction', img_gamma_corrected) + +def on_linear_transform_alpha_trackbar(val): + global alpha + alpha = val / 100 + basicLinearTransform() + +def on_linear_transform_beta_trackbar(val): + global beta + beta = val - 100 + basicLinearTransform() + +def on_gamma_correction_trackbar(val): + global gamma + gamma = val / 100 + gammaCorrection() +parser = argparse.ArgumentParser(description='Code for Changing the contrast and brightness of an image! 
tutorial.') +parser.add_argument('--input', help='Path to input image.', default='lena.jpg') +args = parser.parse_args() +img_original = cv.imread(cv.samples.findFile(args.input)) +if img_original is None: + print('Could not open or find the image: ', args.input) + exit(0) +img_corrected = np.empty((img_original.shape[0], img_original.shape[1] * 2, img_original.shape[2]), img_original.dtype) +img_gamma_corrected = np.empty((img_original.shape[0], img_original.shape[1] * 2, img_original.shape[2]), img_original.dtype) +img_corrected = cv.hconcat([img_original, img_original]) +img_gamma_corrected = cv.hconcat([img_original, img_original]) +cv.namedWindow('Brightness and contrast adjustments') +cv.namedWindow('Gamma correction') +alpha_init = int(alpha * 100) +cv.createTrackbar('Alpha gain (contrast)', 'Brightness and contrast adjustments', alpha_init, alpha_max, on_linear_transform_alpha_trackbar) +beta_init = beta + 100 +cv.createTrackbar('Beta bias (brightness)', 'Brightness and contrast adjustments', beta_init, beta_max, on_linear_transform_beta_trackbar) +gamma_init = int(gamma * 100) +cv.createTrackbar('Gamma correction', 'Gamma correction', gamma_init, gamma_max, on_gamma_correction_trackbar) +on_linear_transform_alpha_trackbar(alpha_init) +on_gamma_correction_trackbar(gamma_init) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +src = None +erosion_size = 0 +max_elem = 2 +max_kernel_size = 21 +title_trackbar_element_shape = 'Element:\n 0: Rect \n 1: Cross \n 2: Ellipse' +title_trackbar_kernel_size = 'Kernel size:\n 2n +1' +title_erosion_window = 'Erosion Demo' +title_dilation_window = 'Dilation Demo' + +def main(image): + global src + src = cv.imread(cv.samples.findFile(image)) + if src is None: + print('Could not open or find the image: ', image) + exit(0) + cv.namedWindow(title_erosion_window) + cv.createTrackbar(title_trackbar_element_shape, title_erosion_window, 0, max_elem, erosion) + cv.createTrackbar(title_trackbar_kernel_size, title_erosion_window, 0, max_kernel_size, erosion) + cv.namedWindow(title_dilation_window) + cv.createTrackbar(title_trackbar_element_shape, title_dilation_window, 0, max_elem, dilatation) + cv.createTrackbar(title_trackbar_kernel_size, title_dilation_window, 0, max_kernel_size, dilatation) + erosion(0) + dilatation(0) + cv.waitKey() + +def morph_shape(val): + if val == 0: + return cv.MORPH_RECT + elif val == 1: + return cv.MORPH_CROSS + elif val == 2: + return cv.MORPH_ELLIPSE + +def erosion(val): + erosion_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_erosion_window) + erosion_shape = morph_shape(cv.getTrackbarPos(title_trackbar_element_shape, title_erosion_window)) + element = cv.getStructuringElement(erosion_shape, (2 * erosion_size + 1, 2 * erosion_size + 1), (erosion_size, erosion_size)) + erosion_dst = cv.erode(src, element) + cv.imshow(title_erosion_window, erosion_dst) + +def dilatation(val): + dilatation_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_dilation_window) + dilation_shape = morph_shape(cv.getTrackbarPos(title_trackbar_element_shape, title_dilation_window)) + element = cv.getStructuringElement(dilation_shape, (2 * dilatation_size + 1, 2 * dilatation_size + 1), (dilatation_size, dilatation_size)) + dilatation_dst = cv.dilate(src, element) + cv.imshow(title_dilation_window, dilatation_dst) +if __name__ == '__main__': + parser = 
argparse.ArgumentParser(description='Code for Eroding and Dilating tutorial.') + parser.add_argument('--input', help='Path to input image.', default='LinuxLogo.jpg') + args = parser.parse_args() + main(args.input) + +# File: opencv-master/samples/python/tutorial_code/imgProc/hough_line_transform/hough_line_transform.py +import cv2 as cv +import numpy as np +img = cv.imread(cv.samples.findFile('sudoku.png')) +gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) +edges = cv.Canny(gray, 50, 150, apertureSize=3) +lines = cv.HoughLines(edges, 1, np.pi / 180, 200) +for line in lines: + (rho, theta) = line[0] + a = np.cos(theta) + b = np.sin(theta) + x0 = a * rho + y0 = b * rho + x1 = int(x0 + 1000 * -b) + y1 = int(y0 + 1000 * a) + x2 = int(x0 - 1000 * -b) + y2 = int(y0 - 1000 * a) + cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2) +cv.imwrite('houghlines3.jpg', img) + +# File: opencv-master/samples/python/tutorial_code/imgProc/hough_line_transform/probabilistic_hough_line_transform.py +import cv2 as cv +import numpy as np +img = cv.imread(cv.samples.findFile('sudoku.png')) +gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) +edges = cv.Canny(gray, 50, 150, apertureSize=3) +lines = cv.HoughLinesP(edges, 1, np.pi / 180, 100, minLineLength=100, maxLineGap=10) +for line in lines: + (x1, y1, x2, y2) = line[0] + cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2) +cv.imwrite('houghlines5.jpg', img) + +# File: opencv-master/samples/python/tutorial_code/imgProc/match_template/match_template.py +from __future__ import print_function +import sys +import cv2 as cv +use_mask = False +img = None +templ = None +mask = None +image_window = 'Source Image' +result_window = 'Result window' +match_method = 0 +max_Trackbar = 5 + +def main(argv): + if len(sys.argv) < 3: + print('Not enough parameters') + print('Usage:\nmatch_template_demo.py <image_name> <template_name> [<mask_name>]') + return -1 + global img + global templ + img = cv.imread(sys.argv[1], cv.IMREAD_COLOR) + templ = cv.imread(sys.argv[2], cv.IMREAD_COLOR) + if len(sys.argv) > 3: + global use_mask + use_mask = True + global mask + mask = cv.imread(sys.argv[3], cv.IMREAD_COLOR) + if img is None or templ is None or (use_mask and mask is None): + print("Can't read one of the images") + return -1 + cv.namedWindow(image_window, cv.WINDOW_AUTOSIZE) + cv.namedWindow(result_window, cv.WINDOW_AUTOSIZE) + trackbar_label = 'Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED' + cv.createTrackbar(trackbar_label, image_window, match_method, max_Trackbar, MatchingMethod) + MatchingMethod(match_method) + cv.waitKey(0) + return 0 + +def MatchingMethod(param): + global match_method + match_method = param + img_display = img.copy() + method_accepts_mask = cv.TM_SQDIFF == match_method or match_method == cv.TM_CCORR_NORMED + if use_mask and method_accepts_mask: + result = cv.matchTemplate(img, templ, match_method, None, mask) + else: + result = cv.matchTemplate(img, templ, match_method) + cv.normalize(result, result, 0, 1, cv.NORM_MINMAX, -1) + (_minVal, _maxVal, minLoc, maxLoc) = cv.minMaxLoc(result, None) + if match_method == cv.TM_SQDIFF or match_method == cv.TM_SQDIFF_NORMED: + matchLoc = minLoc + else: + matchLoc = maxLoc + # templ.shape is (rows, cols), so the width is shape[1] and the height is shape[0] + cv.rectangle(img_display, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0, 0, 0), 2, 8, 0) + cv.rectangle(result, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0, 0, 0), 2, 8, 0) + cv.imshow(image_window, img_display) + cv.imshow(result_window, result) +if __name__ == '__main__': + 
main(sys.argv[1:]) + +# File: opencv-master/samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py +"""""" +import numpy as np +import sys +import cv2 as cv + +def show_wait_destroy(winname, img): + cv.imshow(winname, img) + cv.moveWindow(winname, 500, 0) + cv.waitKey(0) + cv.destroyWindow(winname) + +def main(argv): + if len(argv) < 1: + print('Not enough parameters') + print('Usage:\nmorph_lines_detection.py < path_to_image >') + return -1 + src = cv.imread(argv[0], cv.IMREAD_COLOR) + if src is None: + print('Error opening image: ' + argv[0]) + return -1 + cv.imshow('src', src) + if len(src.shape) != 2: + gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) + else: + gray = src + show_wait_destroy('gray', gray) + gray = cv.bitwise_not(gray) + bw = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 15, -2) + show_wait_destroy('binary', bw) + horizontal = np.copy(bw) + vertical = np.copy(bw) + cols = horizontal.shape[1] + horizontal_size = cols // 30 + horizontalStructure = cv.getStructuringElement(cv.MORPH_RECT, (horizontal_size, 1)) + horizontal = cv.erode(horizontal, horizontalStructure) + horizontal = cv.dilate(horizontal, horizontalStructure) + show_wait_destroy('horizontal', horizontal) + rows = vertical.shape[0] + verticalsize = rows // 30 + verticalStructure = cv.getStructuringElement(cv.MORPH_RECT, (1, verticalsize)) + vertical = cv.erode(vertical, verticalStructure) + vertical = cv.dilate(vertical, verticalStructure) + show_wait_destroy('vertical', vertical) + vertical = cv.bitwise_not(vertical) + show_wait_destroy('vertical_bit', vertical) + '' + edges = cv.adaptiveThreshold(vertical, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 3, -2) + show_wait_destroy('edges', edges) + kernel = np.ones((2, 2), np.uint8) + edges = cv.dilate(edges, kernel) + show_wait_destroy('dilate', edges) + smooth = np.copy(vertical) + smooth = cv.blur(smooth, (2, 2)) + (rows, cols) = np.where(edges != 0) + vertical[rows, cols] = smooth[rows, cols] + show_wait_destroy('smooth - final', vertical) + return 0 +if __name__ == '__main__': + main(sys.argv[1:]) + +# File: opencv-master/samples/python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py +from __future__ import print_function +import cv2 as cv +import numpy as np +import argparse +morph_size = 0 +max_operator = 4 +max_elem = 2 +max_kernel_size = 21 +title_trackbar_operator_type = 'Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat' +title_trackbar_element_type = 'Element:\n 0: Rect - 1: Cross - 2: Ellipse' +title_trackbar_kernel_size = 'Kernel size:\n 2n + 1' +title_window = 'Morphology Transformations Demo' +morph_op_dic = {0: cv.MORPH_OPEN, 1: cv.MORPH_CLOSE, 2: cv.MORPH_GRADIENT, 3: cv.MORPH_TOPHAT, 4: cv.MORPH_BLACKHAT} + +def morphology_operations(val): + morph_operator = cv.getTrackbarPos(title_trackbar_operator_type, title_window) + morph_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_window) + morph_elem = 0 + val_type = cv.getTrackbarPos(title_trackbar_element_type, title_window) + if val_type == 0: + morph_elem = cv.MORPH_RECT + elif val_type == 1: + morph_elem = cv.MORPH_CROSS + elif val_type == 2: + morph_elem = cv.MORPH_ELLIPSE + element = cv.getStructuringElement(morph_elem, (2 * morph_size + 1, 2 * morph_size + 1), (morph_size, morph_size)) + operation = morph_op_dic[morph_operator] + dst = cv.morphologyEx(src, operation, element) + cv.imshow(title_window, dst) +parser = argparse.ArgumentParser(description='Code for More Morphology 
Transformations tutorial.') +parser.add_argument('--input', help='Path to input image.', default='LinuxLogo.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image: ', args.input) + exit(0) +cv.namedWindow(title_window) +cv.createTrackbar(title_trackbar_operator_type, title_window, 0, max_operator, morphology_operations) +cv.createTrackbar(title_trackbar_element_type, title_window, 0, max_elem, morphology_operations) +cv.createTrackbar(title_trackbar_kernel_size, title_window, 0, max_kernel_size, morphology_operations) +morphology_operations(0) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/imgProc/threshold/threshold.py +from __future__ import print_function +import cv2 as cv +import argparse +max_value = 255 +max_type = 4 +max_binary_value = 255 +trackbar_type = 'Type: \n 0: Binary \n 1: Binary Inverted \n 2: Truncate \n 3: To Zero \n 4: To Zero Inverted' +trackbar_value = 'Value' +window_name = 'Threshold Demo' + +def Threshold_Demo(val): + threshold_type = cv.getTrackbarPos(trackbar_type, window_name) + threshold_value = cv.getTrackbarPos(trackbar_value, window_name) + (_, dst) = cv.threshold(src_gray, threshold_value, max_binary_value, threshold_type) + cv.imshow(window_name, dst) +parser = argparse.ArgumentParser(description='Code for Basic Thresholding Operations tutorial.') +parser.add_argument('--input', help='Path to input image.', default='stuff.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image: ', args.input) + exit(0) +src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +cv.namedWindow(window_name) +cv.createTrackbar(trackbar_type, window_name, 3, max_type, Threshold_Demo) +cv.createTrackbar(trackbar_value, window_name, 0, max_value, Threshold_Demo) +Threshold_Demo(0) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py +from __future__ import print_function +import cv2 as cv +import argparse +max_value = 255 +max_value_H = 360 // 2 +low_H = 0 +low_S = 0 +low_V = 0 +high_H = max_value_H +high_S = max_value +high_V = max_value +window_capture_name = 'Video Capture' +window_detection_name = 'Object Detection' +low_H_name = 'Low H' +low_S_name = 'Low S' +low_V_name = 'Low V' +high_H_name = 'High H' +high_S_name = 'High S' +high_V_name = 'High V' + +def on_low_H_thresh_trackbar(val): + global low_H + global high_H + low_H = val + low_H = min(high_H - 1, low_H) + cv.setTrackbarPos(low_H_name, window_detection_name, low_H) + +def on_high_H_thresh_trackbar(val): + global low_H + global high_H + high_H = val + high_H = max(high_H, low_H + 1) + cv.setTrackbarPos(high_H_name, window_detection_name, high_H) + +def on_low_S_thresh_trackbar(val): + global low_S + global high_S + low_S = val + low_S = min(high_S - 1, low_S) + cv.setTrackbarPos(low_S_name, window_detection_name, low_S) + +def on_high_S_thresh_trackbar(val): + global low_S + global high_S + high_S = val + high_S = max(high_S, low_S + 1) + cv.setTrackbarPos(high_S_name, window_detection_name, high_S) + +def on_low_V_thresh_trackbar(val): + global low_V + global high_V + low_V = val + low_V = min(high_V - 1, low_V) + cv.setTrackbarPos(low_V_name, window_detection_name, low_V) + +def on_high_V_thresh_trackbar(val): + global low_V + global high_V + high_V = val + high_V = max(high_V, low_V + 1) + cv.setTrackbarPos(high_V_name, window_detection_name, high_V) +parser = 
argparse.ArgumentParser(description='Code for Thresholding Operations using inRange tutorial.') +parser.add_argument('--camera', help='Camera device number.', default=0, type=int) +args = parser.parse_args() +cap = cv.VideoCapture(args.camera) +cv.namedWindow(window_capture_name) +cv.namedWindow(window_detection_name) +cv.createTrackbar(low_H_name, window_detection_name, low_H, max_value_H, on_low_H_thresh_trackbar) +cv.createTrackbar(high_H_name, window_detection_name, high_H, max_value_H, on_high_H_thresh_trackbar) +cv.createTrackbar(low_S_name, window_detection_name, low_S, max_value, on_low_S_thresh_trackbar) +cv.createTrackbar(high_S_name, window_detection_name, high_S, max_value, on_high_S_thresh_trackbar) +cv.createTrackbar(low_V_name, window_detection_name, low_V, max_value, on_low_V_thresh_trackbar) +cv.createTrackbar(high_V_name, window_detection_name, high_V, max_value, on_high_V_thresh_trackbar) +while True: + (ret, frame) = cap.read() + if frame is None: + break + frame_HSV = cv.cvtColor(frame, cv.COLOR_BGR2HSV) + frame_threshold = cv.inRange(frame_HSV, (low_H, low_S, low_V), (high_H, high_S, high_V)) + cv.imshow(window_capture_name, frame) + cv.imshow(window_detection_name, frame_threshold) + key = cv.waitKey(30) + if key == ord('q') or key == 27: + break + +# File: opencv-master/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py +from __future__ import print_function +from __future__ import division +import cv2 as cv +import numpy as np +import argparse +from math import atan2, cos, sin, sqrt, pi + +def drawAxis(img, p_, q_, colour, scale): + p = list(p_) + q = list(q_) + angle = atan2(p[1] - q[1], p[0] - q[0]) + hypotenuse = sqrt((p[1] - q[1]) * (p[1] - q[1]) + (p[0] - q[0]) * (p[0] - q[0])) + q[0] = p[0] - scale * hypotenuse * cos(angle) + q[1] = p[1] - scale * hypotenuse * sin(angle) + cv.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv.LINE_AA) + p[0] = q[0] + 9 * cos(angle + pi / 4) + p[1] = q[1] + 9 * sin(angle + pi / 4) + cv.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv.LINE_AA) + p[0] = q[0] + 9 * cos(angle - pi / 4) + p[1] = q[1] + 9 * sin(angle - pi / 4) + cv.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv.LINE_AA) + +def getOrientation(pts, img): + sz = len(pts) + data_pts = np.empty((sz, 2), dtype=np.float64) + for i in range(data_pts.shape[0]): + data_pts[i, 0] = pts[i, 0, 0] + data_pts[i, 1] = pts[i, 0, 1] + mean = np.empty(0) + (mean, eigenvectors, eigenvalues) = cv.PCACompute2(data_pts, mean) + cntr = (int(mean[0, 0]), int(mean[0, 1])) + cv.circle(img, cntr, 3, (255, 0, 255), 2) + p1 = (cntr[0] + 0.02 * eigenvectors[0, 0] * eigenvalues[0, 0], cntr[1] + 0.02 * eigenvectors[0, 1] * eigenvalues[0, 0]) + p2 = (cntr[0] - 0.02 * eigenvectors[1, 0] * eigenvalues[1, 0], cntr[1] - 0.02 * eigenvectors[1, 1] * eigenvalues[1, 0]) + drawAxis(img, cntr, p1, (0, 255, 0), 1) + drawAxis(img, cntr, p2, (255, 255, 0), 5) + angle = atan2(eigenvectors[0, 1], eigenvectors[0, 0]) + return angle +parser = argparse.ArgumentParser(description='Code for Introduction to Principal Component Analysis (PCA) tutorial. 
This program demonstrates how to use OpenCV PCA to extract the orientation of an object.') +parser.add_argument('--input', help='Path to input image.', default='pca_test1.jpg') +args = parser.parse_args() +src = cv.imread(cv.samples.findFile(args.input)) +if src is None: + print('Could not open or find the image: ', args.input) + exit(0) +cv.imshow('src', src) +gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) +(_, bw) = cv.threshold(gray, 50, 255, cv.THRESH_BINARY | cv.THRESH_OTSU) +(contours, _) = cv.findContours(bw, cv.RETR_LIST, cv.CHAIN_APPROX_NONE) +for (i, c) in enumerate(contours): + area = cv.contourArea(c) + if area < 100.0 or 100000.0 < area: + continue + cv.drawContours(src, contours, i, (0, 0, 255), 2) + getOrientation(c, src) +cv.imshow('output', src) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py +import cv2 as cv +import numpy as np +labels = np.array([1, -1, -1, -1]) +trainingData = np.matrix([[501, 10], [255, 10], [501, 255], [10, 501]], dtype=np.float32) +svm = cv.ml.SVM_create() +svm.setType(cv.ml.SVM_C_SVC) +svm.setKernel(cv.ml.SVM_LINEAR) +svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-06)) +svm.train(trainingData, cv.ml.ROW_SAMPLE, labels) +width = 512 +height = 512 +image = np.zeros((height, width, 3), dtype=np.uint8) +green = (0, 255, 0) +blue = (255, 0, 0) +for i in range(image.shape[0]): + for j in range(image.shape[1]): + sampleMat = np.matrix([[j, i]], dtype=np.float32) + response = svm.predict(sampleMat)[1] + if response == 1: + image[i, j] = green + elif response == -1: + image[i, j] = blue +thickness = -1 +cv.circle(image, (501, 10), 5, (0, 0, 0), thickness) +cv.circle(image, (255, 10), 5, (255, 255, 255), thickness) +cv.circle(image, (501, 255), 5, (255, 255, 255), thickness) +cv.circle(image, (10, 501), 5, (255, 255, 255), thickness) +thickness = 2 +sv = svm.getUncompressedSupportVectors() +for i in range(sv.shape[0]): + cv.circle(image, (int(sv[i, 0]), int(sv[i, 1])), 6, (128, 128, 128), thickness) +cv.imwrite('result.png', image) +cv.imshow('SVM Simple Example', image) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py +from __future__ import print_function +import cv2 as cv +import numpy as np +NTRAINING_SAMPLES = 100 +FRAC_LINEAR_SEP = 0.9 +WIDTH = 512 +HEIGHT = 512 +I = np.zeros((HEIGHT, WIDTH, 3), dtype=np.uint8) +trainData = np.empty((2 * NTRAINING_SAMPLES, 2), dtype=np.float32) +labels = np.empty((2 * NTRAINING_SAMPLES, 1), dtype=np.int32) +np.random.seed(100)  # seed NumPy's generator; the samples below are drawn with np.random.uniform (seeding the stdlib random module here would have no effect) +nLinearSamples = int(FRAC_LINEAR_SEP * NTRAINING_SAMPLES) +trainClass = trainData[0:nLinearSamples, :] +c = trainClass[:, 0:1] +c[:] = np.random.uniform(0.0, 0.4 * WIDTH, c.shape) +c = trainClass[:, 1:2] +c[:] = np.random.uniform(0.0, HEIGHT, c.shape) +trainClass = trainData[2 * NTRAINING_SAMPLES - nLinearSamples:2 * NTRAINING_SAMPLES, :] +c = trainClass[:, 0:1] +c[:] = np.random.uniform(0.6 * WIDTH, WIDTH, c.shape) +c = trainClass[:, 1:2] +c[:] = np.random.uniform(0.0, HEIGHT, c.shape) +trainClass = trainData[nLinearSamples:2 * NTRAINING_SAMPLES - nLinearSamples, :] +c = trainClass[:, 0:1] +c[:] = np.random.uniform(0.4 * WIDTH, 0.6 * WIDTH, c.shape) +c = trainClass[:, 1:2] +c[:] = np.random.uniform(0.0, HEIGHT, c.shape) +labels[0:NTRAINING_SAMPLES, :] = 1 +labels[NTRAINING_SAMPLES:2 * NTRAINING_SAMPLES, :] = 2 +print('Starting training process') +svm = cv.ml.SVM_create() +svm.setType(cv.ml.SVM_C_SVC) +svm.setC(0.1) +svm.setKernel(cv.ml.SVM_LINEAR) 
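+# A short note on the hyper-parameters chosen here: with C_SVC, the C value set above
+# (0.1) is the soft-margin penalty, and such a small value lets the optimizer tolerate
+# the deliberately overlapping samples from the middle band in exchange for a wider
+# margin, which is roughly why the learned boundary settles near the centre of the frame.
+# The termination criteria set below simply cap the iterative solver at a maximum
+# iteration count with a small epsilon, since an approximate optimum is enough here.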
+svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, int(10000000.0), 1e-06)) +svm.train(trainData, cv.ml.ROW_SAMPLE, labels) +print('Finished training process') +green = (0, 100, 0) +blue = (100, 0, 0) +for i in range(I.shape[0]): + for j in range(I.shape[1]): + sampleMat = np.matrix([[j, i]], dtype=np.float32) + response = svm.predict(sampleMat)[1] + if response == 1: + I[i, j] = green + elif response == 2: + I[i, j] = blue +thick = -1 +for i in range(NTRAINING_SAMPLES): + px = trainData[i, 0] + py = trainData[i, 1] + cv.circle(I, (int(px), int(py)), 3, (0, 255, 0), thick) +for i in range(NTRAINING_SAMPLES, 2 * NTRAINING_SAMPLES): + px = trainData[i, 0] + py = trainData[i, 1] + cv.circle(I, (int(px), int(py)), 3, (255, 0, 0), thick) +thick = 2 +sv = svm.getUncompressedSupportVectors() +for i in range(sv.shape[0]): + cv.circle(I, (int(sv[i, 0]), int(sv[i, 1])), 6, (128, 128, 128), thick) +cv.imwrite('result.png', I) +cv.imshow('SVM for Non-Linear Training Data', I) +cv.waitKey() + +# File: opencv-master/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py +import cv2 as cv +import numpy as np +SZ = 20 +bin_n = 16 +affine_flags = cv.WARP_INVERSE_MAP | cv.INTER_LINEAR + +def deskew(img): + m = cv.moments(img) + if abs(m['mu02']) < 0.01: + return img.copy() + skew = m['mu11'] / m['mu02'] + M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]]) + img = cv.warpAffine(img, M, (SZ, SZ), flags=affine_flags) + return img + +def hog(img): + gx = cv.Sobel(img, cv.CV_32F, 1, 0) + gy = cv.Sobel(img, cv.CV_32F, 0, 1) + (mag, ang) = cv.cartToPolar(gx, gy) + bins = np.int32(bin_n * ang / (2 * np.pi)) + bin_cells = (bins[:10, :10], bins[10:, :10], bins[:10, 10:], bins[10:, 10:]) + mag_cells = (mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]) + hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for (b, m) in zip(bin_cells, mag_cells)] + hist = np.hstack(hists) + return hist +img = cv.imread(cv.samples.findFile('digits.png'), 0) +if img is None: + raise Exception('we need the digits.png image from samples/data here !') +cells = [np.hsplit(row, 100) for row in np.vsplit(img, 50)] +train_cells = [i[:50] for i in cells] +test_cells = [i[50:] for i in cells] +deskewed = [list(map(deskew, row)) for row in train_cells] +hogdata = [list(map(hog, row)) for row in deskewed] +trainData = np.float32(hogdata).reshape(-1, 64) +responses = np.repeat(np.arange(10), 250)[:, np.newaxis] +svm = cv.ml.SVM_create() +svm.setKernel(cv.ml.SVM_LINEAR) +svm.setType(cv.ml.SVM_C_SVC) +svm.setC(2.67) +svm.setGamma(5.383) +svm.train(trainData, cv.ml.ROW_SAMPLE, responses) +svm.save('svm_data.dat') +deskewed = [list(map(deskew, row)) for row in test_cells] +hogdata = [list(map(hog, row)) for row in deskewed] +testData = np.float32(hogdata).reshape(-1, bin_n * 4) +result = svm.predict(testData)[1] +mask = result == responses +correct = np.count_nonzero(mask) +print(correct * 100.0 / result.size) + +# File: opencv-master/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py +from __future__ import print_function +import cv2 as cv +import argparse + +def detectAndDisplay(frame): + frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) + frame_gray = cv.equalizeHist(frame_gray) + faces = face_cascade.detectMultiScale(frame_gray) + for (x, y, w, h) in faces: + center = (x + w // 2, y + h // 2) + frame = cv.ellipse(frame, center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4) + faceROI = frame_gray[y:y + h, x:x + w] + eyes = eyes_cascade.detectMultiScale(faceROI) + for (x2, y2, w2, h2) in eyes: + eye_center 
= (x + x2 + w2 // 2, y + y2 + h2 // 2) + radius = int(round((w2 + h2) * 0.25)) + frame = cv.circle(frame, eye_center, radius, (255, 0, 0), 4) + cv.imshow('Capture - Face detection', frame) +parser = argparse.ArgumentParser(description='Code for Cascade Classifier tutorial.') +parser.add_argument('--face_cascade', help='Path to face cascade.', default='data/haarcascades/haarcascade_frontalface_alt.xml') +parser.add_argument('--eyes_cascade', help='Path to eyes cascade.', default='data/haarcascades/haarcascade_eye_tree_eyeglasses.xml') +parser.add_argument('--camera', help='Camera device number.', type=int, default=0) +args = parser.parse_args() +face_cascade_name = args.face_cascade +eyes_cascade_name = args.eyes_cascade +face_cascade = cv.CascadeClassifier() +eyes_cascade = cv.CascadeClassifier() +if not face_cascade.load(cv.samples.findFile(face_cascade_name)): + print('--(!)Error loading face cascade') + exit(0) +if not eyes_cascade.load(cv.samples.findFile(eyes_cascade_name)): + print('--(!)Error loading eyes cascade') + exit(0) +camera_device = args.camera +cap = cv.VideoCapture(camera_device) +if not cap.isOpened():  # isOpened is a method; without the parentheses this check could never fail + print('--(!)Error opening video capture') + exit(0) +while True: + (ret, frame) = cap.read() + if frame is None: + print('--(!) No captured frame -- Break!') + break + detectAndDisplay(frame) + if cv.waitKey(10) == 27: + break + +# File: opencv-master/samples/python/tutorial_code/photo/hdr_imaging/hdr_imaging.py +from __future__ import print_function +from __future__ import division +import cv2 as cv +import numpy as np +import argparse +import os + +def loadExposureSeq(path): + images = [] + times = [] + with open(os.path.join(path, 'list.txt')) as f: + content = f.readlines() + for line in content: + tokens = line.split() + images.append(cv.imread(os.path.join(path, tokens[0]))) + times.append(1 / float(tokens[1])) + return (images, np.asarray(times, dtype=np.float32)) +parser = argparse.ArgumentParser(description='Code for High Dynamic Range Imaging tutorial.') +parser.add_argument('--input', type=str, help='Path to the directory that contains images and exposure times.') +args = parser.parse_args() +if not args.input: + parser.print_help() + exit(0) +(images, times) = loadExposureSeq(args.input) +calibrate = cv.createCalibrateDebevec() +response = calibrate.process(images, times) +merge_debevec = cv.createMergeDebevec() +hdr = merge_debevec.process(images, times, response) +tonemap = cv.createTonemap(2.2) +ldr = tonemap.process(hdr) +merge_mertens = cv.createMergeMertens() +fusion = merge_mertens.process(images) +cv.imwrite('fusion.png', fusion * 255) +cv.imwrite('ldr.png', ldr * 255) +cv.imwrite('hdr.hdr', hdr) + +# File: opencv-master/samples/python/tutorial_code/video/background_subtraction/bg_sub.py +from __future__ import print_function +import cv2 as cv +import argparse +parser = argparse.ArgumentParser(description='This program shows how to use background subtraction methods provided by OpenCV. 
You can process both videos and images.') +parser.add_argument('--input', type=str, help='Path to a video or a sequence of images.', default='vtest.avi') +parser.add_argument('--algo', type=str, help='Background subtraction method (KNN, MOG2).', default='MOG2') +args = parser.parse_args() +if args.algo == 'MOG2': + backSub = cv.createBackgroundSubtractorMOG2() +else: + backSub = cv.createBackgroundSubtractorKNN() +capture = cv.VideoCapture(cv.samples.findFileOrKeep(args.input)) +if not capture.isOpened(): + print('Unable to open: ' + args.input) + exit(0) +while True: + (ret, frame) = capture.read() + if frame is None: + break + fgMask = backSub.apply(frame) + cv.rectangle(frame, (10, 2), (100, 20), (255, 255, 255), -1) + cv.putText(frame, str(capture.get(cv.CAP_PROP_POS_FRAMES)), (15, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0)) + cv.imshow('Frame', frame) + cv.imshow('FG Mask', fgMask) + keyboard = cv.waitKey(30) + if keyboard == ord('q') or keyboard == 27:  # waitKey returns an int, so compare against ord('q'), not 'q' + break + +# File: opencv-master/samples/python/tutorial_code/video/meanshift/camshift.py +import numpy as np +import cv2 as cv +import argparse +parser = argparse.ArgumentParser(description='This sample demonstrates the camshift algorithm. The example file can be downloaded from: https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4') +parser.add_argument('image', type=str, help='path to image file') +args = parser.parse_args() +cap = cv.VideoCapture(args.image) +(ret, frame) = cap.read() +(x, y, w, h) = (300, 200, 100, 50) +track_window = (x, y, w, h) +roi = frame[y:y + h, x:x + w] +hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV) +mask = cv.inRange(hsv_roi, np.array((0.0, 60.0, 32.0)), np.array((180.0, 255.0, 255.0))) +roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180]) +cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX) +term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1) +while 1: + (ret, frame) = cap.read() + if ret == True: + hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) + dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1) + (ret, track_window) = cv.CamShift(dst, track_window, term_crit) + pts = cv.boxPoints(ret) + pts = np.int32(pts)  # np.int0 was removed in NumPy 2; polylines wants integer points + img2 = cv.polylines(frame, [pts], True, 255, 2) + cv.imshow('img2', img2) + k = cv.waitKey(30) & 255 + if k == 27: + break + else: + break + +# File: opencv-master/samples/python/tutorial_code/video/meanshift/meanshift.py +import numpy as np +import cv2 as cv +import argparse +parser = argparse.ArgumentParser(description='This sample demonstrates the meanshift algorithm. 
+
+# File: opencv-master/samples/python/tutorial_code/video/meanshift/meanshift.py
+import numpy as np
+import cv2 as cv
+import argparse
+parser = argparse.ArgumentParser(description='This sample demonstrates the meanshift algorithm. The example file can be downloaded from: https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4')
+parser.add_argument('image', type=str, help='path to image file')
+args = parser.parse_args()
+cap = cv.VideoCapture(args.image)
+(ret, frame) = cap.read()
+(x, y, w, h) = (300, 200, 100, 50)
+track_window = (x, y, w, h)
+roi = frame[y:y + h, x:x + w]
+hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
+mask = cv.inRange(hsv_roi, np.array((0.0, 60.0, 32.0)), np.array((180.0, 255.0, 255.0)))
+roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180])
+cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX)
+term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)
+while True:
+    (ret, frame) = cap.read()
+    if ret:
+        hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
+        dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
+        (ret, track_window) = cv.meanShift(dst, track_window, term_crit)
+        (x, y, w, h) = track_window
+        img2 = cv.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
+        cv.imshow('img2', img2)
+        k = cv.waitKey(30) & 255
+        if k == 27:
+            break
+    else:
+        break
+
+# File: opencv-master/samples/python/tutorial_code/video/optical_flow/optical_flow.py
+import numpy as np
+import cv2 as cv
+import argparse
+parser = argparse.ArgumentParser(description='This sample demonstrates Lucas-Kanade Optical Flow calculation. The example file can be downloaded from: https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4')
+parser.add_argument('image', type=str, help='path to image file')
+args = parser.parse_args()
+cap = cv.VideoCapture(args.image)
+feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
+lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
+color = np.random.randint(0, 255, (100, 3))
+(ret, old_frame) = cap.read()
+old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
+p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
+mask = np.zeros_like(old_frame)
+while True:
+    (ret, frame) = cap.read()
+    if not ret:
+        print('No frames grabbed!')
+        break
+    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
+    (p1, st, err) = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
+    if p1 is not None:  # guard the drawing step so a failed track does not raise
+        good_new = p1[st == 1]
+        good_old = p0[st == 1]
+        for (i, (new, old)) in enumerate(zip(good_new, good_old)):
+            (a, b) = new.ravel()
+            (c, d) = old.ravel()
+            mask = cv.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
+            frame = cv.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
+    img = cv.add(frame, mask)
+    cv.imshow('frame', img)
+    k = cv.waitKey(30) & 255
+    if k == 27:
+        break
+    old_gray = frame_gray.copy()
+    p0 = good_new.reshape(-1, 1, 2)
+cv.destroyAllWindows()
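+
+# NOTE (editor): the loop above never replenishes features, so the number of
+# tracks can only shrink as points are lost. A hedged sketch of a re-detection
+# step one could add at the end of the loop body (MIN_TRACKS is a made-up
+# threshold; the other names are the script's own):
+MIN_TRACKS = 20
+if p0 is None or len(p0) < MIN_TRACKS:
+    mask = np.zeros_like(old_frame)  # also reset the drawn trails
+    p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)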
+
+# File: opencv-master/samples/python/tutorial_code/video/optical_flow/optical_flow_dense.py
+import numpy as np
+import cv2 as cv
+cap = cv.VideoCapture(cv.samples.findFile('vtest.avi'))
+(ret, frame1) = cap.read()
+prvs = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY)
+hsv = np.zeros_like(frame1)
+hsv[..., 1] = 255
+while True:
+    (ret, frame2) = cap.read()
+    if not ret:
+        print('No frames grabbed!')
+        break
+    next = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)
+    flow = cv.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
+    (mag, ang) = cv.cartToPolar(flow[..., 0], flow[..., 1])
+    hsv[..., 0] = ang * 180 / np.pi / 2  # hue encodes flow direction (OpenCV hue range is 0-180)
+    hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)  # value encodes magnitude
+    bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
+    cv.imshow('frame2', bgr)
+    k = cv.waitKey(30) & 255
+    if k == 27:
+        break
+    elif k == ord('s'):
+        cv.imwrite('opticalfb.png', frame2)
+        cv.imwrite('opticalhsv.png', bgr)
+    prvs = next
+cv.destroyAllWindows()
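+
+# NOTE (editor): the `mag` array above is easy to reuse beyond visualisation.
+# A minimal sketch of a crude per-frame motion score from the same Farneback
+# output, to be placed inside the loop (the 1.0 pixel threshold is made up):
+motion_score = float(np.mean(mag))
+if motion_score > 1.0:
+    print('motion detected, mean flow magnitude: %.2f px' % motion_score)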
+
+# File: opencv-master/samples/python/tutorial_code/videoio/video-input-psnr-ssim.py
+from __future__ import print_function
+import numpy as np
+import cv2 as cv
+import argparse
+import sys
+
+def getPSNR(I1, I2):
+    s1 = cv.absdiff(I1, I2)
+    s1 = np.float32(s1)
+    s1 = s1 * s1
+    sse = s1.sum()
+    if sse <= 1e-10:  # identical frames: report 0 instead of an infinite PSNR
+        return 0
+    else:
+        shape = I1.shape
+        mse = 1.0 * sse / (shape[0] * shape[1] * shape[2])
+        psnr = 10.0 * np.log10(255 * 255 / mse)
+        return psnr
+
+def getMSSISM(i1, i2):
+    C1 = 6.5025
+    C2 = 58.5225
+    I1 = np.float32(i1)
+    I2 = np.float32(i2)
+    I2_2 = I2 * I2
+    I1_2 = I1 * I1
+    I1_I2 = I1 * I2
+    mu1 = cv.GaussianBlur(I1, (11, 11), 1.5)
+    mu2 = cv.GaussianBlur(I2, (11, 11), 1.5)
+    mu1_2 = mu1 * mu1
+    mu2_2 = mu2 * mu2
+    mu1_mu2 = mu1 * mu2
+    sigma1_2 = cv.GaussianBlur(I1_2, (11, 11), 1.5)
+    sigma1_2 -= mu1_2
+    sigma2_2 = cv.GaussianBlur(I2_2, (11, 11), 1.5)
+    sigma2_2 -= mu2_2
+    sigma12 = cv.GaussianBlur(I1_I2, (11, 11), 1.5)
+    sigma12 -= mu1_mu2
+    t1 = 2 * mu1_mu2 + C1
+    t2 = 2 * sigma12 + C2
+    t3 = t1 * t2
+    t1 = mu1_2 + mu2_2 + C1
+    t2 = sigma1_2 + sigma2_2 + C2
+    t1 = t1 * t2
+    ssim_map = cv.divide(t3, t1)
+    mssim = cv.mean(ssim_map)
+    return mssim
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-d', '--delay', type=int, default=30, help='Time delay')
+    parser.add_argument('-v', '--psnrtriggervalue', type=int, default=30, help='PSNR Trigger Value')
+    parser.add_argument('-r', '--ref', type=str, default='Megamind.avi', help='Path to reference video')
+    parser.add_argument('-t', '--undertest', type=str, default='Megamind_bugy.avi', help='Path to the video to be tested')
+    args = parser.parse_args()
+    sourceReference = args.ref
+    sourceCompareWith = args.undertest
+    delay = args.delay
+    psnrTriggerValue = args.psnrtriggervalue
+    framenum = -1
+    captRefrnc = cv.VideoCapture(cv.samples.findFileOrKeep(sourceReference))
+    captUndTst = cv.VideoCapture(cv.samples.findFileOrKeep(sourceCompareWith))
+    if not captRefrnc.isOpened():
+        print('Could not open the reference ' + sourceReference)
+        sys.exit(-1)
+    if not captUndTst.isOpened():
+        print('Could not open case test ' + sourceCompareWith)
+        sys.exit(-1)
+    refS = (int(captRefrnc.get(cv.CAP_PROP_FRAME_WIDTH)), int(captRefrnc.get(cv.CAP_PROP_FRAME_HEIGHT)))
+    uTSi = (int(captUndTst.get(cv.CAP_PROP_FRAME_WIDTH)), int(captUndTst.get(cv.CAP_PROP_FRAME_HEIGHT)))
+    if refS != uTSi:
+        print('Inputs have different size!!! Closing.')
+        sys.exit(-1)
+    WIN_UT = 'Under Test'
+    WIN_RF = 'Reference'
+    cv.namedWindow(WIN_RF, cv.WINDOW_AUTOSIZE)
+    cv.namedWindow(WIN_UT, cv.WINDOW_AUTOSIZE)
+    cv.moveWindow(WIN_RF, 400, 0)
+    cv.moveWindow(WIN_UT, refS[0], 0)
+    print('Reference frame resolution: Width={} Height={} of nr#: {}'.format(refS[0], refS[1], captRefrnc.get(cv.CAP_PROP_FRAME_COUNT)))
+    print('PSNR trigger value {}'.format(psnrTriggerValue))
+    while True:
+        (_, frameReference) = captRefrnc.read()
+        (_, frameUnderTest) = captUndTst.read()
+        if frameReference is None or frameUnderTest is None:
+            print(' < < < Game over! > > > ')
+            break
+        framenum += 1
+        psnrv = getPSNR(frameReference, frameUnderTest)
+        print('Frame: {}# {}dB'.format(framenum, round(psnrv, 3)), end=' ')
+        if psnrv < psnrTriggerValue and psnrv:
+            mssimv = getMSSISM(frameReference, frameUnderTest)
+            print('MSSISM: R {}% G {}% B {}%'.format(round(mssimv[2] * 100, 2), round(mssimv[1] * 100, 2), round(mssimv[0] * 100, 2)), end=' ')
+        print()
+        cv.imshow(WIN_RF, frameReference)
+        cv.imshow(WIN_UT, frameUnderTest)
+        k = cv.waitKey(delay)
+        if k == 27:
+            break
+    sys.exit(0)
+if __name__ == '__main__':
+    main()
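+
+# NOTE (editor): a quick sanity check of getPSNR on synthetic data. Two uint8
+# frames differing by a constant 16 give MSE = 256, so the expected value is
+# 10*log10(255^2/256), roughly 24.05 dB:
+a = np.full((4, 4, 3), 100, np.uint8)
+b = np.full((4, 4, 3), 116, np.uint8)
+print(round(getPSNR(a, b), 2))  # -> 24.05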
+
+# File: opencv-master/samples/python/video.py
+""""""
+from __future__ import print_function
+import numpy as np
+import cv2 as cv
+import re
+from numpy import pi, sin, cos
+from tst_scene_render import TestSceneRender
+import common
+
+class VideoSynthBase(object):
+
+    def __init__(self, size=None, noise=0.0, bg=None, **params):
+        self.bg = None
+        self.frame_size = (640, 480)
+        if bg is not None:
+            self.bg = cv.imread(cv.samples.findFile(bg))
+            (h, w) = self.bg.shape[:2]
+            self.frame_size = (w, h)
+        if size is not None:
+            (w, h) = map(int, size.split('x'))
+            self.frame_size = (w, h)
+            if self.bg is not None:  # only resize when a background was actually loaded
+                self.bg = cv.resize(self.bg, self.frame_size)
+        self.noise = float(noise)
+
+    def render(self, dst):
+        pass
+
+    def read(self, dst=None):
+        (w, h) = self.frame_size
+        if self.bg is None:
+            buf = np.zeros((h, w, 3), np.uint8)
+        else:
+            buf = self.bg.copy()
+        self.render(buf)
+        if self.noise > 0.0:
+            noise = np.zeros((h, w, 3), np.int8)
+            cv.randn(noise, np.zeros(3), np.ones(3) * 255 * self.noise)
+            buf = cv.add(buf, noise, dtype=cv.CV_8UC3)
+        return (True, buf)
+
+    def isOpened(self):
+        return True
+
+class Book(VideoSynthBase):
+
+    def __init__(self, **kw):
+        super(Book, self).__init__(**kw)
+        backGr = cv.imread(cv.samples.findFile('graf1.png'))
+        fgr = cv.imread(cv.samples.findFile('box.png'))
+        self.render = TestSceneRender(backGr, fgr, speed=1)
+
+    def read(self, dst=None):
+        noise = np.zeros(self.render.sceneBg.shape, np.int8)
+        cv.randn(noise, np.zeros(3), np.ones(3) * 255 * self.noise)
+        return (True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3))
+
+class Cube(VideoSynthBase):
+
+    def __init__(self, **kw):
+        super(Cube, self).__init__(**kw)
+        self.render = TestSceneRender(cv.imread(cv.samples.findFile('pca_test1.jpg')), deformation=True, speed=1)
+
+    def read(self, dst=None):
+        noise = np.zeros(self.render.sceneBg.shape, np.int8)
+        cv.randn(noise, np.zeros(3), np.ones(3) * 255 * self.noise)
+        return (True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3))
+
+class Chess(VideoSynthBase):
+
+    def __init__(self, **kw):
+        super(Chess, self).__init__(**kw)
+        (w, h) = self.frame_size
+        self.grid_size = (sx, sy) = (10, 7)
+        white_quads = []
+        black_quads = []
+        for (i, j) in np.ndindex(sy, sx):
+            q = [[j, i, 0], [j + 1, i, 0], [j + 1, i + 1, 0], [j, i + 1, 0]]
+            [white_quads, black_quads][(i + j) % 2].append(q)
+        self.white_quads = np.float32(white_quads)
+        self.black_quads = np.float32(black_quads)
+        fx = 0.9
+        self.K = np.float64([[fx * w, 0, 0.5 * (w - 1)], [0, fx * w, 0.5 * (h - 1)], [0.0, 0.0, 1.0]])
+        self.dist_coef = np.float64([-0.2, 0.1, 0, 0])
+        self.t = 0
+
+    def draw_quads(self, img, quads, color=(0, 255, 0)):
+        img_quads = cv.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef)[0]
+        img_quads.shape = quads.shape[:2] + (2,)
+        for q in img_quads:
+            cv.fillConvexPoly(img, np.int32(q * 4), color, cv.LINE_AA, shift=2)
+
+    def render(self, dst):
+        t = self.t
+        self.t += 1.0 / 30.0
+        (sx, sy) = self.grid_size
+        center = np.array([0.5 * sx, 0.5 * sy, 0.0])
+        phi = pi / 3 + sin(t * 3) * pi / 8
+        (c, s) = (cos(phi), sin(phi))
+        ofs = np.array([sin(1.2 * t), cos(1.8 * t), 0]) * sx * 0.2
+        eye_pos = center + np.array([cos(t) * c, sin(t) * c, s]) * 15.0 + ofs
+        target_pos = center + ofs
+        (R, self.tvec) = common.lookat(eye_pos, target_pos)
+        self.rvec = common.mtx2rvec(R)
+        self.draw_quads(dst, self.white_quads, (245, 245, 245))
+        self.draw_quads(dst, self.black_quads, (10, 10, 10))
+classes = dict(chess=Chess, book=Book, cube=Cube)
+presets = dict(empty='synth:', lena='synth:bg=lena.jpg:noise=0.1', chess='synth:class=chess:bg=lena.jpg:noise=0.1:size=640x480', book='synth:class=book:bg=graf1.png:noise=0.1:size=640x480', cube='synth:class=cube:bg=pca_test1.jpg:noise=0.0:size=640x480')
+
+def create_capture(source=0, fallback=presets['chess']):
+    source = str(source).strip()
+    source = re.sub('(^|=)([a-zA-Z]):([/\\\\a-zA-Z0-9])', '\\1?disk\\2?\\3', source)  # protect Windows drive letters from the ':' split below
+    chunks = source.split(':')
+    chunks = [re.sub('\\?disk([a-zA-Z])\\?', '\\1:', s) for s in chunks]
+    source = chunks[0]
+    try:
+        source = int(source)
+    except ValueError:
+        pass
+    params = dict((s.split('=') for s in chunks[1:]))
+    cap = None
+    if source == 'synth':
+        Class = classes.get(params.get('class', None), VideoSynthBase)
+        try:
+            cap = Class(**params)
+        except Exception:
+            pass
+    else:
+        cap = cv.VideoCapture(source)
+        if 'size' in params:
+            (w, h) = map(int, params['size'].split('x'))
+            cap.set(cv.CAP_PROP_FRAME_WIDTH, w)
+            cap.set(cv.CAP_PROP_FRAME_HEIGHT, h)
+    if cap is None or not cap.isOpened():
+        print('Warning: unable to open video source: ', source)
+        if fallback is not None:
+            return create_capture(fallback, None)
+    return cap
+if __name__ == '__main__':
+    import sys
+    import getopt
+    print(__doc__)
+    (args, sources) = getopt.getopt(sys.argv[1:], '', 'shotdir=')
+    args = dict(args)
+    shotdir = args.get('--shotdir', '.')
+    if len(sources) == 0:
+        sources = [0]
+    caps = list(map(create_capture, sources))
+    shot_idx = 0
+    while True:
+        imgs = []
+        for (i, cap) in enumerate(caps):
+            (ret, img) = cap.read()
+            imgs.append(img)
+            cv.imshow('capture %d' % i, img)
+        ch = cv.waitKey(1)
+        if ch == 27:
+            break
+        if ch == ord(' '):
+            for (i, img) in enumerate(imgs):
+                fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx)
+                cv.imwrite(fn, img)
+                print(fn, 'saved')
+            shot_idx += 1
+    cv.destroyAllWindows()
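+
+# NOTE (editor): create_capture accepts a camera index, a file path, or a
+# 'synth:' URI whose key=value parts select one of the classes above. A
+# couple of illustrative calls (parameter values are arbitrary):
+cap_cam = create_capture(0)                                   # first physical camera
+cap_synth = create_capture('synth:class=chess:size=320x240')  # synthetic chessboard scene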
+
+# File: opencv-master/samples/python/video_threaded.py
+""""""
+from __future__ import print_function
+import numpy as np
+import cv2 as cv
+from multiprocessing.pool import ThreadPool
+from collections import deque
+from common import clock, draw_str, StatValue
+import video
+
+class DummyTask:
+
+    def __init__(self, data):
+        self.data = data
+
+    def ready(self):
+        return True
+
+    def get(self):
+        return self.data
+
+def main():
+    import sys
+    try:
+        fn = sys.argv[1]
+    except IndexError:
+        fn = 0
+    cap = video.create_capture(fn)
+
+    def process_frame(frame, t0):
+        frame = cv.medianBlur(frame, 19)
+        frame = cv.medianBlur(frame, 19)
+        return (frame, t0)
+    threadn = cv.getNumberOfCPUs()
+    pool = ThreadPool(processes=threadn)
+    pending = deque()
+    threaded_mode = True
+    latency = StatValue()
+    frame_interval = StatValue()
+    last_frame_time = clock()
+    while True:
+        while len(pending) > 0 and pending[0].ready():
+            (res, t0) = pending.popleft().get()
+            latency.update(clock() - t0)
+            draw_str(res, (20, 20), 'threaded : ' + str(threaded_mode))
+            draw_str(res, (20, 40), 'latency : %.1f ms' % (latency.value * 1000))
+            draw_str(res, (20, 60), 'frame interval : %.1f ms' % (frame_interval.value * 1000))
+            cv.imshow('threaded video', res)
+        if len(pending) < threadn:
+            (_ret, frame) = cap.read()
+            t = clock()
+            frame_interval.update(t - last_frame_time)
+            last_frame_time = t
+            if threaded_mode:
+                task = pool.apply_async(process_frame, (frame.copy(), t))
+            else:
+                task = DummyTask(process_frame(frame, t))
+            pending.append(task)
+        ch = cv.waitKey(1)
+        if ch == ord(' '):
+            threaded_mode = not threaded_mode
+        if ch == 27:
+            break
+    print('Done')
+if __name__ == '__main__':
+    print(__doc__)
+    main()
+    cv.destroyAllWindows()
+
+# File: opencv-master/samples/python/video_v4l2.py
+""""""
+from __future__ import print_function
+import numpy as np
+import cv2 as cv
+
+def main():
+
+    def decode_fourcc(v):
+        v = int(v)
+        return ''.join([chr(v >> 8 * i & 255) for i in range(4)])
+    font = cv.FONT_HERSHEY_SIMPLEX
+    color = (0, 255, 0)
+    cap = cv.VideoCapture(0)
+    cap.set(cv.CAP_PROP_AUTOFOCUS, 0)
+    cv.namedWindow('Video')
+    convert_rgb = True
+    fps = int(cap.get(cv.CAP_PROP_FPS))
+    focus = int(min(cap.get(cv.CAP_PROP_FOCUS) * 100, 2 ** 31 - 1))
+    cv.createTrackbar('FPS', 'Video', fps, 30, lambda v: cap.set(cv.CAP_PROP_FPS, v))
+    cv.createTrackbar('Focus', 'Video', focus, 100, lambda v: cap.set(cv.CAP_PROP_FOCUS, v / 100))
+    while True:
+        (_status, img) = cap.read()
+        fourcc = decode_fourcc(cap.get(cv.CAP_PROP_FOURCC))
+        fps = cap.get(cv.CAP_PROP_FPS)
+        if not bool(cap.get(cv.CAP_PROP_CONVERT_RGB)):
+            if fourcc == 'MJPG':
+                img = cv.imdecode(img, cv.IMREAD_GRAYSCALE)
+            elif fourcc == 'YUYV':
+                img = cv.cvtColor(img, cv.COLOR_YUV2GRAY_YUYV)
+            else:
+                print('unsupported format')
+                break
+        cv.putText(img, 'Mode: {}'.format(fourcc), (15, 40), font, 1.0, color)
+        cv.putText(img, 'FPS: {}'.format(fps), (15, 80), font, 1.0, color)
+        cv.imshow('Video', img)
+        k = cv.waitKey(1)
+        if k == 27:
+            break
+        elif k == ord('g'):
+            convert_rgb = not convert_rgb
+            cap.set(cv.CAP_PROP_CONVERT_RGB, 1 if convert_rgb else 0)
+    print('Done')
+if __name__ == '__main__':
+    print(__doc__)
+    main()
+    cv.destroyAllWindows()
+
+# File: opencv-master/samples/python/videocapture_obsensor.py
+import numpy as np
+import sys
+import cv2 as cv
+
+def main():
+    orbbec_cap = cv.VideoCapture(0, cv.CAP_OBSENSOR)
+    if not orbbec_cap.isOpened():
+        sys.exit('Fail to open camera.')
+    while True:
+        if orbbec_cap.grab():
+            (ret_bgr, bgr_image) = orbbec_cap.retrieve(None, cv.CAP_OBSENSOR_BGR_IMAGE)
+            if ret_bgr:
+                cv.imshow('BGR', bgr_image)
+            (ret_depth, depth_map) = orbbec_cap.retrieve(None, cv.CAP_OBSENSOR_DEPTH_MAP)
+            if ret_depth:
+                color_depth_map = cv.normalize(depth_map, None, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1)
+                color_depth_map = cv.applyColorMap(color_depth_map, cv.COLORMAP_JET)
+                cv.imshow('DEPTH', color_depth_map)
+        else:
+            print('Fail to grab data from the camera.')
+        if cv.pollKey() >= 0:
+            break
+    orbbec_cap.release()
+if __name__ == '__main__':
+    main()
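+
+# NOTE (editor): cv.normalize with NORM_MINMAX rescales every frame to its own
+# min/max, so the colour mapping above flickers as the scene's depth range
+# changes. A hedged sketch of a fixed-range mapping instead (assumes the depth
+# map is in millimetres; the 4 m ceiling is made up):
+MAX_DEPTH_MM = 4000
+clipped = np.clip(depth_map, 0, MAX_DEPTH_MM).astype(np.float32)
+depth_8u = (clipped * (255.0 / MAX_DEPTH_MM)).astype(np.uint8)
+color_depth_map = cv.applyColorMap(depth_8u, cv.COLORMAP_JET)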
+
+# File: opencv-master/samples/python/watershed.py
+""""""
+from __future__ import print_function
+import numpy as np
+import cv2 as cv
+from common import Sketcher
+
+class App:
+
+    def __init__(self, fn):
+        self.img = cv.imread(fn)
+        if self.img is None:
+            raise Exception('Failed to load image file: %s' % fn)
+        (h, w) = self.img.shape[:2]
+        self.markers = np.zeros((h, w), np.int32)
+        self.markers_vis = self.img.copy()
+        self.cur_marker = 1
+        self.colors = np.int32(list(np.ndindex(2, 2, 2))) * 255
+        self.auto_update = True
+        self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)
+
+    def get_colors(self):
+        return (list(map(int, self.colors[self.cur_marker])), self.cur_marker)
+
+    def watershed(self):
+        m = self.markers.copy()
+        cv.watershed(self.img, m)
+        overlay = self.colors[np.maximum(m, 0)]
+        vis = cv.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv.CV_8UC3)
+        cv.imshow('watershed', vis)
+
+    def run(self):
+        while cv.getWindowProperty('img', 0) != -1 or cv.getWindowProperty('watershed', 0) != -1:
+            ch = cv.waitKey(50)
+            if ch == 27:
+                break
+            if ch >= ord('1') and ch <= ord('7'):
+                self.cur_marker = ch - ord('0')
+                print('marker: ', self.cur_marker)
+            if ch == ord(' ') or (self.sketch.dirty and self.auto_update):
+                self.watershed()
+                self.sketch.dirty = False
+            if ch in [ord('a'), ord('A')]:
+                self.auto_update = not self.auto_update
+                print('auto_update is', ['off', 'on'][self.auto_update])
+            if ch in [ord('r'), ord('R')]:
+                self.markers[:] = 0
+                self.markers_vis[:] = self.img
+                self.sketch.show()
+        cv.destroyAllWindows()
+if __name__ == '__main__':
+    print(__doc__)
+    import sys
+    try:
+        fn = sys.argv[1]
+    except IndexError:
+        fn = 'fruits.jpg'
+    App(cv.samples.findFile(fn)).run()
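+
+# NOTE (editor): the App above drives cv.watershed interactively through
+# Sketcher. A minimal non-interactive sketch of the same call, with two
+# hand-placed seed points (the seed coordinates are made up; fruits.jpg is
+# the sample image the script itself defaults to):
+import numpy as np
+import cv2 as cv
+img = cv.imread(cv.samples.findFile('fruits.jpg'))
+markers = np.zeros(img.shape[:2], np.int32)
+markers[10, 10] = 1      # seed for region 1 (a background corner)
+markers[240, 320] = 2    # seed for region 2 (roughly the image centre)
+cv.watershed(img, markers)  # markers now holds region labels, -1 on boundaries
+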